diff --git a/args.c b/args.c
index 14b031040a4b70057e5fa9485c3d4e045c9842d0..51c0fb9c4ebf4818a07044f3b23b968dd6f7c6f4 100644
--- a/args.c
+++ b/args.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #include <stdlib.h>
 #include <string.h>
 #include <limits.h>
@@ -22,42 +21,36 @@ extern void die(const char *fmt, ...) __attribute__((noreturn));
 extern void die(const char *fmt, ...);
 #endif
 
-
 struct arg arg_init(char **argv) {
   struct arg a;
 
-  a.argv      = argv;
+  a.argv = argv;
   a.argv_step = 1;
-  a.name      = NULL;
-  a.val       = NULL;
-  a.def       = NULL;
+  a.name = NULL;
+  a.val = NULL;
+  a.def = NULL;
   return a;
 }
 
 int arg_match(struct arg *arg_, const struct arg_def *def, char **argv) {
   struct arg arg;
 
-  if (!argv[0] || argv[0][0] != '-')
-    return 0;
+  if (!argv[0] || argv[0][0] != '-') return 0;
 
   arg = arg_init(argv);
 
-  if (def->short_name
-      && strlen(arg.argv[0]) == strlen(def->short_name) + 1
-      && !strcmp(arg.argv[0] + 1, def->short_name)) {
-
+  if (def->short_name && strlen(arg.argv[0]) == strlen(def->short_name) + 1 &&
+      !strcmp(arg.argv[0] + 1, def->short_name)) {
     arg.name = arg.argv[0] + 1;
     arg.val = def->has_val ? arg.argv[1] : NULL;
     arg.argv_step = def->has_val ? 2 : 1;
   } else if (def->long_name) {
     const size_t name_len = strlen(def->long_name);
 
-    if (strlen(arg.argv[0]) >= name_len + 2
-        && arg.argv[0][1] == '-'
-        && !strncmp(arg.argv[0] + 2, def->long_name, name_len)
-        && (arg.argv[0][name_len + 2] == '='
-            || arg.argv[0][name_len + 2] == '\0')) {
-
+    if (strlen(arg.argv[0]) >= name_len + 2 && arg.argv[0][1] == '-' &&
+        !strncmp(arg.argv[0] + 2, def->long_name, name_len) &&
+        (arg.argv[0][name_len + 2] == '=' ||
+         arg.argv[0][name_len + 2] == '\0')) {
       arg.name = arg.argv[0] + 2;
       arg.val = arg.name[name_len] == '=' ? arg.name + name_len + 1 : NULL;
       arg.argv_step = 1;
@@ -70,8 +63,7 @@ int arg_match(struct arg *arg_, const struct arg_def *def, char **argv) {
   if (arg.name && arg.val && !def->has_val)
     die("Error: option %s requires no argument.\n", arg.name);
 
-  if (arg.name
-      && (arg.val || !def->has_val)) {
+  if (arg.name && (arg.val || !def->has_val)) {
     arg.def = def;
     *arg_ = arg;
     return 1;
@@ -80,15 +72,12 @@ int arg_match(struct arg *arg_, const struct arg_def *def, char **argv) {
   return 0;
 }
 
-
 const char *arg_next(struct arg *arg) {
-  if (arg->argv[0])
-    arg->argv += arg->argv_step;
+  if (arg->argv[0]) arg->argv += arg->argv_step;
 
   return *arg->argv;
 }
 
-
 char **argv_dup(int argc, const char **argv) {
   char **new_argv = malloc((argc + 1) * sizeof(*argv));
 
@@ -97,9 +86,8 @@ char **argv_dup(int argc, const char **argv) {
   return new_argv;
 }
 
-
 void arg_show_usage(FILE *fp, const struct arg_def *const *defs) {
-  char option_text[40] = {0};
+  char option_text[40] = { 0 };
 
   for (; *defs; defs++) {
     const struct arg_def *def = *defs;
@@ -109,15 +97,12 @@ void arg_show_usage(FILE *fp, const struct arg_def *const *defs) {
     if (def->short_name && def->long_name) {
       char *comma = def->has_val ? "," : ",      ";
 
-      snprintf(option_text, 37, "-%s%s%s --%s%6s",
-               def->short_name, short_val, comma,
-               def->long_name, long_val);
+      snprintf(option_text, 37, "-%s%s%s --%s%6s", def->short_name, short_val,
+               comma, def->long_name, long_val);
     } else if (def->short_name)
-      snprintf(option_text, 37, "-%s%s",
-               def->short_name, short_val);
+      snprintf(option_text, 37, "-%s%s", def->short_name, short_val);
     else if (def->long_name)
-      snprintf(option_text, 37, "          --%s%s",
-               def->long_name, long_val);
+      snprintf(option_text, 37, "          --%s%s", def->long_name, long_val);
 
     fprintf(fp, "  %-37s\t%s\n", option_text, def->desc);
 
@@ -127,59 +112,53 @@ void arg_show_usage(FILE *fp, const struct arg_def *const *defs) {
       fprintf(fp, "  %-37s\t  ", "");
 
       for (listptr = def->enums; listptr->name; listptr++)
-        fprintf(fp, "%s%s", listptr->name,
-                listptr[1].name ? ", " : "\n");
+        fprintf(fp, "%s%s", listptr->name, listptr[1].name ? ", " : "\n");
     }
   }
 }
 
-
 unsigned int arg_parse_uint(const struct arg *arg) {
-  long int   rawval;
-  char      *endptr;
+  long int rawval;
+  char *endptr;
 
   rawval = strtol(arg->val, &endptr, 10);
 
   if (arg->val[0] != '\0' && endptr[0] == '\0') {
-    if (rawval >= 0 && rawval <= UINT_MAX)
-      return rawval;
+    if (rawval >= 0 && rawval <= UINT_MAX) return rawval;
 
-    die("Option %s: Value %ld out of range for unsigned int\n",
-        arg->name, rawval);
+    die("Option %s: Value %ld out of range for unsigned int\n", arg->name,
+        rawval);
   }
 
   die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
   return 0;
 }
 
-
 int arg_parse_int(const struct arg *arg) {
-  long int   rawval;
-  char      *endptr;
+  long int rawval;
+  char *endptr;
 
   rawval = strtol(arg->val, &endptr, 10);
 
   if (arg->val[0] != '\0' && endptr[0] == '\0') {
-    if (rawval >= INT_MIN && rawval <= INT_MAX)
-      return rawval;
+    if (rawval >= INT_MIN && rawval <= INT_MAX) return rawval;
 
-    die("Option %s: Value %ld out of range for signed int\n",
-        arg->name, rawval);
+    die("Option %s: Value %ld out of range for signed int\n", arg->name,
+        rawval);
   }
 
   die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
   return 0;
 }
 
-
 struct vpx_rational {
   int num; /**< fraction numerator */
   int den; /**< fraction denominator */
 };
 struct vpx_rational arg_parse_rational(const struct arg *arg) {
-  long int             rawval;
-  char                *endptr;
-  struct vpx_rational  rat;
+  long int rawval;
+  char *endptr;
+  struct vpx_rational rat;
 
   /* parse numerator */
   rawval = strtol(arg->val, &endptr, 10);
@@ -187,9 +166,11 @@ struct vpx_rational arg_parse_rational(const struct arg *arg) {
   if (arg->val[0] != '\0' && endptr[0] == '/') {
     if (rawval >= INT_MIN && rawval <= INT_MAX)
       rat.num = rawval;
-    else die("Option %s: Value %ld out of range for signed int\n",
-               arg->name, rawval);
-  } else die("Option %s: Expected / at '%c'\n", arg->name, *endptr);
+    else
+      die("Option %s: Value %ld out of range for signed int\n", arg->name,
+          rawval);
+  } else
+    die("Option %s: Expected / at '%c'\n", arg->name, *endptr);
 
   /* parse denominator */
   rawval = strtol(endptr + 1, &endptr, 10);
@@ -197,40 +178,37 @@ struct vpx_rational arg_parse_rational(const struct arg *arg) {
   if (arg->val[0] != '\0' && endptr[0] == '\0') {
     if (rawval >= INT_MIN && rawval <= INT_MAX)
       rat.den = rawval;
-    else die("Option %s: Value %ld out of range for signed int\n",
-               arg->name, rawval);
-  } else die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
+    else
+      die("Option %s: Value %ld out of range for signed int\n", arg->name,
+          rawval);
+  } else
+    die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
 
   return rat;
 }
 
-
 int arg_parse_enum(const struct arg *arg) {
   const struct arg_enum_list *listptr;
-  long int                    rawval;
-  char                       *endptr;
+  long int rawval;
+  char *endptr;
 
   /* First see if the value can be parsed as a raw value */
   rawval = strtol(arg->val, &endptr, 10);
   if (arg->val[0] != '\0' && endptr[0] == '\0') {
     /* Got a raw value, make sure it's valid */
     for (listptr = arg->def->enums; listptr->name; listptr++)
-      if (listptr->val == rawval)
-        return rawval;
+      if (listptr->val == rawval) return rawval;
   }
 
   /* Next see if it can be parsed as a string */
   for (listptr = arg->def->enums; listptr->name; listptr++)
-    if (!strcmp(arg->val, listptr->name))
-      return listptr->val;
+    if (!strcmp(arg->val, listptr->name)) return listptr->val;
 
   die("Option %s: Invalid value '%s'\n", arg->name, arg->val);
   return 0;
 }
 
-
 int arg_parse_enum_or_int(const struct arg *arg) {
-  if (arg->def->enums)
-    return arg_parse_enum(arg);
+  if (arg->def->enums) return arg_parse_enum(arg);
   return arg_parse_int(arg);
 }
diff --git a/args.h b/args.h
index 1f37151a028681d9b0f215bb6975261e19e5fd1d..54abe04607d97903212315bd1a04981b5335728e 100644
--- a/args.h
+++ b/args.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef ARGS_H_
 #define ARGS_H_
 #include <stdio.h>
@@ -18,29 +17,33 @@ extern "C" {
 #endif
 
 struct arg {
-  char                 **argv;
-  const char            *name;
-  const char            *val;
-  unsigned int           argv_step;
-  const struct arg_def  *def;
+  char **argv;
+  const char *name;
+  const char *val;
+  unsigned int argv_step;
+  const struct arg_def *def;
 };
 
 struct arg_enum_list {
   const char *name;
-  int         val;
+  int val;
 };
-#define ARG_ENUM_LIST_END {0}
+#define ARG_ENUM_LIST_END \
+  { 0 }
 
 typedef struct arg_def {
   const char *short_name;
   const char *long_name;
-  int         has_val;
+  int has_val;
   const char *desc;
   const struct arg_enum_list *enums;
 } arg_def_t;
-#define ARG_DEF(s,l,v,d) {s,l,v,d, NULL}
-#define ARG_DEF_ENUM(s,l,v,d,e) {s,l,v,d,e}
-#define ARG_DEF_LIST_END {0}
+#define ARG_DEF(s, l, v, d) \
+  { s, l, v, d, NULL }
+#define ARG_DEF_ENUM(s, l, v, d, e) \
+  { s, l, v, d, e }
+#define ARG_DEF_LIST_END \
+  { 0 }
 
 struct arg arg_init(char **argv);
 int arg_match(struct arg *arg_, const struct arg_def *def, char **argv);
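Annotation (not part of the patch): a minimal sketch of how the argument API declared above is typically driven, for orientation while reading the reformatted hunks. The option definition, the variable names, and the `argi += arg.argv_step - 1` stepping idiom are assumptions modeled on how the libvpx command-line tools use this interface, not something this commit adds.

    #include "args.h"

    /* Hypothetical option definition for illustration only. */
    static const arg_def_t width_arg =
        ARG_DEF("w", "width", 1, "frame width in pixels");

    static unsigned int parse_width(char **argv) {
      struct arg arg;
      unsigned int width = 0;
      char **argi;

      for (argi = argv; *argi; argi++) {
        if (arg_match(&arg, &width_arg, argi)) {
          width = arg_parse_uint(&arg); /* die()s on malformed input */
          argi += arg.argv_step - 1;    /* skip the consumed value token */
        }
      }
      return width;
    }
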
diff --git a/ivfdec.c b/ivfdec.c
index 6dcd66f734d575ceb48474cf332bc1dde693133c..d37e0bc23c99265f86b48628f7a681cd0fa992c7 100644
--- a/ivfdec.c
+++ b/ivfdec.c
@@ -46,7 +46,8 @@ int file_is_ivf(struct VpxInputContext *input_ctx) {
       is_ivf = 1;
 
       if (mem_get_le16(raw_hdr + 4) != 0) {
-        fprintf(stderr, "Error: Unrecognized IVF version! This file may not"
+        fprintf(stderr,
+                "Error: Unrecognized IVF version! This file may not"
                 " decode properly.");
       }
 
@@ -69,14 +70,13 @@ int file_is_ivf(struct VpxInputContext *input_ctx) {
   return is_ivf;
 }
 
-int ivf_read_frame(FILE *infile, uint8_t **buffer,
-                   size_t *bytes_read, size_t *buffer_size) {
-  char raw_header[IVF_FRAME_HDR_SZ] = {0};
+int ivf_read_frame(FILE *infile, uint8_t **buffer, size_t *bytes_read,
+                   size_t *buffer_size) {
+  char raw_header[IVF_FRAME_HDR_SZ] = { 0 };
   size_t frame_size = 0;
 
   if (fread(raw_header, IVF_FRAME_HDR_SZ, 1, infile) != 1) {
-    if (!feof(infile))
-      warn("Failed to read frame size\n");
+    if (!feof(infile)) warn("Failed to read frame size\n");
   } else {
     frame_size = mem_get_le32(raw_header);
 
diff --git a/ivfdec.h b/ivfdec.h
index dd29cc6174b73d579ee3c4e15e372e29d73cc9a6..af725572b48dcfcee53c63c186799321ccdae5e2 100644
--- a/ivfdec.h
+++ b/ivfdec.h
@@ -18,11 +18,11 @@ extern "C" {
 
 int file_is_ivf(struct VpxInputContext *input);
 
-int ivf_read_frame(FILE *infile, uint8_t **buffer,
-                   size_t *bytes_read, size_t *buffer_size);
+int ivf_read_frame(FILE *infile, uint8_t **buffer, size_t *bytes_read,
+                   size_t *buffer_size);
 
 #ifdef __cplusplus
-}  /* extern "C" */
+} /* extern "C" */
 #endif
 
 #endif  // IVFDEC_H_
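Annotation (not part of the patch): a sketch of the decode-side read loop these prototypes support. The 0-on-success return convention and the caller-frees-buffer contract are assumptions inferred from the reformatted ivfdec.c hunk above, where the frame buffer is grown by the reader as needed.

    #include <stdlib.h>
    #include "ivfdec.h"

    static void read_all_frames(FILE *infile) {
      uint8_t *buf = NULL; /* (re)allocated by ivf_read_frame() */
      size_t bytes_read = 0;
      size_t buf_size = 0;

      while (ivf_read_frame(infile, &buf, &bytes_read, &buf_size) == 0) {
        /* consume bytes_read bytes of compressed frame data in buf */
      }
      free(buf);
    }
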
diff --git a/ivfenc.c b/ivfenc.c
index 4a97c42731c93523379fcd2cb80c2bc5524c3722..a50d31839da01f09f5bf1e2284a3373222e2cc52 100644
--- a/ivfenc.c
+++ b/ivfenc.c
@@ -13,10 +13,8 @@
 #include "vpx/vpx_encoder.h"
 #include "vpx_ports/mem_ops.h"
 
-void ivf_write_file_header(FILE *outfile,
-                           const struct vpx_codec_enc_cfg *cfg,
-                           unsigned int fourcc,
-                           int frame_cnt) {
+void ivf_write_file_header(FILE *outfile, const struct vpx_codec_enc_cfg *cfg,
+                           unsigned int fourcc, int frame_cnt) {
   char header[32];
 
   header[0] = 'D';
diff --git a/ivfenc.h b/ivfenc.h
index 6623687e8444061081a8393c578b3c794f1b7426..ebdce47be8f659f1e36115c1880f33c8dd53c6d2 100644
--- a/ivfenc.h
+++ b/ivfenc.h
@@ -19,17 +19,15 @@ struct vpx_codec_cx_pkt;
 extern "C" {
 #endif
 
-void ivf_write_file_header(FILE *outfile,
-                           const struct vpx_codec_enc_cfg *cfg,
-                           uint32_t fourcc,
-                           int frame_cnt);
+void ivf_write_file_header(FILE *outfile, const struct vpx_codec_enc_cfg *cfg,
+                           uint32_t fourcc, int frame_cnt);
 
 void ivf_write_frame_header(FILE *outfile, int64_t pts, size_t frame_size);
 
 void ivf_write_frame_size(FILE *outfile, size_t frame_size);
 
 #ifdef __cplusplus
-}  /* extern "C" */
+} /* extern "C" */
 #endif
 
 #endif  // IVFENC_H_
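Annotation (not part of the patch): a sketch pairing the writer helpers above. The VP8 fourcc constant and the write-header-once pattern are illustrative assumptions; real callers typically rewrite the file header at the end once the final frame count is known.

    #include <stdio.h>
    #include "ivfenc.h"

    static void write_ivf_frame(FILE *outfile,
                                const struct vpx_codec_enc_cfg *cfg,
                                int64_t pts, const void *data, size_t size) {
      static int wrote_file_header = 0;

      if (!wrote_file_header) {
        /* frame_cnt of 0 is a placeholder for this sketch */
        ivf_write_file_header(outfile, cfg, 0x30385056 /* "VP80" */, 0);
        wrote_file_header = 1;
      }
      ivf_write_frame_header(outfile, pts, size);
      fwrite(data, 1, size, outfile);
    }
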
diff --git a/md5_utils.c b/md5_utils.c
index f4f893a2d6764e4366f97539ea76371e4e061ff9..1ef8ef3aa49be76fdc1314ccaa63534563773078 100644
--- a/md5_utils.c
+++ b/md5_utils.c
@@ -20,19 +20,17 @@
  * Still in the public domain.
  */
 
-#include <string.h>   /* for memcpy() */
+#include <string.h> /* for memcpy() */
 
 #include "md5_utils.h"
 
-static void
-byteSwap(UWORD32 *buf, unsigned words) {
+static void byteSwap(UWORD32 *buf, unsigned words) {
   md5byte *p;
 
   /* Only swap bytes for big endian machines */
   int i = 1;
 
-  if (*(char *)&i == 1)
-    return;
+  if (*(char *)&i == 1) return;
 
   p = (md5byte *)buf;
 
@@ -47,8 +45,7 @@ byteSwap(UWORD32 *buf, unsigned words) {
  * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
  * initialization constants.
  */
-void
-MD5Init(struct MD5Context *ctx) {
+void MD5Init(struct MD5Context *ctx) {
   ctx->buf[0] = 0x67452301;
   ctx->buf[1] = 0xefcdab89;
   ctx->buf[2] = 0x98badcfe;
@@ -62,8 +59,7 @@ MD5Init(struct MD5Context *ctx) {
  * Update context to reflect the concatenation of another buffer full
  * of bytes.
  */
-void
-MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) {
+void MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) {
   UWORD32 t;
 
   /* Update byte count */
@@ -71,9 +67,9 @@ MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) {
   t = ctx->bytes[0];
 
   if ((ctx->bytes[0] = t + len) < t)
-    ctx->bytes[1]++;  /* Carry from low to high */
+    ctx->bytes[1]++; /* Carry from low to high */
 
-  t = 64 - (t & 0x3f);  /* Space available in ctx->in (at least 1) */
+  t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */
 
   if (t > len) {
     memcpy((md5byte *)ctx->in + 64 - t, buf, len);
@@ -104,8 +100,7 @@ MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) {
  * Final wrapup - pad to 64-byte boundary with the bit pattern
  * 1 0* (64-bit count of bits processed, MSB-first)
  */
-void
-MD5Final(md5byte digest[16], struct MD5Context *ctx) {
+void MD5Final(md5byte digest[16], struct MD5Context *ctx) {
   int count = ctx->bytes[0] & 0x3f; /* Number of bytes in ctx->in */
   md5byte *p = (md5byte *)ctx->in + count;
 
@@ -115,7 +110,7 @@ MD5Final(md5byte digest[16], struct MD5Context *ctx) {
   /* Bytes of padding needed to make 56 bytes (-8..55) */
   count = 56 - 1 - count;
 
-  if (count < 0) {  /* Padding forces an extra block */
+  if (count < 0) { /* Padding forces an extra block */
     memset(p, 0, count + 8);
     byteSwap(ctx->in, 16);
     MD5Transform(ctx->buf, ctx->in);
@@ -147,16 +142,15 @@ MD5Final(md5byte digest[16], struct MD5Context *ctx) {
 #define F4(x, y, z) (y ^ (x | ~z))
 
 /* This is the central step in the MD5 algorithm. */
-#define MD5STEP(f,w,x,y,z,in,s) \
-  (w += f(x,y,z) + in, w = (w<<s | w>>(32-s)) + x)
+#define MD5STEP(f, w, x, y, z, in, s) \
+  (w += f(x, y, z) + in, w = (w << s | w >> (32 - s)) + x)
 
 /*
  * The core of the MD5 algorithm, this alters an existing MD5 hash to
  * reflect the addition of 16 longwords of new data.  MD5Update blocks
  * the data and converts bytes into longwords for this routine.
  */
-void
-MD5Transform(UWORD32 buf[4], UWORD32 const in[16]) {
+void MD5Transform(UWORD32 buf[4], UWORD32 const in[16]) {
   register UWORD32 a, b, c, d;
 
   a = buf[0];
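Annotation (not part of the patch): the MD5 entry points reformatted above follow the classic three-call pattern. A small sketch, assuming `md5byte` aliases `unsigned char` as in md5_utils.h:

    #include <stdio.h>
    #include "md5_utils.h"

    static void print_md5(const md5byte *data, unsigned int len) {
      struct MD5Context ctx;
      md5byte digest[16];
      int i;

      MD5Init(&ctx);
      MD5Update(&ctx, data, len); /* may be called repeatedly to stream data */
      MD5Final(digest, &ctx);

      for (i = 0; i < 16; i++) printf("%02x", digest[i]);
      printf("\n");
    }
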
diff --git a/rate_hist.c b/rate_hist.c
index a77222b16186644caceb6d0ba2c5c08eeee76329..872a10bae0b3b69d95083a169f22905a770788a8 100644
--- a/rate_hist.c
+++ b/rate_hist.c
@@ -45,8 +45,7 @@ struct rate_hist *init_rate_histogram(const vpx_codec_enc_cfg_t *cfg,
   hist->samples = cfg->rc_buf_sz * 5 / 4 * fps->num / fps->den / 1000;
 
   // prevent division by zero
-  if (hist->samples == 0)
-    hist->samples = 1;
+  if (hist->samples == 0) hist->samples = 1;
 
   hist->frames = 0;
   hist->total = 0;
@@ -78,18 +77,16 @@ void update_rate_histogram(struct rate_hist *hist,
   int64_t avg_bitrate = 0;
   int64_t sum_sz = 0;
   const int64_t now = pkt->data.frame.pts * 1000 *
-                          (uint64_t)cfg->g_timebase.num /
-                              (uint64_t)cfg->g_timebase.den;
+                      (uint64_t)cfg->g_timebase.num /
+                      (uint64_t)cfg->g_timebase.den;
 
   int idx = hist->frames++ % hist->samples;
   hist->pts[idx] = now;
   hist->sz[idx] = (int)pkt->data.frame.sz;
 
-  if (now < cfg->rc_buf_initial_sz)
-    return;
+  if (now < cfg->rc_buf_initial_sz) return;
 
-  if (!cfg->rc_target_bitrate)
-    return;
+  if (!cfg->rc_target_bitrate) return;
 
   then = now;
 
@@ -98,20 +95,16 @@ void update_rate_histogram(struct rate_hist *hist,
     const int i_idx = (i - 1) % hist->samples;
 
     then = hist->pts[i_idx];
-    if (now - then > cfg->rc_buf_sz)
-      break;
+    if (now - then > cfg->rc_buf_sz) break;
     sum_sz += hist->sz[i_idx];
   }
 
-  if (now == then)
-    return;
+  if (now == then) return;
 
   avg_bitrate = sum_sz * 8 * 1000 / (now - then);
   idx = (int)(avg_bitrate * (RATE_BINS / 2) / (cfg->rc_target_bitrate * 1000));
-  if (idx < 0)
-    idx = 0;
-  if (idx > RATE_BINS - 1)
-    idx = RATE_BINS - 1;
+  if (idx < 0) idx = 0;
+  if (idx > RATE_BINS - 1) idx = RATE_BINS - 1;
   if (hist->bucket[idx].low > avg_bitrate)
     hist->bucket[idx].low = (int)avg_bitrate;
   if (hist->bucket[idx].high < avg_bitrate)
@@ -120,8 +113,8 @@ void update_rate_histogram(struct rate_hist *hist,
   hist->total++;
 }
 
-static int merge_hist_buckets(struct hist_bucket *bucket,
-                              int max_buckets, int *num_buckets) {
+static int merge_hist_buckets(struct hist_bucket *bucket, int max_buckets,
+                              int *num_buckets) {
   int small_bucket = 0, merge_bucket = INT_MAX, big_bucket = 0;
   int buckets = *num_buckets;
   int i;
@@ -129,10 +122,8 @@ static int merge_hist_buckets(struct hist_bucket *bucket,
   /* Find the extrema for this list of buckets */
   big_bucket = small_bucket = 0;
   for (i = 0; i < buckets; i++) {
-    if (bucket[i].count < bucket[small_bucket].count)
-      small_bucket = i;
-    if (bucket[i].count > bucket[big_bucket].count)
-      big_bucket = i;
+    if (bucket[i].count < bucket[small_bucket].count) small_bucket = i;
+    if (bucket[i].count > bucket[big_bucket].count) big_bucket = i;
   }
 
   /* If we have too many buckets, merge the smallest with an adjacent
@@ -174,13 +165,10 @@ static int merge_hist_buckets(struct hist_bucket *bucket,
      */
     big_bucket = small_bucket = 0;
     for (i = 0; i < buckets; i++) {
-      if (i > merge_bucket)
-        bucket[i] = bucket[i + 1];
+      if (i > merge_bucket) bucket[i] = bucket[i + 1];
 
-      if (bucket[i].count < bucket[small_bucket].count)
-        small_bucket = i;
-      if (bucket[i].count > bucket[big_bucket].count)
-        big_bucket = i;
+      if (bucket[i].count < bucket[small_bucket].count) small_bucket = i;
+      if (bucket[i].count > bucket[big_bucket].count) big_bucket = i;
     }
   }
 
@@ -188,8 +176,8 @@ static int merge_hist_buckets(struct hist_bucket *bucket,
   return bucket[big_bucket].count;
 }
 
-static void show_histogram(const struct hist_bucket *bucket,
-                           int buckets, int total, int scale) {
+static void show_histogram(const struct hist_bucket *bucket, int buckets,
+                           int total, int scale) {
   const char *pat1, *pat2;
   int i;
 
@@ -232,8 +220,7 @@ static void show_histogram(const struct hist_bucket *bucket,
 
     pct = (float)(100.0 * bucket[i].count / total);
     len = HIST_BAR_MAX * bucket[i].count / scale;
-    if (len < 1)
-      len = 1;
+    if (len < 1) len = 1;
     assert(len <= HIST_BAR_MAX);
 
     if (bucket[i].low == bucket[i].high)
@@ -241,8 +228,7 @@ static void show_histogram(const struct hist_bucket *bucket,
     else
       fprintf(stderr, pat2, bucket[i].low, bucket[i].high);
 
-    for (j = 0; j < HIST_BAR_MAX; j++)
-      fprintf(stderr, j < len ? "=" : " ");
+    for (j = 0; j < HIST_BAR_MAX; j++) fprintf(stderr, j < len ? "=" : " ");
     fprintf(stderr, "\t%5d (%6.2f%%)\n", bucket[i].count, pct);
   }
 }
@@ -268,14 +254,13 @@ void show_q_histogram(const int counts[64], int max_buckets) {
   show_histogram(bucket, buckets, total, scale);
 }
 
-void show_rate_histogram(struct rate_hist *hist,
-                         const vpx_codec_enc_cfg_t *cfg, int max_buckets) {
+void show_rate_histogram(struct rate_hist *hist, const vpx_codec_enc_cfg_t *cfg,
+                         int max_buckets) {
   int i, scale;
   int buckets = 0;
 
   for (i = 0; i < RATE_BINS; i++) {
-    if (hist->bucket[i].low == INT_MAX)
-      continue;
+    if (hist->bucket[i].low == INT_MAX) continue;
     hist->bucket[buckets++] = hist->bucket[i];
   }
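Annotation (not part of the patch): the histogram helpers touched above fit together as init, per-packet update, then a final report. A sketch under stated assumptions: the matching rate_hist.h header declares these prototypes, `fps` is the stream frame rate as a `vpx_rational_t`, and a teardown call outside these hunks frees the histogram.

    #include "rate_hist.h"

    static void report_rates(const vpx_codec_enc_cfg_t *cfg,
                             const vpx_rational_t *fps,
                             const vpx_codec_cx_pkt_t *pkts, int num_pkts) {
      struct rate_hist *hist = init_rate_histogram(cfg, fps);
      int i;

      for (i = 0; i < num_pkts; i++)
        update_rate_histogram(hist, cfg, &pkts[i]);

      show_rate_histogram(hist, cfg, 10); /* cap at 10 buckets (arbitrary) */
    }
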
 
diff --git a/test/acm_random.h b/test/acm_random.h
index ff5c93ea1d4fe3fef7c2e37a1a3205d9906e04b8..0545416f3f685db8ea5cbeb835736335f861c55f 100644
--- a/test/acm_random.h
+++ b/test/acm_random.h
@@ -23,9 +23,7 @@ class ACMRandom {
 
   explicit ACMRandom(int seed) : random_(seed) {}
 
-  void Reset(int seed) {
-    random_.Reseed(seed);
-  }
+  void Reset(int seed) { random_.Reseed(seed); }
   uint16_t Rand16(void) {
     const uint32_t value =
         random_.Generate(testing::internal::Random::kMaxRange);
@@ -46,17 +44,11 @@ class ACMRandom {
     return r < 128 ? r << 4 : r >> 4;
   }
 
-  int PseudoUniform(int range) {
-    return random_.Generate(range);
-  }
+  int PseudoUniform(int range) { return random_.Generate(range); }
 
-  int operator()(int n) {
-    return PseudoUniform(n);
-  }
+  int operator()(int n) { return PseudoUniform(n); }
 
-  static int DeterministicSeed(void) {
-    return 0xbaba;
-  }
+  static int DeterministicSeed(void) { return 0xbaba; }
 
  private:
   testing::internal::Random random_;
diff --git a/test/active_map_refresh_test.cc b/test/active_map_refresh_test.cc
index c318c03d088512053f4356d2999f89e263a99bc4..3fea3432b8d204d979d9ac354a05436b5cfdbdb1 100644
--- a/test/active_map_refresh_test.cc
+++ b/test/active_map_refresh_test.cc
@@ -17,8 +17,8 @@
 namespace {
 
 // Check if any pixel in a 16x16 macroblock varies between frames.
-int CheckMb(const vpx_image_t &current, const vpx_image_t &previous,
-            int mb_r, int mb_c) {
+int CheckMb(const vpx_image_t &current, const vpx_image_t &previous, int mb_r,
+            int mb_c) {
   for (int plane = 0; plane < 3; plane++) {
     int r = 16 * mb_r;
     int c0 = 16 * mb_c;
@@ -122,6 +122,6 @@ TEST_P(ActiveMapRefreshTest, Test) {
 }
 
 VP10_INSTANTIATE_TEST_CASE(ActiveMapRefreshTest,
-                          ::testing::Values(::libvpx_test::kRealTime),
-                          ::testing::Range(5, 6));
+                           ::testing::Values(::libvpx_test::kRealTime),
+                           ::testing::Range(5, 6));
 }  // namespace
diff --git a/test/active_map_test.cc b/test/active_map_test.cc
index cb4377ad6f7c7993987337eca67502c06f771575..32e530089d95813befa150578956191853a8c9cb 100644
--- a/test/active_map_test.cc
+++ b/test/active_map_test.cc
@@ -40,15 +40,11 @@ class ActiveMapTest
     } else if (video->frame() == 3) {
       vpx_active_map_t map = vpx_active_map_t();
       uint8_t active_map[9 * 13] = {
-        1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
-        1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
-        1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
-        1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
-        0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1,
-        0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1,
-        0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1,
-        0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1,
-        1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0,
+        1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0,
+        0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+        1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,
+        0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0,
+        0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0,
       };
       map.cols = (kWidth + 15) / 16;
       map.rows = (kHeight + 15) / 16;
@@ -77,13 +73,13 @@ TEST_P(ActiveMapTest, Test) {
   cfg_.rc_end_usage = VPX_CBR;
   cfg_.kf_max_dist = 90000;
 
-  ::libvpx_test::I420VideoSource video("hantro_odd.yuv", kWidth, kHeight, 30,
-                                       1, 0, 20);
+  ::libvpx_test::I420VideoSource video("hantro_odd.yuv", kWidth, kHeight, 30, 1,
+                                       0, 20);
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
 VP10_INSTANTIATE_TEST_CASE(ActiveMapTest,
-                          ::testing::Values(::libvpx_test::kRealTime),
-                          ::testing::Range(0, 6));
+                           ::testing::Values(::libvpx_test::kRealTime),
+                           ::testing::Range(0, 6));
 }  // namespace
diff --git a/test/aq_segment_test.cc b/test/aq_segment_test.cc
index 270c2ffe4da8838e45bd9ea0299bffc9d72674e7..43dcb65fac51f4af1f00b81424154970bad4873e 100644
--- a/test/aq_segment_test.cc
+++ b/test/aq_segment_test.cc
@@ -57,7 +57,7 @@ TEST_P(AqSegmentTest, TestNoMisMatchAQ1) {
   aq_mode_ = 1;
 
   ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
-                                        30, 1, 0, 100);
+                                       30, 1, 0, 100);
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
@@ -77,7 +77,7 @@ TEST_P(AqSegmentTest, TestNoMisMatchAQ2) {
   aq_mode_ = 2;
 
   ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
-                                        30, 1, 0, 100);
+                                       30, 1, 0, 100);
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
@@ -97,13 +97,13 @@ TEST_P(AqSegmentTest, TestNoMisMatchAQ3) {
   aq_mode_ = 3;
 
   ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
-                                        30, 1, 0, 100);
+                                       30, 1, 0, 100);
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
 VP10_INSTANTIATE_TEST_CASE(AqSegmentTest,
-                          ::testing::Values(::libvpx_test::kRealTime,
-                                            ::libvpx_test::kOnePassGood),
-                          ::testing::Range(3, 9));
+                           ::testing::Values(::libvpx_test::kRealTime,
+                                             ::libvpx_test::kOnePassGood),
+                           ::testing::Range(3, 9));
 }  // namespace
diff --git a/test/arf_freq_test.cc b/test/arf_freq_test.cc
index 0e48f7a7382645106a1a264d712172d0e05c2788..79fa804ed03126040fefbb4a3bc601cebe0291f8 100644
--- a/test/arf_freq_test.cc
+++ b/test/arf_freq_test.cc
@@ -22,8 +22,8 @@ namespace {
 const unsigned int kFrames = 100;
 const int kBitrate = 500;
 
-#define ARF_NOT_SEEN               1000001
-#define ARF_SEEN_ONCE              1000000
+#define ARF_NOT_SEEN 1000001
+#define ARF_SEEN_ONCE 1000000
 
 typedef struct {
   const char *filename;
@@ -44,24 +44,20 @@ typedef struct {
 
 const TestVideoParam kTestVectors[] = {
   // artificially increase framerate to trigger default check
-  {"hantro_collage_w352h288.yuv", 352, 288, 5000, 1,
-    8, VPX_IMG_FMT_I420, VPX_BITS_8, 0},
-  {"hantro_collage_w352h288.yuv", 352, 288, 30, 1,
-    8, VPX_IMG_FMT_I420, VPX_BITS_8, 0},
-  {"rush_hour_444.y4m", 352, 288, 30, 1,
-    8, VPX_IMG_FMT_I444, VPX_BITS_8, 1},
+  { "hantro_collage_w352h288.yuv", 352, 288, 5000, 1, 8, VPX_IMG_FMT_I420,
+    VPX_BITS_8, 0 },
+  { "hantro_collage_w352h288.yuv", 352, 288, 30, 1, 8, VPX_IMG_FMT_I420,
+    VPX_BITS_8, 0 },
+  { "rush_hour_444.y4m", 352, 288, 30, 1, 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
 #if CONFIG_VPX_HIGHBITDEPTH
-  // Add list of profile 2/3 test videos here ...
+// Add list of profile 2/3 test videos here ...
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 };
 
 const TestEncodeParam kEncodeVectors[] = {
-  {::libvpx_test::kOnePassGood, 2},
-  {::libvpx_test::kOnePassGood, 5},
-  {::libvpx_test::kTwoPassGood, 1},
-  {::libvpx_test::kTwoPassGood, 2},
-  {::libvpx_test::kTwoPassGood, 5},
-  {::libvpx_test::kRealTime, 5},
+  { ::libvpx_test::kOnePassGood, 2 }, { ::libvpx_test::kOnePassGood, 5 },
+  { ::libvpx_test::kTwoPassGood, 1 }, { ::libvpx_test::kTwoPassGood, 2 },
+  { ::libvpx_test::kTwoPassGood, 5 }, { ::libvpx_test::kRealTime, 5 },
 };
 
 const int kMinArfVectors[] = {
@@ -80,15 +76,12 @@ int is_extension_y4m(const char *filename) {
 
 class ArfFreqTest
     : public ::libvpx_test::EncoderTest,
-      public ::libvpx_test::CodecTestWith3Params<TestVideoParam, \
+      public ::libvpx_test::CodecTestWith3Params<TestVideoParam,
                                                  TestEncodeParam, int> {
  protected:
   ArfFreqTest()
-      : EncoderTest(GET_PARAM(0)),
-        test_video_param_(GET_PARAM(1)),
-        test_encode_param_(GET_PARAM(2)),
-        min_arf_requested_(GET_PARAM(3)) {
-  }
+      : EncoderTest(GET_PARAM(0)), test_video_param_(GET_PARAM(1)),
+        test_encode_param_(GET_PARAM(2)), min_arf_requested_(GET_PARAM(3)) {}
 
   virtual ~ArfFreqTest() {}
 
@@ -114,17 +107,16 @@ class ArfFreqTest
   }
 
   int GetNumFramesInPkt(const vpx_codec_cx_pkt_t *pkt) {
-    const uint8_t *buffer = reinterpret_cast<uint8_t*>(pkt->data.frame.buf);
+    const uint8_t *buffer = reinterpret_cast<uint8_t *>(pkt->data.frame.buf);
     const uint8_t marker = buffer[pkt->data.frame.sz - 1];
     const int mag = ((marker >> 3) & 3) + 1;
     int frames = (marker & 0x7) + 1;
-    const unsigned int index_sz = 2 + mag  * frames;
+    const unsigned int index_sz = 2 + mag * frames;
     // Check for superframe or not.
     // Assume superframe has only one visible frame, the rest being
     // invisible. If superframe index is not found, then there is only
     // one frame.
-    if (!((marker & 0xe0) == 0xc0 &&
-          pkt->data.frame.sz >= index_sz &&
+    if (!((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz &&
           buffer[pkt->data.frame.sz - index_sz] == marker)) {
       frames = 1;
     }
@@ -132,8 +124,7 @@ class ArfFreqTest
   }
 
   virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
-    if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
-      return;
+    if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return;
     const int frames = GetNumFramesInPkt(pkt);
     if (frames == 1) {
       run_of_visible_frames_++;
@@ -167,9 +158,7 @@ class ArfFreqTest
     }
   }
 
-  int GetMinVisibleRun() const {
-    return min_run_;
-  }
+  int GetMinVisibleRun() const { return min_run_; }
 
   int GetMinArfDistanceRequested() const {
     if (min_arf_requested_)
@@ -178,7 +167,7 @@ class ArfFreqTest
       return vp10_rc_get_default_min_gf_interval(
           test_video_param_.width, test_video_param_.height,
           (double)test_video_param_.framerate_num /
-          test_video_param_.framerate_den);
+              test_video_param_.framerate_den);
   }
 
   TestVideoParam test_video_param_;
@@ -197,21 +186,18 @@ TEST_P(ArfFreqTest, MinArfFreqTest) {
   cfg_.g_input_bit_depth = test_video_param_.input_bit_depth;
   cfg_.g_bit_depth = test_video_param_.bit_depth;
   init_flags_ = VPX_CODEC_USE_PSNR;
-  if (cfg_.g_bit_depth > 8)
-    init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
+  if (cfg_.g_bit_depth > 8) init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
 
   libvpx_test::VideoSource *video;
   if (is_extension_y4m(test_video_param_.filename)) {
-    video = new libvpx_test::Y4mVideoSource(test_video_param_.filename,
-                                            0, kFrames);
+    video =
+        new libvpx_test::Y4mVideoSource(test_video_param_.filename, 0, kFrames);
   } else {
-    video = new libvpx_test::YUVVideoSource(test_video_param_.filename,
-                                            test_video_param_.fmt,
-                                            test_video_param_.width,
-                                            test_video_param_.height,
-                                            test_video_param_.framerate_num,
-                                            test_video_param_.framerate_den,
-                                            0, kFrames);
+    video = new libvpx_test::YUVVideoSource(
+        test_video_param_.filename, test_video_param_.fmt,
+        test_video_param_.width, test_video_param_.height,
+        test_video_param_.framerate_num, test_video_param_.framerate_den, 0,
+        kFrames);
   }
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(video));
@@ -221,26 +207,23 @@ TEST_P(ArfFreqTest, MinArfFreqTest) {
     const int min_arf_dist = min_run + 1;
     EXPECT_GE(min_arf_dist, min_arf_dist_requested);
   }
-  delete(video);
+  delete (video);
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-# if CONFIG_VP10_ENCODER
+#if CONFIG_VP10_ENCODER
 // TODO(angiebird): 25-29 fail in high bitdepth mode.
 INSTANTIATE_TEST_CASE_P(
     DISABLED_VP10, ArfFreqTest,
     ::testing::Combine(
         ::testing::Values(static_cast<const libvpx_test::CodecFactory *>(
             &libvpx_test::kVP10)),
-        ::testing::ValuesIn(kTestVectors),
-        ::testing::ValuesIn(kEncodeVectors),
+        ::testing::ValuesIn(kTestVectors), ::testing::ValuesIn(kEncodeVectors),
         ::testing::ValuesIn(kMinArfVectors)));
-# endif  // CONFIG_VP10_ENCODER
+#endif  // CONFIG_VP10_ENCODER
 #else
-VP10_INSTANTIATE_TEST_CASE(
-    ArfFreqTest,
-    ::testing::ValuesIn(kTestVectors),
-    ::testing::ValuesIn(kEncodeVectors),
-    ::testing::ValuesIn(kMinArfVectors));
+VP10_INSTANTIATE_TEST_CASE(ArfFreqTest, ::testing::ValuesIn(kTestVectors),
+                           ::testing::ValuesIn(kEncodeVectors),
+                           ::testing::ValuesIn(kMinArfVectors));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 }  // namespace
diff --git a/test/avg_test.cc b/test/avg_test.cc
index 7d5380f7c235ffe8cc43157ba6af984a394a3d69..ae35fb1ce4fad5fbc95093f765fedac138d2744f 100644
--- a/test/avg_test.cc
+++ b/test/avg_test.cc
@@ -31,7 +31,7 @@ class AverageTestBase : public ::testing::Test {
   AverageTestBase(int width, int height) : width_(width), height_(height) {}
 
   static void SetUpTestCase() {
-    source_data_ = reinterpret_cast<uint8_t*>(
+    source_data_ = reinterpret_cast<uint8_t *>(
         vpx_memalign(kDataAlignment, kDataBlockSize));
   }
 
@@ -40,9 +40,7 @@ class AverageTestBase : public ::testing::Test {
     source_data_ = NULL;
   }
 
-  virtual void TearDown() {
-    libvpx_test::ClearSystemState();
-  }
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
 
  protected:
   // Handle blocks up to 4 blocks 64x64 with stride up to 128
@@ -55,47 +53,44 @@ class AverageTestBase : public ::testing::Test {
   }
 
   // Sum Pixels
-  unsigned int ReferenceAverage8x8(const uint8_t* source, int pitch ) {
+  unsigned int ReferenceAverage8x8(const uint8_t *source, int pitch) {
     unsigned int average = 0;
     for (int h = 0; h < 8; ++h)
-      for (int w = 0; w < 8; ++w)
-        average += source[h * source_stride_ + w];
+      for (int w = 0; w < 8; ++w) average += source[h * source_stride_ + w];
     return ((average + 32) >> 6);
   }
 
-  unsigned int ReferenceAverage4x4(const uint8_t* source, int pitch ) {
+  unsigned int ReferenceAverage4x4(const uint8_t *source, int pitch) {
     unsigned int average = 0;
     for (int h = 0; h < 4; ++h)
-      for (int w = 0; w < 4; ++w)
-        average += source[h * source_stride_ + w];
+      for (int w = 0; w < 4; ++w) average += source[h * source_stride_ + w];
     return ((average + 8) >> 4);
   }
 
   void FillConstant(uint8_t fill_constant) {
     for (int i = 0; i < width_ * height_; ++i) {
-        source_data_[i] = fill_constant;
+      source_data_[i] = fill_constant;
     }
   }
 
   void FillRandom() {
     for (int i = 0; i < width_ * height_; ++i) {
-        source_data_[i] = rnd_.Rand8();
+      source_data_[i] = rnd_.Rand8();
     }
   }
 
   int width_, height_;
-  static uint8_t* source_data_;
+  static uint8_t *source_data_;
   int source_stride_;
 
   ACMRandom rnd_;
 };
-typedef unsigned int (*AverageFunction)(const uint8_t* s, int pitch);
+typedef unsigned int (*AverageFunction)(const uint8_t *s, int pitch);
 
 typedef std::tr1::tuple<int, int, int, int, AverageFunction> AvgFunc;
 
-class AverageTest
-    : public AverageTestBase,
-      public ::testing::WithParamInterface<AvgFunc>{
+class AverageTest : public AverageTestBase,
+                    public ::testing::WithParamInterface<AvgFunc> {
  public:
   AverageTest() : AverageTestBase(GET_PARAM(0), GET_PARAM(1)) {}
 
@@ -103,17 +98,17 @@ class AverageTest
   void CheckAverages() {
     unsigned int expected = 0;
     if (GET_PARAM(3) == 8) {
-      expected = ReferenceAverage8x8(source_data_+ GET_PARAM(2),
-                                     source_stride_);
-    } else  if (GET_PARAM(3) == 4) {
-      expected = ReferenceAverage4x4(source_data_+ GET_PARAM(2),
-                                     source_stride_);
+      expected =
+          ReferenceAverage8x8(source_data_ + GET_PARAM(2), source_stride_);
+    } else if (GET_PARAM(3) == 4) {
+      expected =
+          ReferenceAverage4x4(source_data_ + GET_PARAM(2), source_stride_);
     }
 
-    ASM_REGISTER_STATE_CHECK(GET_PARAM(4)(source_data_+ GET_PARAM(2),
-                                          source_stride_));
-    unsigned int actual = GET_PARAM(4)(source_data_+ GET_PARAM(2),
-                                       source_stride_);
+    ASM_REGISTER_STATE_CHECK(
+        GET_PARAM(4)(source_data_ + GET_PARAM(2), source_stride_));
+    unsigned int actual =
+        GET_PARAM(4)(source_data_ + GET_PARAM(2), source_stride_);
 
     EXPECT_EQ(expected, actual);
   }
@@ -124,23 +119,20 @@ typedef void (*IntProRowFunc)(int16_t hbuf[16], uint8_t const *ref,
 
 typedef std::tr1::tuple<int, IntProRowFunc, IntProRowFunc> IntProRowParam;
 
-class IntProRowTest
-    : public AverageTestBase,
-      public ::testing::WithParamInterface<IntProRowParam> {
+class IntProRowTest : public AverageTestBase,
+                      public ::testing::WithParamInterface<IntProRowParam> {
  public:
   IntProRowTest()
-    : AverageTestBase(16, GET_PARAM(0)),
-      hbuf_asm_(NULL),
-      hbuf_c_(NULL) {
+      : AverageTestBase(16, GET_PARAM(0)), hbuf_asm_(NULL), hbuf_c_(NULL) {
     asm_func_ = GET_PARAM(1);
     c_func_ = GET_PARAM(2);
   }
 
  protected:
   virtual void SetUp() {
-    hbuf_asm_ = reinterpret_cast<int16_t*>(
+    hbuf_asm_ = reinterpret_cast<int16_t *>(
         vpx_memalign(kDataAlignment, sizeof(*hbuf_asm_) * 16));
-    hbuf_c_ = reinterpret_cast<int16_t*>(
+    hbuf_c_ = reinterpret_cast<int16_t *>(
         vpx_memalign(kDataAlignment, sizeof(*hbuf_c_) * 16));
   }
 
@@ -169,9 +161,8 @@ typedef int16_t (*IntProColFunc)(uint8_t const *ref, const int width);
 
 typedef std::tr1::tuple<int, IntProColFunc, IntProColFunc> IntProColParam;
 
-class IntProColTest
-    : public AverageTestBase,
-      public ::testing::WithParamInterface<IntProColParam> {
+class IntProColTest : public AverageTestBase,
+                      public ::testing::WithParamInterface<IntProColParam> {
  public:
   IntProColTest() : AverageTestBase(GET_PARAM(0), 1), sum_asm_(0), sum_c_(0) {
     asm_func_ = GET_PARAM(1);
@@ -195,15 +186,14 @@ class IntProColTest
 typedef int (*SatdFunc)(const int16_t *coeffs, int length);
 typedef std::tr1::tuple<int, SatdFunc> SatdTestParam;
 
-class SatdTest
-    : public ::testing::Test,
-      public ::testing::WithParamInterface<SatdTestParam> {
+class SatdTest : public ::testing::Test,
+                 public ::testing::WithParamInterface<SatdTestParam> {
  protected:
   virtual void SetUp() {
     satd_size_ = GET_PARAM(0);
     satd_func_ = GET_PARAM(1);
     rnd_.Reset(ACMRandom::DeterministicSeed());
-    src_ = reinterpret_cast<int16_t*>(
+    src_ = reinterpret_cast<int16_t *>(
         vpx_memalign(16, sizeof(*src_) * satd_size_));
     ASSERT_TRUE(src_ != NULL);
   }
@@ -235,7 +225,7 @@ class SatdTest
   ACMRandom rnd_;
 };
 
-uint8_t* AverageTestBase::source_data_ = NULL;
+uint8_t *AverageTestBase::source_data_ = NULL;
 
 TEST_P(AverageTest, MinValue) {
   FillConstant(0);
@@ -286,7 +276,6 @@ TEST_P(IntProColTest, Random) {
   RunComparison();
 }
 
-
 TEST_P(SatdTest, MinValue) {
   const int kMin = -32640;
   const int expected = -kMin * satd_size_;
@@ -320,92 +309,86 @@ using std::tr1::make_tuple;
 
 INSTANTIATE_TEST_CASE_P(
     C, AverageTest,
-    ::testing::Values(
-        make_tuple(16, 16, 1, 8, &vpx_avg_8x8_c),
-        make_tuple(16, 16, 1, 4, &vpx_avg_4x4_c)));
+    ::testing::Values(make_tuple(16, 16, 1, 8, &vpx_avg_8x8_c),
+                      make_tuple(16, 16, 1, 4, &vpx_avg_4x4_c)));
 
-INSTANTIATE_TEST_CASE_P(
-    C, SatdTest,
-    ::testing::Values(
-        make_tuple(16, &vpx_satd_c),
-        make_tuple(64, &vpx_satd_c),
-        make_tuple(256, &vpx_satd_c),
-        make_tuple(1024, &vpx_satd_c)));
+INSTANTIATE_TEST_CASE_P(C, SatdTest,
+                        ::testing::Values(make_tuple(16, &vpx_satd_c),
+                                          make_tuple(64, &vpx_satd_c),
+                                          make_tuple(256, &vpx_satd_c),
+                                          make_tuple(1024, &vpx_satd_c)));
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(
     SSE2, AverageTest,
-    ::testing::Values(
-        make_tuple(16, 16, 0, 8, &vpx_avg_8x8_sse2),
-        make_tuple(16, 16, 5, 8, &vpx_avg_8x8_sse2),
-        make_tuple(32, 32, 15, 8, &vpx_avg_8x8_sse2),
-        make_tuple(16, 16, 0, 4, &vpx_avg_4x4_sse2),
-        make_tuple(16, 16, 5, 4, &vpx_avg_4x4_sse2),
-        make_tuple(32, 32, 15, 4, &vpx_avg_4x4_sse2)));
+    ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_sse2),
+                      make_tuple(16, 16, 5, 8, &vpx_avg_8x8_sse2),
+                      make_tuple(32, 32, 15, 8, &vpx_avg_8x8_sse2),
+                      make_tuple(16, 16, 0, 4, &vpx_avg_4x4_sse2),
+                      make_tuple(16, 16, 5, 4, &vpx_avg_4x4_sse2),
+                      make_tuple(32, 32, 15, 4, &vpx_avg_4x4_sse2)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2, IntProRowTest, ::testing::Values(
-        make_tuple(16, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
-        make_tuple(32, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
-        make_tuple(64, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c)));
+    SSE2, IntProRowTest,
+    ::testing::Values(make_tuple(16, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
+                      make_tuple(32, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
+                      make_tuple(64, &vpx_int_pro_row_sse2,
+                                 &vpx_int_pro_row_c)));
 
 INSTANTIATE_TEST_CASE_P(
-    SSE2, IntProColTest, ::testing::Values(
-        make_tuple(16, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
-        make_tuple(32, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
-        make_tuple(64, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c)));
-
-INSTANTIATE_TEST_CASE_P(
-    SSE2, SatdTest,
-    ::testing::Values(
-        make_tuple(16, &vpx_satd_sse2),
-        make_tuple(64, &vpx_satd_sse2),
-        make_tuple(256, &vpx_satd_sse2),
-        make_tuple(1024, &vpx_satd_sse2)));
+    SSE2, IntProColTest,
+    ::testing::Values(make_tuple(16, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
+                      make_tuple(32, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
+                      make_tuple(64, &vpx_int_pro_col_sse2,
+                                 &vpx_int_pro_col_c)));
+
+INSTANTIATE_TEST_CASE_P(SSE2, SatdTest,
+                        ::testing::Values(make_tuple(16, &vpx_satd_sse2),
+                                          make_tuple(64, &vpx_satd_sse2),
+                                          make_tuple(256, &vpx_satd_sse2),
+                                          make_tuple(1024, &vpx_satd_sse2)));
 #endif
 
 #if HAVE_NEON
 INSTANTIATE_TEST_CASE_P(
     NEON, AverageTest,
-    ::testing::Values(
-        make_tuple(16, 16, 0, 8, &vpx_avg_8x8_neon),
-        make_tuple(16, 16, 5, 8, &vpx_avg_8x8_neon),
-        make_tuple(32, 32, 15, 8, &vpx_avg_8x8_neon),
-        make_tuple(16, 16, 0, 4, &vpx_avg_4x4_neon),
-        make_tuple(16, 16, 5, 4, &vpx_avg_4x4_neon),
-        make_tuple(32, 32, 15, 4, &vpx_avg_4x4_neon)));
-
-INSTANTIATE_TEST_CASE_P(
-    NEON, IntProRowTest, ::testing::Values(
-        make_tuple(16, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
-        make_tuple(32, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
-        make_tuple(64, &vpx_int_pro_row_neon, &vpx_int_pro_row_c)));
+    ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_neon),
+                      make_tuple(16, 16, 5, 8, &vpx_avg_8x8_neon),
+                      make_tuple(32, 32, 15, 8, &vpx_avg_8x8_neon),
+                      make_tuple(16, 16, 0, 4, &vpx_avg_4x4_neon),
+                      make_tuple(16, 16, 5, 4, &vpx_avg_4x4_neon),
+                      make_tuple(32, 32, 15, 4, &vpx_avg_4x4_neon)));
 
 INSTANTIATE_TEST_CASE_P(
-    NEON, IntProColTest, ::testing::Values(
-        make_tuple(16, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
-        make_tuple(32, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
-        make_tuple(64, &vpx_int_pro_col_neon, &vpx_int_pro_col_c)));
+    NEON, IntProRowTest,
+    ::testing::Values(make_tuple(16, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
+                      make_tuple(32, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
+                      make_tuple(64, &vpx_int_pro_row_neon,
+                                 &vpx_int_pro_row_c)));
 
 INSTANTIATE_TEST_CASE_P(
-    NEON, SatdTest,
-    ::testing::Values(
-        make_tuple(16, &vpx_satd_neon),
-        make_tuple(64, &vpx_satd_neon),
-        make_tuple(256, &vpx_satd_neon),
-        make_tuple(1024, &vpx_satd_neon)));
+    NEON, IntProColTest,
+    ::testing::Values(make_tuple(16, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
+                      make_tuple(32, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
+                      make_tuple(64, &vpx_int_pro_col_neon,
+                                 &vpx_int_pro_col_c)));
+
+INSTANTIATE_TEST_CASE_P(NEON, SatdTest,
+                        ::testing::Values(make_tuple(16, &vpx_satd_neon),
+                                          make_tuple(64, &vpx_satd_neon),
+                                          make_tuple(256, &vpx_satd_neon),
+                                          make_tuple(1024, &vpx_satd_neon)));
 #endif
 
 #if HAVE_MSA
 INSTANTIATE_TEST_CASE_P(
     MSA, AverageTest,
-    ::testing::Values(
-        make_tuple(16, 16, 0, 8, &vpx_avg_8x8_msa),
-        make_tuple(16, 16, 5, 8, &vpx_avg_8x8_msa),
-        make_tuple(32, 32, 15, 8, &vpx_avg_8x8_msa),
-        make_tuple(16, 16, 0, 4, &vpx_avg_4x4_msa),
-        make_tuple(16, 16, 5, 4, &vpx_avg_4x4_msa),
-        make_tuple(32, 32, 15, 4, &vpx_avg_4x4_msa)));
+    ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_msa),
+                      make_tuple(16, 16, 5, 8, &vpx_avg_8x8_msa),
+                      make_tuple(32, 32, 15, 8, &vpx_avg_8x8_msa),
+                      make_tuple(16, 16, 0, 4, &vpx_avg_4x4_msa),
+                      make_tuple(16, 16, 5, 4, &vpx_avg_4x4_msa),
+                      make_tuple(32, 32, 15, 4, &vpx_avg_4x4_msa)));
 #endif
 
 }  // namespace
diff --git a/test/boolcoder_test.cc b/test/boolcoder_test.cc
index 2bc56f9ef5cc93eff46d26a9ffd61dbff09d249b..2121e95c372c2b9441fd7bd7dc5680bb6c57d050 100644
--- a/test/boolcoder_test.cc
+++ b/test/boolcoder_test.cc
@@ -28,22 +28,33 @@ const int num_tests = 10;
 TEST(VP10, TestBitIO) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   for (int n = 0; n < num_tests; ++n) {
-    for (int method = 0; method <= 7; ++method) {   // we generate various proba
+    for (int method = 0; method <= 7; ++method) {  // we generate various proba
       const int kBitsToTest = 1000;
       uint8_t probas[kBitsToTest];
 
       for (int i = 0; i < kBitsToTest; ++i) {
         const int parity = i & 1;
         probas[i] =
-          (method == 0) ? 0 : (method == 1) ? 255 :
-          (method == 2) ? 128 :
-          (method == 3) ? rnd.Rand8() :
-          (method == 4) ? (parity ? 0 : 255) :
-            // alternate between low and high proba:
-            (method == 5) ? (parity ? rnd(128) : 255 - rnd(128)) :
-            (method == 6) ?
-            (parity ? rnd(64) : 255 - rnd(64)) :
-            (parity ? rnd(32) : 255 - rnd(32));
+            (method == 0)
+                ? 0
+                : (method == 1)
+                      ? 255
+                      : (method == 2)
+                            ? 128
+                            : (method == 3)
+                                  ? rnd.Rand8()
+                                  : (method == 4)
+                                        ? (parity ? 0 : 255)
+                                        :
+                                        // alternate between low and high proba:
+                                        (method == 5)
+                                            ? (parity ? rnd(128)
+                                                      : 255 - rnd(128))
+                                            : (method == 6)
+                                                  ? (parity ? rnd(64)
+                                                            : 255 - rnd(64))
+                                                  : (parity ? rnd(32)
+                                                            : 255 - rnd(32));
       }
       for (int bit_method = 0; bit_method <= 3; ++bit_method) {
         const int random_seed = 6432;
@@ -79,8 +90,7 @@ TEST(VP10, TestBitIO) {
           }
           GTEST_ASSERT_EQ(vpx_read(&br, probas[i]), bit)
               << "pos: " << i << " / " << kBitsToTest
-              << " bit_method: " << bit_method
-              << " method: " << method;
+              << " bit_method: " << bit_method << " method: " << method;
         }
       }
     }
diff --git a/test/borders_test.cc b/test/borders_test.cc
index 00e26a57fc7e44afd08be4930c1e7c72fce4a9c3..9fdb5bf3fabd84507000f508a9a980f29f5f9e8c 100644
--- a/test/borders_test.cc
+++ b/test/borders_test.cc
@@ -17,8 +17,9 @@
 
 namespace {
 
-class BordersTest : public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class BordersTest
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
  protected:
   BordersTest() : EncoderTest(GET_PARAM(0)) {}
   virtual ~BordersTest() {}
@@ -78,6 +79,6 @@ TEST_P(BordersTest, TestLowBitrate) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(BordersTest, ::testing::Values(
-    ::libvpx_test::kTwoPassGood));
+VP10_INSTANTIATE_TEST_CASE(BordersTest,
+                           ::testing::Values(::libvpx_test::kTwoPassGood));
 }  // namespace
diff --git a/test/clear_system_state.h b/test/clear_system_state.h
index 5e767974431a130173a001f7087ac90453575f7c..044a5c758346356c25006c047005dc1ae70aaaee 100644
--- a/test/clear_system_state.h
+++ b/test/clear_system_state.h
@@ -12,7 +12,7 @@
 
 #include "./vpx_config.h"
 #if ARCH_X86 || ARCH_X86_64
-# include "vpx_ports/x86.h"
+#include "vpx_ports/x86.h"
 #endif
 
 namespace libvpx_test {
diff --git a/test/codec_factory.h b/test/codec_factory.h
index e16dbbe2016a85a00b2842085a08807c3b91156e..ebc84018b27c3991336be66edd2ca555993a69f8 100644
--- a/test/codec_factory.h
+++ b/test/codec_factory.h
@@ -38,14 +38,14 @@ class CodecFactory {
   virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
                                  const vpx_codec_flags_t flags,
                                  unsigned long deadline)  // NOLINT(runtime/int)
-                                 const = 0;
+      const = 0;
 
   virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
                                  unsigned long deadline,
                                  const unsigned long init_flags,
-                                 TwopassStatsStore *stats) const = 0;
+                                 TwopassStatsStore* stats) const = 0;
 
-  virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+  virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t* cfg,
                                                int usage) const = 0;
 };
 
@@ -53,20 +53,20 @@ class CodecFactory {
  * to avoid having to include a pointer to the CodecFactory in every test
  * definition.
  */
-template<class T1>
-class CodecTestWithParam : public ::testing::TestWithParam<
-    std::tr1::tuple< const libvpx_test::CodecFactory*, T1 > > {
-};
+template <class T1>
+class CodecTestWithParam
+    : public ::testing::TestWithParam<
+          std::tr1::tuple<const libvpx_test::CodecFactory*, T1> > {};
 
-template<class T1, class T2>
-class CodecTestWith2Params : public ::testing::TestWithParam<
-    std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2 > > {
-};
+template <class T1, class T2>
+class CodecTestWith2Params
+    : public ::testing::TestWithParam<
+          std::tr1::tuple<const libvpx_test::CodecFactory*, T1, T2> > {};
 
-template<class T1, class T2, class T3>
-class CodecTestWith3Params : public ::testing::TestWithParam<
-    std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2, T3 > > {
-};
+template <class T1, class T2, class T3>
+class CodecTestWith3Params
+    : public ::testing::TestWithParam<
+          std::tr1::tuple<const libvpx_test::CodecFactory*, T1, T2, T3> > {};
 
 /*
  * VP10 Codec Definitions
@@ -94,7 +94,7 @@ class VP10Decoder : public Decoder {
 class VP10Encoder : public Encoder {
  public:
   VP10Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
-              const unsigned long init_flags, TwopassStatsStore *stats)
+              const unsigned long init_flags, TwopassStatsStore* stats)
       : Encoder(cfg, deadline, init_flags, stats) {}
 
  protected:
@@ -129,7 +129,7 @@ class VP10CodecFactory : public CodecFactory {
   virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
                                  unsigned long deadline,
                                  const unsigned long init_flags,
-                                 TwopassStatsStore *stats) const {
+                                 TwopassStatsStore* stats) const {
 #if CONFIG_VP10_ENCODER
     return new VP10Encoder(cfg, deadline, init_flags, stats);
 #else
@@ -137,7 +137,7 @@ class VP10CodecFactory : public CodecFactory {
 #endif
   }
 
-  virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+  virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t* cfg,
                                                int usage) const {
 #if CONFIG_VP10_ENCODER
     return vpx_codec_enc_config_default(&vpx_codec_vp10_cx_algo, cfg, usage);
@@ -149,11 +149,12 @@ class VP10CodecFactory : public CodecFactory {
 
 const libvpx_test::VP10CodecFactory kVP10;
 
-#define VP10_INSTANTIATE_TEST_CASE(test, ...)\
-  INSTANTIATE_TEST_CASE_P(VP10, test, \
-      ::testing::Combine( \
+#define VP10_INSTANTIATE_TEST_CASE(test, ...)                              \
+  INSTANTIATE_TEST_CASE_P(                                                 \
+      VP10, test,                                                          \
+      ::testing::Combine(                                                  \
           ::testing::Values(static_cast<const libvpx_test::CodecFactory*>( \
-               &libvpx_test::kVP10)), \
+              &libvpx_test::kVP10)),                                       \
           __VA_ARGS__))
 #else
 #define VP10_INSTANTIATE_TEST_CASE(test, ...)
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index bb2d219674fc31b7ba99574119637b49d8d30d98..88eb6540eb56f14b33f69c540cd7e42c8778732e 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -37,14 +37,12 @@ typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
                              int w, int h);
 
 struct ConvolveFunctions {
-  ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
-                    ConvolveFunc h8, ConvolveFunc h8_avg,
-                    ConvolveFunc v8, ConvolveFunc v8_avg,
-                    ConvolveFunc hv8, ConvolveFunc hv8_avg,
-                    ConvolveFunc sh8, ConvolveFunc sh8_avg,
-                    ConvolveFunc sv8, ConvolveFunc sv8_avg,
-                    ConvolveFunc shv8, ConvolveFunc shv8_avg,
-                    int bd)
+  ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg, ConvolveFunc h8,
+                    ConvolveFunc h8_avg, ConvolveFunc v8, ConvolveFunc v8_avg,
+                    ConvolveFunc hv8, ConvolveFunc hv8_avg, ConvolveFunc sh8,
+                    ConvolveFunc sh8_avg, ConvolveFunc sv8,
+                    ConvolveFunc sv8_avg, ConvolveFunc shv8,
+                    ConvolveFunc shv8_avg, int bd)
       : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
         v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
         sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
@@ -58,12 +56,12 @@ struct ConvolveFunctions {
   ConvolveFunc h8_avg_;
   ConvolveFunc v8_avg_;
   ConvolveFunc hv8_avg_;
-  ConvolveFunc sh8_;        // scaled horiz
-  ConvolveFunc sv8_;        // scaled vert
-  ConvolveFunc shv8_;       // scaled horiz/vert
-  ConvolveFunc sh8_avg_;    // scaled avg horiz
-  ConvolveFunc sv8_avg_;    // scaled avg vert
-  ConvolveFunc shv8_avg_;   // scaled avg horiz/vert
+  ConvolveFunc sh8_;       // scaled horiz
+  ConvolveFunc sv8_;       // scaled vert
+  ConvolveFunc shv8_;      // scaled horiz/vert
+  ConvolveFunc sh8_avg_;   // scaled avg horiz
+  ConvolveFunc sv8_avg_;   // scaled avg vert
+  ConvolveFunc shv8_avg_;  // scaled avg horiz/vert
   int use_highbd_;  // 0 if high bitdepth not used, else the actual bit depth.
 };
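Note: ConvolveFunctions is a function-pointer table, so the same fixture can run identical checks against the C reference and each SIMD backend; a ConvolveParam tuple pairs a block width and height with one such table. A sketch of how the fixture reads it (again via the suite's GET_PARAM accessor):

    // Sketch: unpacking a ConvolveParam inside the fixture.
    const int width = GET_PARAM(0);
    const int height = GET_PARAM(1);
    const ConvolveFunctions *uut = GET_PARAM(2);  // implementation under test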
 
@@ -72,20 +70,12 @@ typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
 // Reference 8-tap subpixel filter, slightly modified to fit into this test.
 #define VP9_FILTER_WEIGHT 128
 #define VP9_FILTER_SHIFT 7
-uint8_t clip_pixel(int x) {
-  return x < 0 ? 0 :
-         x > 255 ? 255 :
-         x;
-}
+uint8_t clip_pixel(int x) { return x < 0 ? 0 : x > 255 ? 255 : x; }
 
-void filter_block2d_8_c(const uint8_t *src_ptr,
-                        const unsigned int src_stride,
-                        const int16_t *HFilter,
-                        const int16_t *VFilter,
-                        uint8_t *dst_ptr,
-                        unsigned int dst_stride,
-                        unsigned int output_width,
-                        unsigned int output_height) {
+void filter_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride,
+                        const int16_t *HFilter, const int16_t *VFilter,
+                        uint8_t *dst_ptr, unsigned int dst_stride,
+                        unsigned int output_width, unsigned int output_height) {
   // Between passes, we use an intermediate buffer whose height is extended to
   // have enough horizontally filtered values as input for the vertical pass.
   // This buffer is allocated to be big enough for the largest block type we
@@ -112,15 +102,11 @@ void filter_block2d_8_c(const uint8_t *src_ptr,
   for (i = 0; i < intermediate_height; ++i) {
     for (j = 0; j < output_width; ++j) {
       // Apply filter...
-      const int temp = (src_ptr[0] * HFilter[0]) +
-          (src_ptr[1] * HFilter[1]) +
-          (src_ptr[2] * HFilter[2]) +
-          (src_ptr[3] * HFilter[3]) +
-          (src_ptr[4] * HFilter[4]) +
-          (src_ptr[5] * HFilter[5]) +
-          (src_ptr[6] * HFilter[6]) +
-          (src_ptr[7] * HFilter[7]) +
-          (VP9_FILTER_WEIGHT >> 1);  // Rounding
+      const int temp = (src_ptr[0] * HFilter[0]) + (src_ptr[1] * HFilter[1]) +
+                       (src_ptr[2] * HFilter[2]) + (src_ptr[3] * HFilter[3]) +
+                       (src_ptr[4] * HFilter[4]) + (src_ptr[5] * HFilter[5]) +
+                       (src_ptr[6] * HFilter[6]) + (src_ptr[7] * HFilter[7]) +
+                       (VP9_FILTER_WEIGHT >> 1);  // Rounding
 
       // Normalize back to 0-255...
       *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
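Note: the reference filter works in Q7 fixed point: taps are scaled by VP9_FILTER_WEIGHT (128 == 1 << VP9_FILTER_SHIFT), half the weight is added for round-to-nearest, and the sum is shifted back down and clipped. A worked check with the identity kernel { 0, 0, 0, 128, 0, 0, 0, 0 } used by the copy tests below:

    // For pixel p, the only nonzero tap contributes 128 * p:
    //   temp = 128 * p + (VP9_FILTER_WEIGHT >> 1) = 128 * p + 64
    //   temp >> VP9_FILTER_SHIFT == p     // exact for 0 <= p <= 255
    // e.g. p = 200: (128 * 200 + 64) >> 7 = 25664 >> 7 = 200.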
@@ -137,15 +123,11 @@ void filter_block2d_8_c(const uint8_t *src_ptr,
   for (i = 0; i < output_height; ++i) {
     for (j = 0; j < output_width; ++j) {
       // Apply filter...
-      const int temp = (src_ptr[0] * VFilter[0]) +
-          (src_ptr[1] * VFilter[1]) +
-          (src_ptr[2] * VFilter[2]) +
-          (src_ptr[3] * VFilter[3]) +
-          (src_ptr[4] * VFilter[4]) +
-          (src_ptr[5] * VFilter[5]) +
-          (src_ptr[6] * VFilter[6]) +
-          (src_ptr[7] * VFilter[7]) +
-          (VP9_FILTER_WEIGHT >> 1);  // Rounding
+      const int temp = (src_ptr[0] * VFilter[0]) + (src_ptr[1] * VFilter[1]) +
+                       (src_ptr[2] * VFilter[2]) + (src_ptr[3] * VFilter[3]) +
+                       (src_ptr[4] * VFilter[4]) + (src_ptr[5] * VFilter[5]) +
+                       (src_ptr[6] * VFilter[6]) + (src_ptr[7] * VFilter[7]) +
+                       (VP9_FILTER_WEIGHT >> 1);  // Rounding
 
       // Normalize back to 0-255...
       *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
@@ -156,12 +138,9 @@ void filter_block2d_8_c(const uint8_t *src_ptr,
   }
 }
 
-void block2d_average_c(uint8_t *src,
-                       unsigned int src_stride,
-                       uint8_t *output_ptr,
-                       unsigned int output_stride,
-                       unsigned int output_width,
-                       unsigned int output_height) {
+void block2d_average_c(uint8_t *src, unsigned int src_stride,
+                       uint8_t *output_ptr, unsigned int output_stride,
+                       unsigned int output_width, unsigned int output_height) {
   unsigned int i, j;
   for (i = 0; i < output_height; ++i) {
     for (j = 0; j < output_width; ++j) {
@@ -173,10 +152,8 @@ void block2d_average_c(uint8_t *src,
 
 void filter_average_block2d_8_c(const uint8_t *src_ptr,
                                 const unsigned int src_stride,
-                                const int16_t *HFilter,
-                                const int16_t *VFilter,
-                                uint8_t *dst_ptr,
-                                unsigned int dst_stride,
+                                const int16_t *HFilter, const int16_t *VFilter,
+                                uint8_t *dst_ptr, unsigned int dst_stride,
                                 unsigned int output_width,
                                 unsigned int output_height) {
   uint8_t tmp[kMaxDimension * kMaxDimension];
@@ -185,20 +162,16 @@ void filter_average_block2d_8_c(const uint8_t *src_ptr,
   assert(output_height <= kMaxDimension);
   filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
                      output_width, output_height);
-  block2d_average_c(tmp, 64, dst_ptr, dst_stride,
-                    output_width, output_height);
+  block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width, output_height);
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
 void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
                                const unsigned int src_stride,
-                               const int16_t *HFilter,
-                               const int16_t *VFilter,
-                               uint16_t *dst_ptr,
-                               unsigned int dst_stride,
+                               const int16_t *HFilter, const int16_t *VFilter,
+                               uint16_t *dst_ptr, unsigned int dst_stride,
                                unsigned int output_width,
-                               unsigned int output_height,
-                               int bd) {
+                               unsigned int output_height, int bd) {
   // Between passes, we use an intermediate buffer whose height is extended to
   // have enough horizontally filtered values as input for the vertical pass.
   // This buffer is allocated to be big enough for the largest block type we
@@ -226,14 +199,10 @@ void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
     for (i = 0; i < intermediate_height; ++i) {
       for (j = 0; j < output_width; ++j) {
         // Apply filter...
-        const int temp = (src_ptr[0] * HFilter[0]) +
-                         (src_ptr[1] * HFilter[1]) +
-                         (src_ptr[2] * HFilter[2]) +
-                         (src_ptr[3] * HFilter[3]) +
-                         (src_ptr[4] * HFilter[4]) +
-                         (src_ptr[5] * HFilter[5]) +
-                         (src_ptr[6] * HFilter[6]) +
-                         (src_ptr[7] * HFilter[7]) +
+        const int temp = (src_ptr[0] * HFilter[0]) + (src_ptr[1] * HFilter[1]) +
+                         (src_ptr[2] * HFilter[2]) + (src_ptr[3] * HFilter[3]) +
+                         (src_ptr[4] * HFilter[4]) + (src_ptr[5] * HFilter[5]) +
+                         (src_ptr[6] * HFilter[6]) + (src_ptr[7] * HFilter[7]) +
                          (VP9_FILTER_WEIGHT >> 1);  // Rounding
 
         // Normalize back to 0-255...
@@ -254,14 +223,10 @@ void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
     for (i = 0; i < output_height; ++i) {
       for (j = 0; j < output_width; ++j) {
         // Apply filter...
-        const int temp = (src_ptr[0] * VFilter[0]) +
-                         (src_ptr[1] * VFilter[1]) +
-                         (src_ptr[2] * VFilter[2]) +
-                         (src_ptr[3] * VFilter[3]) +
-                         (src_ptr[4] * VFilter[4]) +
-                         (src_ptr[5] * VFilter[5]) +
-                         (src_ptr[6] * VFilter[6]) +
-                         (src_ptr[7] * VFilter[7]) +
+        const int temp = (src_ptr[0] * VFilter[0]) + (src_ptr[1] * VFilter[1]) +
+                         (src_ptr[2] * VFilter[2]) + (src_ptr[3] * VFilter[3]) +
+                         (src_ptr[4] * VFilter[4]) + (src_ptr[5] * VFilter[5]) +
+                         (src_ptr[6] * VFilter[6]) + (src_ptr[7] * VFilter[7]) +
                          (VP9_FILTER_WEIGHT >> 1);  // Rounding
 
         // Normalize back to 0-255...
@@ -274,13 +239,10 @@ void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
   }
 }
 
-void highbd_block2d_average_c(uint16_t *src,
-                              unsigned int src_stride,
-                              uint16_t *output_ptr,
-                              unsigned int output_stride,
+void highbd_block2d_average_c(uint16_t *src, unsigned int src_stride,
+                              uint16_t *output_ptr, unsigned int output_stride,
                               unsigned int output_width,
-                              unsigned int output_height,
-                              int bd) {
+                              unsigned int output_height, int bd) {
   unsigned int i, j;
   for (i = 0; i < output_height; ++i) {
     for (j = 0; j < output_width; ++j) {
@@ -290,23 +252,19 @@ void highbd_block2d_average_c(uint16_t *src,
   }
 }
 
-void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
-                                       const unsigned int src_stride,
-                                       const int16_t *HFilter,
-                                       const int16_t *VFilter,
-                                       uint16_t *dst_ptr,
-                                       unsigned int dst_stride,
-                                       unsigned int output_width,
-                                       unsigned int output_height,
-                                       int bd) {
+void highbd_filter_average_block2d_8_c(
+    const uint16_t *src_ptr, const unsigned int src_stride,
+    const int16_t *HFilter, const int16_t *VFilter, uint16_t *dst_ptr,
+    unsigned int dst_stride, unsigned int output_width,
+    unsigned int output_height, int bd) {
   uint16_t tmp[kMaxDimension * kMaxDimension];
 
   assert(output_width <= kMaxDimension);
   assert(output_height <= kMaxDimension);
   highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
                             output_width, output_height, bd);
-  highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride,
-                           output_width, output_height, bd);
+  highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width,
+                           output_height, bd);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
@@ -314,19 +272,20 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
  public:
   static void SetUpTestCase() {
     // Force input_ to be unaligned, output to be 16 byte aligned.
-    input_ = reinterpret_cast<uint8_t*>(
-        vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
-    output_ = reinterpret_cast<uint8_t*>(
+    input_ = reinterpret_cast<uint8_t *>(
+                 vpx_memalign(kDataAlignment, kInputBufferSize + 1)) +
+             1;
+    output_ = reinterpret_cast<uint8_t *>(
         vpx_memalign(kDataAlignment, kOutputBufferSize));
-    output_ref_ = reinterpret_cast<uint8_t*>(
+    output_ref_ = reinterpret_cast<uint8_t *>(
         vpx_memalign(kDataAlignment, kOutputBufferSize));
 #if CONFIG_VPX_HIGHBITDEPTH
-    input16_ = reinterpret_cast<uint16_t*>(
-        vpx_memalign(kDataAlignment,
-                     (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
-    output16_ = reinterpret_cast<uint16_t*>(
+    input16_ = reinterpret_cast<uint16_t *>(vpx_memalign(
+                   kDataAlignment, (kInputBufferSize + 1) * sizeof(uint16_t))) +
+               1;
+    output16_ = reinterpret_cast<uint16_t *>(
         vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
-    output16_ref_ = reinterpret_cast<uint16_t*>(
+    output16_ref_ = reinterpret_cast<uint16_t *>(
         vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
 #endif
   }
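Note: the "+ 1" is deliberate: input_ is bumped one byte past a kDataAlignment boundary so the kernels are always exercised on an unaligned source, while output_ and output_ref_ stay aligned. The same trick in isolation (a sketch; the matching teardown must free the aligned base pointer, not the offset one):

    uint8_t *base = reinterpret_cast<uint8_t *>(
        vpx_memalign(kDataAlignment, kInputBufferSize + 1));  // aligned base
    uint8_t *src = base + 1;  // guaranteed misaligned working pointer
    // ... feed src to the convolution under test ...
    vpx_free(base);  // equivalently, vpx_free(src - 1)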
@@ -421,8 +380,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
 
   void CheckGuardBlocks() {
     for (int i = 0; i < kOutputBufferSize; ++i) {
-      if (IsIndexInBorder(i))
-        EXPECT_EQ(255, output_[i]);
+      if (IsIndexInBorder(i)) EXPECT_EQ(255, output_[i]);
     }
   }
 
@@ -480,98 +438,88 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
   void assign_val(uint8_t *list, int index, uint16_t val) const {
 #if CONFIG_VPX_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
-      list[index] = (uint8_t) val;
+      list[index] = (uint8_t)val;
     } else {
       CONVERT_TO_SHORTPTR(list)[index] = val;
     }
 #else
-    list[index] = (uint8_t) val;
+    list[index] = (uint8_t)val;
 #endif
   }
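Note: assign_val (and its lookup counterpart) hide the bit-depth split: for 8-bit runs the uint8_t* is a real byte buffer, while for high-bitdepth runs it encodes a uint16_t buffer that CONVERT_TO_SHORTPTR recovers. The shape of the pattern, mirroring the code above:

    // Sketch: one store that works for either depth, assuming the buffer was
    // created with the matching convention when use_highbd != 0.
    void store_pixel(uint8_t *list, int index, uint16_t val, int use_highbd) {
      if (use_highbd == 0) {
        list[index] = (uint8_t)val;              // plain byte store
      } else {
        CONVERT_TO_SHORTPTR(list)[index] = val;  // decode to uint16_t*, store
      }
    }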
 
-  void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
-                                          const unsigned int src_stride,
-                                          const int16_t *HFilter,
-                                          const int16_t *VFilter,
-                                          uint8_t *dst_ptr,
-                                          unsigned int dst_stride,
-                                          unsigned int output_width,
-                                          unsigned int output_height) {
+  void wrapper_filter_average_block2d_8_c(
+      const uint8_t *src_ptr, const unsigned int src_stride,
+      const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr,
+      unsigned int dst_stride, unsigned int output_width,
+      unsigned int output_height) {
 #if CONFIG_VPX_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
-      filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
-                                 dst_ptr, dst_stride, output_width,
-                                 output_height);
+      filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
+                                 dst_stride, output_width, output_height);
     } else {
-      highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
-                                        src_stride, HFilter, VFilter,
-                                        CONVERT_TO_SHORTPTR(dst_ptr),
-                                        dst_stride, output_width, output_height,
-                                        UUT_->use_highbd_);
+      highbd_filter_average_block2d_8_c(
+          CONVERT_TO_SHORTPTR(src_ptr), src_stride, HFilter, VFilter,
+          CONVERT_TO_SHORTPTR(dst_ptr), dst_stride, output_width, output_height,
+          UUT_->use_highbd_);
     }
 #else
-    filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
-                               dst_ptr, dst_stride, output_width,
-                               output_height);
+    filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
+                               dst_stride, output_width, output_height);
 #endif
   }
 
   void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
                                   const unsigned int src_stride,
                                   const int16_t *HFilter,
-                                  const int16_t *VFilter,
-                                  uint8_t *dst_ptr,
+                                  const int16_t *VFilter, uint8_t *dst_ptr,
                                   unsigned int dst_stride,
                                   unsigned int output_width,
                                   unsigned int output_height) {
 #if CONFIG_VPX_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
-      filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
-                         dst_ptr, dst_stride, output_width, output_height);
+      filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
+                         dst_stride, output_width, output_height);
     } else {
       highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
-                                HFilter, VFilter,
-                                CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
-                                output_width, output_height, UUT_->use_highbd_);
+                                HFilter, VFilter, CONVERT_TO_SHORTPTR(dst_ptr),
+                                dst_stride, output_width, output_height,
+                                UUT_->use_highbd_);
     }
 #else
-    filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
-                       dst_ptr, dst_stride, output_width, output_height);
+    filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
+                       dst_stride, output_width, output_height);
 #endif
   }
 
-  const ConvolveFunctions* UUT_;
-  static uint8_t* input_;
-  static uint8_t* output_;
-  static uint8_t* output_ref_;
+  const ConvolveFunctions *UUT_;
+  static uint8_t *input_;
+  static uint8_t *output_;
+  static uint8_t *output_ref_;
 #if CONFIG_VPX_HIGHBITDEPTH
-  static uint16_t* input16_;
-  static uint16_t* output16_;
-  static uint16_t* output16_ref_;
+  static uint16_t *input16_;
+  static uint16_t *output16_;
+  static uint16_t *output16_ref_;
   int mask_;
 #endif
 };
 
-uint8_t* ConvolveTest::input_ = NULL;
-uint8_t* ConvolveTest::output_ = NULL;
-uint8_t* ConvolveTest::output_ref_ = NULL;
+uint8_t *ConvolveTest::input_ = NULL;
+uint8_t *ConvolveTest::output_ = NULL;
+uint8_t *ConvolveTest::output_ref_ = NULL;
 #if CONFIG_VPX_HIGHBITDEPTH
-uint16_t* ConvolveTest::input16_ = NULL;
-uint16_t* ConvolveTest::output16_ = NULL;
-uint16_t* ConvolveTest::output16_ref_ = NULL;
+uint16_t *ConvolveTest::input16_ = NULL;
+uint16_t *ConvolveTest::output16_ = NULL;
+uint16_t *ConvolveTest::output16_ref_ = NULL;
 #endif
 
-TEST_P(ConvolveTest, GuardBlocks) {
-  CheckGuardBlocks();
-}
+TEST_P(ConvolveTest, GuardBlocks) { CheckGuardBlocks(); }
 
 TEST_P(ConvolveTest, Copy) {
-  uint8_t* const in = input();
-  uint8_t* const out = output();
+  uint8_t *const in = input();
+  uint8_t *const out = output();
 
-  ASM_REGISTER_STATE_CHECK(
-      UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
-                  Width(), Height()));
+  ASM_REGISTER_STATE_CHECK(UUT_->copy_(in, kInputStride, out, kOutputStride,
+                                       NULL, 0, NULL, 0, Width(), Height()));
 
   CheckGuardBlocks();
 
@@ -583,14 +531,13 @@ TEST_P(ConvolveTest, Copy) {
 }
 
 TEST_P(ConvolveTest, Avg) {
-  uint8_t* const in = input();
-  uint8_t* const out = output();
-  uint8_t* const out_ref = output_ref();
+  uint8_t *const in = input();
+  uint8_t *const out = output();
+  uint8_t *const out_ref = output_ref();
   CopyOutputToRef();
 
-  ASM_REGISTER_STATE_CHECK(
-      UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
-                Width(), Height()));
+  ASM_REGISTER_STATE_CHECK(UUT_->avg_(in, kInputStride, out, kOutputStride,
+                                      NULL, 0, NULL, 0, Width(), Height()));
 
   CheckGuardBlocks();
 
@@ -598,18 +545,20 @@ TEST_P(ConvolveTest, Avg) {
     for (int x = 0; x < Width(); ++x)
       ASSERT_EQ(lookup(out, y * kOutputStride + x),
                 ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
-                                   lookup(out_ref, y * kOutputStride + x), 1))
+                                       lookup(out_ref, y * kOutputStride + x),
+                                   1))
           << "(" << x << "," << y << ")";
 }
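Note: ROUND_POWER_OF_TWO(v, 1) computes (v + 1) >> 1, a round-half-up average, so the Avg check demands round-to-nearest rather than truncating behavior. A worked instance:

    // in = 100, prior output out_ref = 101:
    //   ROUND_POWER_OF_TWO(100 + 101, 1) = (201 + 1) >> 1 = 101
    // a truncating average (201 >> 1 == 100) would fail the ASSERT_EQ.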
 
 TEST_P(ConvolveTest, CopyHoriz) {
-  uint8_t* const in = input();
-  uint8_t* const out = output();
-  DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+  uint8_t *const in = input();
+  uint8_t *const out = output();
+  DECLARE_ALIGNED(256, const int16_t,
+                  filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 };
 
-  ASM_REGISTER_STATE_CHECK(
-      UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
-                 Width(), Height()));
+  ASM_REGISTER_STATE_CHECK(UUT_->sh8_(in, kInputStride, out, kOutputStride,
+                                      filter8, 16, filter8, 16, Width(),
+                                      Height()));
 
   CheckGuardBlocks();
 
@@ -621,13 +570,14 @@ TEST_P(ConvolveTest, CopyHoriz) {
 }
 
 TEST_P(ConvolveTest, CopyVert) {
-  uint8_t* const in = input();
-  uint8_t* const out = output();
-  DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+  uint8_t *const in = input();
+  uint8_t *const out = output();
+  DECLARE_ALIGNED(256, const int16_t,
+                  filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 };
 
-  ASM_REGISTER_STATE_CHECK(
-      UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
-                 Width(), Height()));
+  ASM_REGISTER_STATE_CHECK(UUT_->sv8_(in, kInputStride, out, kOutputStride,
+                                      filter8, 16, filter8, 16, Width(),
+                                      Height()));
 
   CheckGuardBlocks();
 
@@ -639,13 +589,14 @@ TEST_P(ConvolveTest, CopyVert) {
 }
 
 TEST_P(ConvolveTest, Copy2D) {
-  uint8_t* const in = input();
-  uint8_t* const out = output();
-  DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+  uint8_t *const in = input();
+  uint8_t *const out = output();
+  DECLARE_ALIGNED(256, const int16_t,
+                  filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 };
 
-  ASM_REGISTER_STATE_CHECK(
-      UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
-                  16, Width(), Height()));
+  ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+                                       filter8, 16, filter8, 16, Width(),
+                                       Height()));
 
   CheckGuardBlocks();
 
@@ -683,12 +634,12 @@ TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
 const int16_t kInvalidFilter[8] = { 0 };
 
 TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
-  uint8_t* const in = input();
-  uint8_t* const out = output();
+  uint8_t *const in = input();
+  uint8_t *const out = output();
 #if CONFIG_VPX_HIGHBITDEPTH
   uint8_t ref8[kOutputStride * kMaxDimension];
   uint16_t ref16[kOutputStride * kMaxDimension];
-  uint8_t* ref;
+  uint8_t *ref;
   if (UUT_->use_highbd_ == 0) {
     ref = ref8;
   } else {
@@ -704,31 +655,26 @@ TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
 
     for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
       for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
-        wrapper_filter_block2d_8_c(in, kInputStride,
-                                   filters[filter_x], filters[filter_y],
-                                   ref, kOutputStride,
+        wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x],
+                                   filters[filter_y], ref, kOutputStride,
                                    Width(), Height());
 
         if (filter_x && filter_y)
-          ASM_REGISTER_STATE_CHECK(
-              UUT_->hv8_(in, kInputStride, out, kOutputStride,
-                         filters[filter_x], 16, filters[filter_y], 16,
-                         Width(), Height()));
+          ASM_REGISTER_STATE_CHECK(UUT_->hv8_(
+              in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+              filters[filter_y], 16, Width(), Height()));
         else if (filter_y)
           ASM_REGISTER_STATE_CHECK(
-              UUT_->v8_(in, kInputStride, out, kOutputStride,
-                        kInvalidFilter, 16, filters[filter_y], 16,
-                        Width(), Height()));
+              UUT_->v8_(in, kInputStride, out, kOutputStride, kInvalidFilter,
+                        16, filters[filter_y], 16, Width(), Height()));
         else if (filter_x)
           ASM_REGISTER_STATE_CHECK(
-              UUT_->h8_(in, kInputStride, out, kOutputStride,
-                        filters[filter_x], 16, kInvalidFilter, 16,
-                        Width(), Height()));
+              UUT_->h8_(in, kInputStride, out, kOutputStride, filters[filter_x],
+                        16, kInvalidFilter, 16, Width(), Height()));
         else
           ASM_REGISTER_STATE_CHECK(
-              UUT_->copy_(in, kInputStride, out, kOutputStride,
-                          kInvalidFilter, 0, kInvalidFilter, 0,
-                          Width(), Height()));
+              UUT_->copy_(in, kInputStride, out, kOutputStride, kInvalidFilter,
+                          0, kInvalidFilter, 0, Width(), Height()));
 
         CheckGuardBlocks();
 
@@ -737,20 +683,20 @@ TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
             ASSERT_EQ(lookup(ref, y * kOutputStride + x),
                       lookup(out, y * kOutputStride + x))
                 << "mismatch at (" << x << "," << y << "), "
-                << "filters (" << filter_bank << ","
-                << filter_x << "," << filter_y << ")";
+                << "filters (" << filter_bank << "," << filter_x << ","
+                << filter_y << ")";
       }
     }
   }
 }
 
 TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
-  uint8_t* const in = input();
-  uint8_t* const out = output();
+  uint8_t *const in = input();
+  uint8_t *const out = output();
 #if CONFIG_VPX_HIGHBITDEPTH
   uint8_t ref8[kOutputStride * kMaxDimension];
   uint16_t ref16[kOutputStride * kMaxDimension];
-  uint8_t* ref;
+  uint8_t *ref;
   if (UUT_->use_highbd_ == 0) {
     ref = ref8;
   } else {
@@ -786,31 +732,26 @@ TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
 
     for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
       for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
-        wrapper_filter_average_block2d_8_c(in, kInputStride,
-                                           filters[filter_x], filters[filter_y],
-                                           ref, kOutputStride,
-                                           Width(), Height());
+        wrapper_filter_average_block2d_8_c(in, kInputStride, filters[filter_x],
+                                           filters[filter_y], ref,
+                                           kOutputStride, Width(), Height());
 
         if (filter_x && filter_y)
-          ASM_REGISTER_STATE_CHECK(
-              UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
-                             filters[filter_x], 16, filters[filter_y], 16,
-                             Width(), Height()));
+          ASM_REGISTER_STATE_CHECK(UUT_->hv8_avg_(
+              in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+              filters[filter_y], 16, Width(), Height()));
         else if (filter_y)
-          ASM_REGISTER_STATE_CHECK(
-              UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
-                            kInvalidFilter, 16, filters[filter_y], 16,
-                            Width(), Height()));
+          ASM_REGISTER_STATE_CHECK(UUT_->v8_avg_(
+              in, kInputStride, out, kOutputStride, kInvalidFilter, 16,
+              filters[filter_y], 16, Width(), Height()));
         else if (filter_x)
-          ASM_REGISTER_STATE_CHECK(
-              UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
-                            filters[filter_x], 16, kInvalidFilter, 16,
-                            Width(), Height()));
+          ASM_REGISTER_STATE_CHECK(UUT_->h8_avg_(
+              in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+              kInvalidFilter, 16, Width(), Height()));
         else
           ASM_REGISTER_STATE_CHECK(
-              UUT_->avg_(in, kInputStride, out, kOutputStride,
-                          kInvalidFilter, 0, kInvalidFilter, 0,
-                          Width(), Height()));
+              UUT_->avg_(in, kInputStride, out, kOutputStride, kInvalidFilter,
+                         0, kInvalidFilter, 0, Width(), Height()));
 
         CheckGuardBlocks();
 
@@ -819,8 +760,8 @@ TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
             ASSERT_EQ(lookup(ref, y * kOutputStride + x),
                       lookup(out, y * kOutputStride + x))
                 << "mismatch at (" << x << "," << y << "), "
-                << "filters (" << filter_bank << ","
-                << filter_x << "," << filter_y << ")";
+                << "filters (" << filter_bank << "," << filter_x << ","
+                << filter_y << ")";
       }
     }
   }
@@ -867,16 +808,16 @@ TEST_P(ConvolveTest, FilterExtremes) {
       for (int y = 0; y < 8; ++y) {
         for (int x = 0; x < 8; ++x) {
 #if CONFIG_VPX_HIGHBITDEPTH
-            assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
-                       ((seed_val >> (axis ? y : x)) & 1) * mask_);
+          assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+                     ((seed_val >> (axis ? y : x)) & 1) * mask_);
 #else
-            assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
-                       ((seed_val >> (axis ? y : x)) & 1) * 255);
+          assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+                     ((seed_val >> (axis ? y : x)) & 1) * 255);
 #endif
           if (axis) seed_val++;
         }
         if (axis)
-          seed_val-= 8;
+          seed_val -= 8;
         else
           seed_val++;
       }
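Note: each bit of seed_val selects whether a pixel in the 8-tap neighborhood is 0 or the maximum legal value (mask_ in high-bitdepth builds, 255 otherwise), so successive seeds enumerate extreme on/off patterns along one axis. A small worked instance:

    // axis == 0, seed_val == 0b0110, x = 0..3 within one row:
    //   (seed_val >> x) & 1  ->  0, 1, 1, 0
    //   stored pixels        ->  0, 255, 255, 0   (0 / mask_ for highbd)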
@@ -887,38 +828,33 @@ TEST_P(ConvolveTest, FilterExtremes) {
             vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
         for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
           for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
-            wrapper_filter_block2d_8_c(in, kInputStride,
-                                       filters[filter_x], filters[filter_y],
-                                       ref, kOutputStride,
+            wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x],
+                                       filters[filter_y], ref, kOutputStride,
                                        Width(), Height());
             if (filter_x && filter_y)
-              ASM_REGISTER_STATE_CHECK(
-                  UUT_->hv8_(in, kInputStride, out, kOutputStride,
-                             filters[filter_x], 16, filters[filter_y], 16,
-                             Width(), Height()));
+              ASM_REGISTER_STATE_CHECK(UUT_->hv8_(
+                  in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+                  filters[filter_y], 16, Width(), Height()));
             else if (filter_y)
-              ASM_REGISTER_STATE_CHECK(
-                  UUT_->v8_(in, kInputStride, out, kOutputStride,
-                            kInvalidFilter, 16, filters[filter_y], 16,
-                            Width(), Height()));
+              ASM_REGISTER_STATE_CHECK(UUT_->v8_(
+                  in, kInputStride, out, kOutputStride, kInvalidFilter, 16,
+                  filters[filter_y], 16, Width(), Height()));
             else if (filter_x)
-              ASM_REGISTER_STATE_CHECK(
-                  UUT_->h8_(in, kInputStride, out, kOutputStride,
-                            filters[filter_x], 16, kInvalidFilter, 16,
-                            Width(), Height()));
+              ASM_REGISTER_STATE_CHECK(UUT_->h8_(
+                  in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+                  kInvalidFilter, 16, Width(), Height()));
             else
-              ASM_REGISTER_STATE_CHECK(
-                  UUT_->copy_(in, kInputStride, out, kOutputStride,
-                              kInvalidFilter, 0, kInvalidFilter, 0,
-                              Width(), Height()));
+              ASM_REGISTER_STATE_CHECK(UUT_->copy_(
+                  in, kInputStride, out, kOutputStride, kInvalidFilter, 0,
+                  kInvalidFilter, 0, Width(), Height()));
 
             for (int y = 0; y < Height(); ++y)
               for (int x = 0; x < Width(); ++x)
                 ASSERT_EQ(lookup(ref, y * kOutputStride + x),
                           lookup(out, y * kOutputStride + x))
                     << "mismatch at (" << x << "," << y << "), "
-                    << "filters (" << filter_bank << ","
-                    << filter_x << "," << filter_y << ")";
+                    << "filters (" << filter_bank << "," << filter_x << ","
+                    << filter_y << ")";
           }
         }
       }
@@ -929,8 +865,8 @@ TEST_P(ConvolveTest, FilterExtremes) {
 /* This test exercises that enough rows and columns are filtered at every
    possible initial fractional position and scaling step. */
 TEST_P(ConvolveTest, CheckScalingFiltering) {
-  uint8_t* const in = input();
-  uint8_t* const out = output();
+  uint8_t *const in = input();
+  uint8_t *const out = output();
   const InterpKernel *const eighttap = vp10_filter_kernels[EIGHTTAP];
 
   SetConstantInput(127);
@@ -939,9 +875,8 @@ TEST_P(ConvolveTest, CheckScalingFiltering) {
     for (int step = 1; step <= 32; ++step) {
       /* Test the horizontal and vertical filters in combination. */
       ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
-                                           eighttap[frac], step,
-                                           eighttap[frac], step,
-                                           Width(), Height()));
+                                           eighttap[frac], step, eighttap[frac],
+                                           step, Width(), Height()));
 
       CheckGuardBlocks();
 
@@ -949,8 +884,8 @@ TEST_P(ConvolveTest, CheckScalingFiltering) {
         for (int x = 0; x < Width(); ++x) {
           ASSERT_EQ(lookup(in, y * kInputStride + x),
                     lookup(out, y * kOutputStride + x))
-              << "x == " << x << ", y == " << y
-              << ", frac == " << frac << ", step == " << step;
+              << "x == " << x << ", y == " << y << ", frac == " << frac
+              << ", step == " << step;
         }
       }
     }
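Note: a constant input of 127 makes the expected result independent of frac and step: assuming each row of the eighttap kernel sums to VP9_FILTER_WEIGHT (128), as the vp10 subpel kernels do, every filtering pass maps the constant back to itself:

    //   temp = 127 * 128 + 64 = 16320
    //   16320 >> 7 = 127   (16320 / 128 = 127.5, truncated)
    // so the scaled output must equal the input pixel-for-pixel.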
@@ -960,18 +895,14 @@ TEST_P(ConvolveTest, CheckScalingFiltering) {
 using std::tr1::make_tuple;
 
 #if CONFIG_VPX_HIGHBITDEPTH
-#define WRAP(func, bd) \
-void wrap_ ## func ## _ ## bd(const uint8_t *src, ptrdiff_t src_stride, \
-                              uint8_t *dst, ptrdiff_t dst_stride, \
-                              const int16_t *filter_x, \
-                              int filter_x_stride, \
-                              const int16_t *filter_y, \
-                              int filter_y_stride, \
-                              int w, int h) { \
-  vpx_highbd_ ## func(src, src_stride, dst, dst_stride, filter_x, \
-                      filter_x_stride, filter_y, filter_y_stride, \
-                      w, h, bd); \
-}
+#define WRAP(func, bd)                                                       \
+  void wrap_##func##_##bd(                                                   \
+      const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                \
+      ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride,    \
+      const int16_t *filter_y, int filter_y_stride, int w, int h) {          \
+    vpx_highbd_##func(src, src_stride, dst, dst_stride, filter_x,            \
+                      filter_x_stride, filter_y, filter_y_stride, w, h, bd); \
+  }
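Note: WRAP generates thin adapters so the high-bitdepth kernels, which take an extra bd argument, match the 8-bit ConvolveFunc signature and can share the ConvolveFunctions table. One expansion, reformatted, for WRAP(convolve_copy_sse2, 8):

    void wrap_convolve_copy_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int filter_x_stride,
                                   const int16_t *filter_y, int filter_y_stride,
                                   int w, int h) {
      vpx_highbd_convolve_copy_sse2(src, src_stride, dst, dst_stride, filter_x,
                                    filter_x_stride, filter_y, filter_y_stride,
                                    w, h, 8);
    }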
 #if HAVE_SSE2 && ARCH_X86_64
 #if CONFIG_USE_X86INC
 WRAP(convolve_copy_sse2, 8)
@@ -1028,97 +959,76 @@ WRAP(convolve8_avg_c, 12)
 #undef WRAP
 
 const ConvolveFunctions convolve8_c(
-    wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
-    wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
-    wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
-    wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
+    wrap_convolve_copy_c_8, wrap_convolve_avg_c_8, wrap_convolve8_horiz_c_8,
+    wrap_convolve8_avg_horiz_c_8, wrap_convolve8_vert_c_8,
+    wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
     wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
-    wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
-    wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
-INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_c),
-    make_tuple(8, 4, &convolve8_c),
-    make_tuple(4, 8, &convolve8_c),
-    make_tuple(8, 8, &convolve8_c),
-    make_tuple(16, 8, &convolve8_c),
-    make_tuple(8, 16, &convolve8_c),
-    make_tuple(16, 16, &convolve8_c),
-    make_tuple(32, 16, &convolve8_c),
-    make_tuple(16, 32, &convolve8_c),
-    make_tuple(32, 32, &convolve8_c),
-    make_tuple(64, 32, &convolve8_c),
-    make_tuple(32, 64, &convolve8_c),
-    make_tuple(64, 64, &convolve8_c)));
+    wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8,
+    wrap_convolve8_avg_c_8, 8);
+INSTANTIATE_TEST_CASE_P(
+    C_8, ConvolveTest,
+    ::testing::Values(
+        make_tuple(4, 4, &convolve8_c), make_tuple(8, 4, &convolve8_c),
+        make_tuple(4, 8, &convolve8_c), make_tuple(8, 8, &convolve8_c),
+        make_tuple(16, 8, &convolve8_c), make_tuple(8, 16, &convolve8_c),
+        make_tuple(16, 16, &convolve8_c), make_tuple(32, 16, &convolve8_c),
+        make_tuple(16, 32, &convolve8_c), make_tuple(32, 32, &convolve8_c),
+        make_tuple(64, 32, &convolve8_c), make_tuple(32, 64, &convolve8_c),
+        make_tuple(64, 64, &convolve8_c)));
 const ConvolveFunctions convolve10_c(
-    wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+    wrap_convolve_copy_c_10, wrap_convolve_avg_c_10, wrap_convolve8_horiz_c_10,
+    wrap_convolve8_avg_horiz_c_10, wrap_convolve8_vert_c_10,
+    wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
     wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
-    wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
-    wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
-    wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
-    wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
-    wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
-INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve10_c),
-    make_tuple(8, 4, &convolve10_c),
-    make_tuple(4, 8, &convolve10_c),
-    make_tuple(8, 8, &convolve10_c),
-    make_tuple(16, 8, &convolve10_c),
-    make_tuple(8, 16, &convolve10_c),
-    make_tuple(16, 16, &convolve10_c),
-    make_tuple(32, 16, &convolve10_c),
-    make_tuple(16, 32, &convolve10_c),
-    make_tuple(32, 32, &convolve10_c),
-    make_tuple(64, 32, &convolve10_c),
-    make_tuple(32, 64, &convolve10_c),
-    make_tuple(64, 64, &convolve10_c)));
+    wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10,
+    wrap_convolve8_avg_c_10, 10);
+INSTANTIATE_TEST_CASE_P(
+    C_10, ConvolveTest,
+    ::testing::Values(
+        make_tuple(4, 4, &convolve10_c), make_tuple(8, 4, &convolve10_c),
+        make_tuple(4, 8, &convolve10_c), make_tuple(8, 8, &convolve10_c),
+        make_tuple(16, 8, &convolve10_c), make_tuple(8, 16, &convolve10_c),
+        make_tuple(16, 16, &convolve10_c), make_tuple(32, 16, &convolve10_c),
+        make_tuple(16, 32, &convolve10_c), make_tuple(32, 32, &convolve10_c),
+        make_tuple(64, 32, &convolve10_c), make_tuple(32, 64, &convolve10_c),
+        make_tuple(64, 64, &convolve10_c)));
 const ConvolveFunctions convolve12_c(
-    wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
-    wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
-    wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
-    wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
+    wrap_convolve_copy_c_12, wrap_convolve_avg_c_12, wrap_convolve8_horiz_c_12,
+    wrap_convolve8_avg_horiz_c_12, wrap_convolve8_vert_c_12,
+    wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
     wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
-    wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
-    wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
-INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve12_c),
-    make_tuple(8, 4, &convolve12_c),
-    make_tuple(4, 8, &convolve12_c),
-    make_tuple(8, 8, &convolve12_c),
-    make_tuple(16, 8, &convolve12_c),
-    make_tuple(8, 16, &convolve12_c),
-    make_tuple(16, 16, &convolve12_c),
-    make_tuple(32, 16, &convolve12_c),
-    make_tuple(16, 32, &convolve12_c),
-    make_tuple(32, 32, &convolve12_c),
-    make_tuple(64, 32, &convolve12_c),
-    make_tuple(32, 64, &convolve12_c),
-    make_tuple(64, 64, &convolve12_c)));
+    wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12,
+    wrap_convolve8_avg_c_12, 12);
+INSTANTIATE_TEST_CASE_P(
+    C_12, ConvolveTest,
+    ::testing::Values(
+        make_tuple(4, 4, &convolve12_c), make_tuple(8, 4, &convolve12_c),
+        make_tuple(4, 8, &convolve12_c), make_tuple(8, 8, &convolve12_c),
+        make_tuple(16, 8, &convolve12_c), make_tuple(8, 16, &convolve12_c),
+        make_tuple(16, 16, &convolve12_c), make_tuple(32, 16, &convolve12_c),
+        make_tuple(16, 32, &convolve12_c), make_tuple(32, 32, &convolve12_c),
+        make_tuple(64, 32, &convolve12_c), make_tuple(32, 64, &convolve12_c),
+        make_tuple(64, 64, &convolve12_c)));
 
 #else
 
 const ConvolveFunctions convolve8_c(
-    vpx_convolve_copy_c, vpx_convolve_avg_c,
-    vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
-    vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
-    vpx_convolve8_c, vpx_convolve8_avg_c,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
-    vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_c,
+    vpx_convolve8_avg_horiz_c, vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+    vpx_convolve8_c, vpx_convolve8_avg_c, vpx_scaled_horiz_c,
+    vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
     vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
 
-INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_c),
-    make_tuple(8, 4, &convolve8_c),
-    make_tuple(4, 8, &convolve8_c),
-    make_tuple(8, 8, &convolve8_c),
-    make_tuple(16, 8, &convolve8_c),
-    make_tuple(8, 16, &convolve8_c),
-    make_tuple(16, 16, &convolve8_c),
-    make_tuple(32, 16, &convolve8_c),
-    make_tuple(16, 32, &convolve8_c),
-    make_tuple(32, 32, &convolve8_c),
-    make_tuple(64, 32, &convolve8_c),
-    make_tuple(32, 64, &convolve8_c),
-    make_tuple(64, 64, &convolve8_c)));
+INSTANTIATE_TEST_CASE_P(
+    C, ConvolveTest,
+    ::testing::Values(
+        make_tuple(4, 4, &convolve8_c), make_tuple(8, 4, &convolve8_c),
+        make_tuple(4, 8, &convolve8_c), make_tuple(8, 8, &convolve8_c),
+        make_tuple(16, 8, &convolve8_c), make_tuple(8, 16, &convolve8_c),
+        make_tuple(16, 16, &convolve8_c), make_tuple(32, 16, &convolve8_c),
+        make_tuple(16, 32, &convolve8_c), make_tuple(32, 32, &convolve8_c),
+        make_tuple(64, 32, &convolve8_c), make_tuple(32, 64, &convolve8_c),
+        make_tuple(64, 64, &convolve8_c)));
 #endif
 
 #if HAVE_SSE2 && ARCH_X86_64
@@ -1159,46 +1069,40 @@ const ConvolveFunctions convolve12_sse2(
     wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
     wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
     wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
-INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_sse2),
-    make_tuple(8, 4, &convolve8_sse2),
-    make_tuple(4, 8, &convolve8_sse2),
-    make_tuple(8, 8, &convolve8_sse2),
-    make_tuple(16, 8, &convolve8_sse2),
-    make_tuple(8, 16, &convolve8_sse2),
-    make_tuple(16, 16, &convolve8_sse2),
-    make_tuple(32, 16, &convolve8_sse2),
-    make_tuple(16, 32, &convolve8_sse2),
-    make_tuple(32, 32, &convolve8_sse2),
-    make_tuple(64, 32, &convolve8_sse2),
-    make_tuple(32, 64, &convolve8_sse2),
-    make_tuple(64, 64, &convolve8_sse2),
-    make_tuple(4, 4, &convolve10_sse2),
-    make_tuple(8, 4, &convolve10_sse2),
-    make_tuple(4, 8, &convolve10_sse2),
-    make_tuple(8, 8, &convolve10_sse2),
-    make_tuple(16, 8, &convolve10_sse2),
-    make_tuple(8, 16, &convolve10_sse2),
-    make_tuple(16, 16, &convolve10_sse2),
-    make_tuple(32, 16, &convolve10_sse2),
-    make_tuple(16, 32, &convolve10_sse2),
-    make_tuple(32, 32, &convolve10_sse2),
-    make_tuple(64, 32, &convolve10_sse2),
-    make_tuple(32, 64, &convolve10_sse2),
-    make_tuple(64, 64, &convolve10_sse2),
-    make_tuple(4, 4, &convolve12_sse2),
-    make_tuple(8, 4, &convolve12_sse2),
-    make_tuple(4, 8, &convolve12_sse2),
-    make_tuple(8, 8, &convolve12_sse2),
-    make_tuple(16, 8, &convolve12_sse2),
-    make_tuple(8, 16, &convolve12_sse2),
-    make_tuple(16, 16, &convolve12_sse2),
-    make_tuple(32, 16, &convolve12_sse2),
-    make_tuple(16, 32, &convolve12_sse2),
-    make_tuple(32, 32, &convolve12_sse2),
-    make_tuple(64, 32, &convolve12_sse2),
-    make_tuple(32, 64, &convolve12_sse2),
-    make_tuple(64, 64, &convolve12_sse2)));
+INSTANTIATE_TEST_CASE_P(
+    SSE2, ConvolveTest,
+    ::testing::Values(
+        make_tuple(4, 4, &convolve8_sse2), make_tuple(8, 4, &convolve8_sse2),
+        make_tuple(4, 8, &convolve8_sse2), make_tuple(8, 8, &convolve8_sse2),
+        make_tuple(16, 8, &convolve8_sse2), make_tuple(8, 16, &convolve8_sse2),
+        make_tuple(16, 16, &convolve8_sse2),
+        make_tuple(32, 16, &convolve8_sse2),
+        make_tuple(16, 32, &convolve8_sse2),
+        make_tuple(32, 32, &convolve8_sse2),
+        make_tuple(64, 32, &convolve8_sse2),
+        make_tuple(32, 64, &convolve8_sse2),
+        make_tuple(64, 64, &convolve8_sse2), make_tuple(4, 4, &convolve10_sse2),
+        make_tuple(8, 4, &convolve10_sse2), make_tuple(4, 8, &convolve10_sse2),
+        make_tuple(8, 8, &convolve10_sse2), make_tuple(16, 8, &convolve10_sse2),
+        make_tuple(8, 16, &convolve10_sse2),
+        make_tuple(16, 16, &convolve10_sse2),
+        make_tuple(32, 16, &convolve10_sse2),
+        make_tuple(16, 32, &convolve10_sse2),
+        make_tuple(32, 32, &convolve10_sse2),
+        make_tuple(64, 32, &convolve10_sse2),
+        make_tuple(32, 64, &convolve10_sse2),
+        make_tuple(64, 64, &convolve10_sse2),
+        make_tuple(4, 4, &convolve12_sse2), make_tuple(8, 4, &convolve12_sse2),
+        make_tuple(4, 8, &convolve12_sse2), make_tuple(8, 8, &convolve12_sse2),
+        make_tuple(16, 8, &convolve12_sse2),
+        make_tuple(8, 16, &convolve12_sse2),
+        make_tuple(16, 16, &convolve12_sse2),
+        make_tuple(32, 16, &convolve12_sse2),
+        make_tuple(16, 32, &convolve12_sse2),
+        make_tuple(32, 32, &convolve12_sse2),
+        make_tuple(64, 32, &convolve12_sse2),
+        make_tuple(32, 64, &convolve12_sse2),
+        make_tuple(64, 64, &convolve12_sse2)));
 #else
 const ConvolveFunctions convolve8_sse2(
 #if CONFIG_USE_X86INC
@@ -1207,167 +1111,152 @@ const ConvolveFunctions convolve8_sse2(
     vpx_convolve_copy_c, vpx_convolve_avg_c,
 #endif  // CONFIG_USE_X86INC
     vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
-    vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
-    vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
-    vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
-    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
-
-INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_sse2),
-    make_tuple(8, 4, &convolve8_sse2),
-    make_tuple(4, 8, &convolve8_sse2),
-    make_tuple(8, 8, &convolve8_sse2),
-    make_tuple(16, 8, &convolve8_sse2),
-    make_tuple(8, 16, &convolve8_sse2),
-    make_tuple(16, 16, &convolve8_sse2),
-    make_tuple(32, 16, &convolve8_sse2),
-    make_tuple(16, 32, &convolve8_sse2),
-    make_tuple(32, 32, &convolve8_sse2),
-    make_tuple(64, 32, &convolve8_sse2),
-    make_tuple(32, 64, &convolve8_sse2),
-    make_tuple(64, 64, &convolve8_sse2)));
+    vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2, vpx_convolve8_sse2,
+    vpx_convolve8_avg_sse2, vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+    vpx_scaled_vert_c, vpx_scaled_avg_vert_c, vpx_scaled_2d_c,
+    vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest,
+                        ::testing::Values(make_tuple(4, 4, &convolve8_sse2),
+                                          make_tuple(8, 4, &convolve8_sse2),
+                                          make_tuple(4, 8, &convolve8_sse2),
+                                          make_tuple(8, 8, &convolve8_sse2),
+                                          make_tuple(16, 8, &convolve8_sse2),
+                                          make_tuple(8, 16, &convolve8_sse2),
+                                          make_tuple(16, 16, &convolve8_sse2),
+                                          make_tuple(32, 16, &convolve8_sse2),
+                                          make_tuple(16, 32, &convolve8_sse2),
+                                          make_tuple(32, 32, &convolve8_sse2),
+                                          make_tuple(64, 32, &convolve8_sse2),
+                                          make_tuple(32, 64, &convolve8_sse2),
+                                          make_tuple(64, 64, &convolve8_sse2)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 #endif
 
 #if HAVE_SSSE3
 const ConvolveFunctions convolve8_ssse3(
-    vpx_convolve_copy_c, vpx_convolve_avg_c,
-    vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
-    vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
-    vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
-    vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
-    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
-
-INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_ssse3),
-    make_tuple(8, 4, &convolve8_ssse3),
-    make_tuple(4, 8, &convolve8_ssse3),
-    make_tuple(8, 8, &convolve8_ssse3),
-    make_tuple(16, 8, &convolve8_ssse3),
-    make_tuple(8, 16, &convolve8_ssse3),
-    make_tuple(16, 16, &convolve8_ssse3),
-    make_tuple(32, 16, &convolve8_ssse3),
-    make_tuple(16, 32, &convolve8_ssse3),
-    make_tuple(32, 32, &convolve8_ssse3),
-    make_tuple(64, 32, &convolve8_ssse3),
-    make_tuple(32, 64, &convolve8_ssse3),
-    make_tuple(64, 64, &convolve8_ssse3)));
+    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_ssse3,
+    vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_ssse3,
+    vpx_convolve8_avg_vert_ssse3, vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest,
+                        ::testing::Values(make_tuple(4, 4, &convolve8_ssse3),
+                                          make_tuple(8, 4, &convolve8_ssse3),
+                                          make_tuple(4, 8, &convolve8_ssse3),
+                                          make_tuple(8, 8, &convolve8_ssse3),
+                                          make_tuple(16, 8, &convolve8_ssse3),
+                                          make_tuple(8, 16, &convolve8_ssse3),
+                                          make_tuple(16, 16, &convolve8_ssse3),
+                                          make_tuple(32, 16, &convolve8_ssse3),
+                                          make_tuple(16, 32, &convolve8_ssse3),
+                                          make_tuple(32, 32, &convolve8_ssse3),
+                                          make_tuple(64, 32, &convolve8_ssse3),
+                                          make_tuple(32, 64, &convolve8_ssse3),
+                                          make_tuple(64, 64,
+                                                     &convolve8_ssse3)));
 #endif
 
 #if HAVE_AVX2 && HAVE_SSSE3
 const ConvolveFunctions convolve8_avx2(
-    vpx_convolve_copy_c, vpx_convolve_avg_c,
-    vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
-    vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
-    vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
-    vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
-    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
-
-INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_avx2),
-    make_tuple(8, 4, &convolve8_avx2),
-    make_tuple(4, 8, &convolve8_avx2),
-    make_tuple(8, 8, &convolve8_avx2),
-    make_tuple(8, 16, &convolve8_avx2),
-    make_tuple(16, 8, &convolve8_avx2),
-    make_tuple(16, 16, &convolve8_avx2),
-    make_tuple(32, 16, &convolve8_avx2),
-    make_tuple(16, 32, &convolve8_avx2),
-    make_tuple(32, 32, &convolve8_avx2),
-    make_tuple(64, 32, &convolve8_avx2),
-    make_tuple(32, 64, &convolve8_avx2),
-    make_tuple(64, 64, &convolve8_avx2)));
+    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_avx2,
+    vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_avx2,
+    vpx_convolve8_avg_vert_ssse3, vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest,
+                        ::testing::Values(make_tuple(4, 4, &convolve8_avx2),
+                                          make_tuple(8, 4, &convolve8_avx2),
+                                          make_tuple(4, 8, &convolve8_avx2),
+                                          make_tuple(8, 8, &convolve8_avx2),
+                                          make_tuple(8, 16, &convolve8_avx2),
+                                          make_tuple(16, 8, &convolve8_avx2),
+                                          make_tuple(16, 16, &convolve8_avx2),
+                                          make_tuple(32, 16, &convolve8_avx2),
+                                          make_tuple(16, 32, &convolve8_avx2),
+                                          make_tuple(32, 32, &convolve8_avx2),
+                                          make_tuple(64, 32, &convolve8_avx2),
+                                          make_tuple(32, 64, &convolve8_avx2),
+                                          make_tuple(64, 64, &convolve8_avx2)));
 #endif  // HAVE_AVX2 && HAVE_SSSE3
 
 #if HAVE_NEON
 #if HAVE_NEON_ASM
 const ConvolveFunctions convolve8_neon(
-    vpx_convolve_copy_neon, vpx_convolve_avg_neon,
-    vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
-    vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
-    vpx_convolve8_neon, vpx_convolve8_avg_neon,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
-    vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
-    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
-#else  // HAVE_NEON
+    vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon,
+    vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon,
+    vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon,
+    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else   // !HAVE_NEON_ASM
 const ConvolveFunctions convolve8_neon(
-    vpx_convolve_copy_neon, vpx_convolve_avg_neon,
-    vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
-    vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
-    vpx_convolve8_neon, vpx_convolve8_avg_neon,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
-    vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
-    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+    vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon,
+    vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon,
+    vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon,
+    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
 #endif  // HAVE_NEON_ASM
 
-INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_neon),
-    make_tuple(8, 4, &convolve8_neon),
-    make_tuple(4, 8, &convolve8_neon),
-    make_tuple(8, 8, &convolve8_neon),
-    make_tuple(16, 8, &convolve8_neon),
-    make_tuple(8, 16, &convolve8_neon),
-    make_tuple(16, 16, &convolve8_neon),
-    make_tuple(32, 16, &convolve8_neon),
-    make_tuple(16, 32, &convolve8_neon),
-    make_tuple(32, 32, &convolve8_neon),
-    make_tuple(64, 32, &convolve8_neon),
-    make_tuple(32, 64, &convolve8_neon),
-    make_tuple(64, 64, &convolve8_neon)));
+INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest,
+                        ::testing::Values(make_tuple(4, 4, &convolve8_neon),
+                                          make_tuple(8, 4, &convolve8_neon),
+                                          make_tuple(4, 8, &convolve8_neon),
+                                          make_tuple(8, 8, &convolve8_neon),
+                                          make_tuple(16, 8, &convolve8_neon),
+                                          make_tuple(8, 16, &convolve8_neon),
+                                          make_tuple(16, 16, &convolve8_neon),
+                                          make_tuple(32, 16, &convolve8_neon),
+                                          make_tuple(16, 32, &convolve8_neon),
+                                          make_tuple(32, 32, &convolve8_neon),
+                                          make_tuple(64, 32, &convolve8_neon),
+                                          make_tuple(32, 64, &convolve8_neon),
+                                          make_tuple(64, 64, &convolve8_neon)));
 #endif  // HAVE_NEON
 
 #if HAVE_DSPR2
 const ConvolveFunctions convolve8_dspr2(
-    vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
-    vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
-    vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
-    vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
-    vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
-    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
-
-INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_dspr2),
-    make_tuple(8, 4, &convolve8_dspr2),
-    make_tuple(4, 8, &convolve8_dspr2),
-    make_tuple(8, 8, &convolve8_dspr2),
-    make_tuple(16, 8, &convolve8_dspr2),
-    make_tuple(8, 16, &convolve8_dspr2),
-    make_tuple(16, 16, &convolve8_dspr2),
-    make_tuple(32, 16, &convolve8_dspr2),
-    make_tuple(16, 32, &convolve8_dspr2),
-    make_tuple(32, 32, &convolve8_dspr2),
-    make_tuple(64, 32, &convolve8_dspr2),
-    make_tuple(32, 64, &convolve8_dspr2),
-    make_tuple(64, 64, &convolve8_dspr2)));
+    vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2, vpx_convolve8_horiz_dspr2,
+    vpx_convolve8_avg_horiz_dspr2, vpx_convolve8_vert_dspr2,
+    vpx_convolve8_avg_vert_dspr2, vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest,
+                        ::testing::Values(make_tuple(4, 4, &convolve8_dspr2),
+                                          make_tuple(8, 4, &convolve8_dspr2),
+                                          make_tuple(4, 8, &convolve8_dspr2),
+                                          make_tuple(8, 8, &convolve8_dspr2),
+                                          make_tuple(16, 8, &convolve8_dspr2),
+                                          make_tuple(8, 16, &convolve8_dspr2),
+                                          make_tuple(16, 16, &convolve8_dspr2),
+                                          make_tuple(32, 16, &convolve8_dspr2),
+                                          make_tuple(16, 32, &convolve8_dspr2),
+                                          make_tuple(32, 32, &convolve8_dspr2),
+                                          make_tuple(64, 32, &convolve8_dspr2),
+                                          make_tuple(32, 64, &convolve8_dspr2),
+                                          make_tuple(64, 64,
+                                                     &convolve8_dspr2)));
 #endif
 
 #if HAVE_MSA
 const ConvolveFunctions convolve8_msa(
-    vpx_convolve_copy_msa, vpx_convolve_avg_msa,
-    vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
-    vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
-    vpx_convolve8_msa, vpx_convolve8_avg_msa,
-    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
-    vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
-    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
-
-INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values(
-    make_tuple(4, 4, &convolve8_msa),
-    make_tuple(8, 4, &convolve8_msa),
-    make_tuple(4, 8, &convolve8_msa),
-    make_tuple(8, 8, &convolve8_msa),
-    make_tuple(16, 8, &convolve8_msa),
-    make_tuple(8, 16, &convolve8_msa),
-    make_tuple(16, 16, &convolve8_msa),
-    make_tuple(32, 16, &convolve8_msa),
-    make_tuple(16, 32, &convolve8_msa),
-    make_tuple(32, 32, &convolve8_msa),
-    make_tuple(64, 32, &convolve8_msa),
-    make_tuple(32, 64, &convolve8_msa),
-    make_tuple(64, 64, &convolve8_msa)));
+    vpx_convolve_copy_msa, vpx_convolve_avg_msa, vpx_convolve8_horiz_msa,
+    vpx_convolve8_avg_horiz_msa, vpx_convolve8_vert_msa,
+    vpx_convolve8_avg_vert_msa, vpx_convolve8_msa, vpx_convolve8_avg_msa,
+    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(
+    MSA, ConvolveTest,
+    ::testing::Values(
+        make_tuple(4, 4, &convolve8_msa), make_tuple(8, 4, &convolve8_msa),
+        make_tuple(4, 8, &convolve8_msa), make_tuple(8, 8, &convolve8_msa),
+        make_tuple(16, 8, &convolve8_msa), make_tuple(8, 16, &convolve8_msa),
+        make_tuple(16, 16, &convolve8_msa), make_tuple(32, 16, &convolve8_msa),
+        make_tuple(16, 32, &convolve8_msa), make_tuple(32, 32, &convolve8_msa),
+        make_tuple(64, 32, &convolve8_msa), make_tuple(32, 64, &convolve8_msa),
+        make_tuple(64, 64, &convolve8_msa)));
 #endif  // HAVE_MSA
 }  // namespace
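
The instantiations above all follow googletest's value-parameterized pattern: one fixture is expanded into an independent test per (width, height, function-set) tuple, so every kernel set is exercised at every block size. A minimal, self-contained sketch of that pattern — `MyConvolveTest`, `FuncTable`, and `kTable` are illustrative names, not libvpx code, and `INSTANTIATE_TEST_CASE_P` is the legacy macro used throughout this patch (newer googletest spells it `INSTANTIATE_TEST_SUITE_P`):

```cpp
#include <tuple>
#include "gtest/gtest.h"

struct FuncTable {};  // stands in for a kernel table such as ConvolveFunctions
static const FuncTable kTable = {};

class MyConvolveTest
    : public ::testing::TestWithParam<std::tuple<int, int, const FuncTable *>> {
};

TEST_P(MyConvolveTest, CoversEveryBlockSize) {
  const int w = std::get<0>(GetParam());
  const int h = std::get<1>(GetParam());
  EXPECT_NE(std::get<2>(GetParam()), nullptr);
  EXPECT_GT(w * h, 0);  // each tuple below becomes one independent test
}

INSTANTIATE_TEST_CASE_P(Sizes, MyConvolveTest,
                        ::testing::Values(std::make_tuple(4, 4, &kTable),
                                          std::make_tuple(64, 64, &kTable)));
```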
diff --git a/test/cpu_speed_test.cc b/test/cpu_speed_test.cc
index 4e5cba1ffb3c67f5e708d53c90a794ba8908b7bc..0620f56c8a56ccd6327e67a4c5a66560ffa43942 100644
--- a/test/cpu_speed_test.cc
+++ b/test/cpu_speed_test.cc
@@ -23,10 +23,8 @@ class CpuSpeedTest
       public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
  protected:
   CpuSpeedTest()
-      : EncoderTest(GET_PARAM(0)),
-        encoding_mode_(GET_PARAM(1)),
-        set_cpu_used_(GET_PARAM(2)),
-        min_psnr_(kMaxPSNR) {}
+      : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)),
+        set_cpu_used_(GET_PARAM(2)), min_psnr_(kMaxPSNR) {}
   virtual ~CpuSpeedTest() {}
 
   virtual void SetUp() {
@@ -41,9 +39,7 @@ class CpuSpeedTest
     }
   }
 
-  virtual void BeginPassHook(unsigned int /*pass*/) {
-    min_psnr_ = kMaxPSNR;
-  }
+  virtual void BeginPassHook(unsigned int /*pass*/) { min_psnr_ = kMaxPSNR; }
 
   virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
                                   ::libvpx_test::Encoder *encoder) {
@@ -59,8 +55,7 @@ class CpuSpeedTest
   }
 
   virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
-    if (pkt->data.psnr.psnr[0] < min_psnr_)
-      min_psnr_ = pkt->data.psnr.psnr[0];
+    if (pkt->data.psnr.psnr[0] < min_psnr_) min_psnr_ = pkt->data.psnr.psnr[0];
   }
 
   ::libvpx_test::TestMode encoding_mode_;
@@ -135,8 +130,8 @@ TEST_P(CpuSpeedTest, TestLowBitrate) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(
-    CpuSpeedTest,
-    ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood),
-    ::testing::Range(0, 3));
+VP10_INSTANTIATE_TEST_CASE(CpuSpeedTest,
+                           ::testing::Values(::libvpx_test::kTwoPassGood,
+                                             ::libvpx_test::kOnePassGood),
+                           ::testing::Range(0, 3));
 }  // namespace
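
The `BeginPassHook`/`PSNRPktHook` pair reformatted above implements a simple worst-frame tracker: reset to a large sentinel (`kMaxPSNR`) at the start of each pass, then take a running minimum over per-frame PSNR packets. A standalone sketch of that bookkeeping, with illustrative names:

```cpp
#include <algorithm>
#include <cassert>
#include <initializer_list>
#include <limits>

struct MinPsnrTracker {
  double min_psnr = std::numeric_limits<double>::max();  // plays kMaxPSNR's role
  void BeginPass() { min_psnr = std::numeric_limits<double>::max(); }
  void OnPsnrPacket(double psnr) { min_psnr = std::min(min_psnr, psnr); }
};

int main() {
  MinPsnrTracker t;
  t.BeginPass();
  for (double p : { 41.2, 38.7, 43.9 }) t.OnPsnrPacket(p);
  assert(t.min_psnr == 38.7);  // the worst frame of the pass
}
```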
diff --git a/test/datarate_test.cc b/test/datarate_test.cc
index fa9de45708b37a6be15c1441a64dd946812ce35b..f46881e4bdb0e105165dcedc4244517947dc5fd1 100644
--- a/test/datarate_test.cc
+++ b/test/datarate_test.cc
@@ -18,8 +18,9 @@
 
 namespace {
 
-class DatarateTestLarge : public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class DatarateTestLarge
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
  public:
   DatarateTestLarge() : EncoderTest(GET_PARAM(0)) {}
 
@@ -71,8 +72,7 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest,
     // http://code.google.com/p/webm/issues/detail?id=496 is fixed.
     // For now the codec assumes buffer starts at starting buffer rate
     // plus one frame's time.
-    if (last_pts_ == 0)
-      duration = 1;
+    if (last_pts_ == 0) duration = 1;
 
     // Add to the buffer the bits we'd expect from a constant bitrate server.
     bits_in_buffer_model_ += static_cast<int64_t>(
@@ -83,11 +83,11 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest,
      * empty - and then stop showing frames until we've got enough bits to
      * show one. As noted in comment below (issue 495), this does not currently
      * apply to key frames. For now exclude key frames in condition below. */
-    const bool key_frame = (pkt->data.frame.flags & VPX_FRAME_IS_KEY)
-                         ? true: false;
+    const bool key_frame =
+        (pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
     if (!key_frame) {
       ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame "
-          << pkt->data.frame.pts;
+                                          << pkt->data.frame.pts;
     }
 
     const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
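
The arithmetic in this hook is a classic leaky-bucket rate model: every elapsed pts tick deposits one tick's worth of the target bitrate, every encoded frame withdraws its actual size, and a negative level on a non-key frame is flagged as a buffer underrun. A self-contained sketch with hypothetical numbers — at 600 kbps and 30 fps each tick deposits 600000/30 = 20000 bits:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const double timebase = 1.0 / 30.0;  // seconds per pts tick (30 fps)
  const int target_kbps = 600;         // plays rc_target_bitrate's role
  int64_t buffer_bits = 0;
  const int64_t frame_bits[] = { 15000, 22000, 30000 };  // hypothetical sizes
  for (int64_t bits : frame_bits) {
    // One tick elapses: the CBR "server" deposits 20000 bits.
    buffer_bits += static_cast<int64_t>(1 * timebase * target_kbps * 1000);
    buffer_bits -= bits;  // the encoded frame drains its actual size
    std::printf("buffer level: %lld bits%s\n",
                static_cast<long long>(buffer_bits),
                buffer_bits < 0 ? "  <-- underrun" : "");
  }
  // Prints levels 5000, 3000, -7000: the oversized third frame underruns.
}
```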
@@ -99,8 +99,7 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest,
     bits_total_ += frame_size_in_bits;
 
     // If first drop not set and we have a drop, set it to this time.
-    if (!first_drop_ && duration > 1)
-      first_drop_ = last_pts_ + 1;
+    if (!first_drop_ && duration > 1) first_drop_ = last_pts_ + 1;
 
     // Update the most recent pts.
     last_pts_ = pkt->data.frame.pts;
@@ -119,8 +118,8 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest,
       duration_ = (last_pts_ + 1) * timebase_;
 
       // Effective file datarate includes the time spent prebuffering.
-      effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0
-          / (cfg_.rc_buf_initial_sz / 1000.0 + duration_);
+      effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0 /
+                            (cfg_.rc_buf_initial_sz / 1000.0 + duration_);
 
       file_datarate_ = file_size_in_kb / duration_;
     }
@@ -256,8 +255,9 @@ TEST_P(DatarateTestLarge, ChangingDropFrameThresh) {
   }
 }
 
-class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+class DatarateTestVP9Large
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
  public:
   DatarateTestVP9Large() : EncoderTest(GET_PARAM(0)) {}
 
@@ -307,8 +307,8 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
     if (num_temp_layers == 2) {
       if (frame_num % 2 == 0) {
         // Layer 0: predict from L and ARF, update L.
-        frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
-                      VP8_EFLAG_NO_UPD_ARF;
+        frame_flags =
+            VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
       } else {
         // Layer 1: predict from L, G and ARF, and update G.
         frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
@@ -317,15 +317,15 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
     } else if (num_temp_layers == 3) {
       if (frame_num % 4 == 0) {
         // Layer 0: predict from L and ARF; update L.
-        frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
-                      VP8_EFLAG_NO_REF_GF;
+        frame_flags =
+            VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF;
       } else if ((frame_num - 2) % 4 == 0) {
         // Layer 1: predict from L, G, ARF; update G.
         frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
-      }  else if ((frame_num - 1) % 2 == 0) {
+      } else if ((frame_num - 1) % 2 == 0) {
         // Layer 2: predict from L, G, ARF; update none.
-        frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
-                      VP8_EFLAG_NO_UPD_LAST;
+        frame_flags =
+            VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
       }
     }
     return frame_flags;
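
The flag logic above encodes a fixed temporal-layer cadence. A hedged sketch of the frame-number-to-layer mapping it implies for the 3-layer case (period 4: TL0, TL2, TL1, TL2, ...), mirroring the three conditions in `SetFrameFlags`:

```cpp
#include <cstdio>

int main() {
  for (int frame = 0; frame < 8; ++frame) {
    // %4 == 0 -> layer 0, (n - 2) %4 == 0 -> layer 1, otherwise layer 2.
    const int tl = (frame % 4 == 0) ? 0 : ((frame - 2) % 4 == 0) ? 1 : 2;
    std::printf("frame %d -> temporal layer %d\n", frame, tl);
  }
}
```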
@@ -353,8 +353,7 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
 
   virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
                                   ::libvpx_test::Encoder *encoder) {
-    if (video->frame() == 0)
-      encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+    if (video->frame() == 0) encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
 
     if (denoiser_offon_test_) {
       ASSERT_GT(denoiser_offon_period_, 0)
@@ -374,8 +373,8 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
       vpx_svc_layer_id_t layer_id;
       layer_id.spatial_layer_id = 0;
       frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
-      layer_id.temporal_layer_id = SetLayerId(video->frame(),
-                                              cfg_.ts_number_layers);
+      layer_id.temporal_layer_id =
+          SetLayerId(video->frame(), cfg_.ts_number_layers);
       encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
     }
     const vpx_rational_t tb = video->timebase();
@@ -383,15 +382,13 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
     duration_ = 0;
   }
 
-
   virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
     // Time since last timestamp = duration.
     vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
 
     if (duration > 1) {
       // If first drop not set and we have a drop, set it to this time.
-      if (!first_drop_)
-        first_drop_ = last_pts_ + 1;
+      if (!first_drop_) first_drop_ = last_pts_ + 1;
       // Update the number of frame drops.
       num_drops_ += static_cast<int>(duration - 1);
       // Update counter for total number of frames (#frames input to encoder).
@@ -407,7 +404,7 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
 
     // Buffer should not go negative.
     ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame "
-        << pkt->data.frame.pts;
+                                        << pkt->data.frame.pts;
 
     const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
 
@@ -425,7 +422,7 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest,
 
   virtual void EndPassHook(void) {
     for (int layer = 0; layer < static_cast<int>(cfg_.ts_number_layers);
-        ++layer) {
+         ++layer) {
       duration_ = (last_pts_ + 1) * timebase_;
       if (bits_total_[layer]) {
         // Effective file datarate:
@@ -499,7 +496,7 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting444) {
     ASSERT_LE(static_cast<double>(cfg_.rc_target_bitrate),
               effective_datarate_[0] * 1.15)
         << " The datarate for the file missed the target!"
-        << cfg_.rc_target_bitrate << " "<< effective_datarate_;
+        << cfg_.rc_target_bitrate << " " << effective_datarate_;
   }
 }
 
@@ -682,11 +679,13 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting3TemporalLayersFrameDropping) {
 }
 #endif
 
-class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+class DatarateOnePassCbrSvc
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
  public:
   DatarateOnePassCbrSvc() : EncoderTest(GET_PARAM(0)) {}
   virtual ~DatarateOnePassCbrSvc() {}
+
  protected:
   virtual void SetUp() {
     InitializeConfig();
@@ -704,8 +703,7 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest,
     mismatch_psnr_ = 0.0;
     mismatch_nframes_ = 0;
   }
-  virtual void BeginPassHook(unsigned int /*pass*/) {
-  }
+  virtual void BeginPassHook(unsigned int /*pass*/) {}
   virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
                                   ::libvpx_test::Encoder *encoder) {
     if (video->frame() == 0) {
@@ -731,21 +729,19 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest,
   }
   virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
     vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
-    if (last_pts_ == 0)
-      duration = 1;
+    if (last_pts_ == 0) duration = 1;
     bits_in_buffer_model_ += static_cast<int64_t>(
         duration * timebase_ * cfg_.rc_target_bitrate * 1000);
-    const bool key_frame = (pkt->data.frame.flags & VPX_FRAME_IS_KEY)
-                         ? true: false;
+    const bool key_frame =
+        (pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
     if (!key_frame) {
       ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame "
-          << pkt->data.frame.pts;
+                                          << pkt->data.frame.pts;
     }
     const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
     bits_in_buffer_model_ -= frame_size_in_bits;
     bits_total_ += frame_size_in_bits;
-    if (!first_drop_ && duration > 1)
-      first_drop_ = last_pts_ + 1;
+    if (!first_drop_ && duration > 1) first_drop_ = last_pts_ + 1;
     last_pts_ = pkt->data.frame.pts;
     bits_in_last_frame_ = frame_size_in_bits;
     ++frame_number_;
@@ -754,22 +750,19 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest,
     if (bits_total_) {
       const double file_size_in_kb = bits_total_ / 1000.;  // bits per kilobit
       duration_ = (last_pts_ + 1) * timebase_;
-      effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0
-          / (cfg_.rc_buf_initial_sz / 1000.0 + duration_);
+      effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0 /
+                            (cfg_.rc_buf_initial_sz / 1000.0 + duration_);
       file_datarate_ = file_size_in_kb / duration_;
     }
   }
 
-  virtual void MismatchHook(const vpx_image_t *img1,
-                            const vpx_image_t *img2) {
+  virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) {
     double mismatch_psnr = compute_psnr(img1, img2);
     mismatch_psnr_ += mismatch_psnr;
     ++mismatch_nframes_;
   }
 
-  unsigned int GetMismatchFrames() {
-    return mismatch_nframes_;
-  }
+  unsigned int GetMismatchFrames() { return mismatch_nframes_; }
 
   vpx_codec_pts_t last_pts_;
   int64_t bits_in_buffer_model_;
@@ -787,38 +780,32 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest,
   int mismatch_nframes_;
 };
 static void assign_layer_bitrates(vpx_codec_enc_cfg_t *const enc_cfg,
-    const vpx_svc_extra_cfg_t *svc_params,
-    int spatial_layers,
-    int temporal_layers,
-    int temporal_layering_mode,
-    unsigned int total_rate) {
+                                  const vpx_svc_extra_cfg_t *svc_params,
+                                  int spatial_layers, int temporal_layers,
+                                  int temporal_layering_mode,
+                                  unsigned int total_rate) {
   int sl, spatial_layer_target;
   float total = 0;
-  float alloc_ratio[VPX_MAX_LAYERS] = {0};
+  float alloc_ratio[VPX_MAX_LAYERS] = { 0 };
   for (sl = 0; sl < spatial_layers; ++sl) {
     if (svc_params->scaling_factor_den[sl] > 0) {
-      alloc_ratio[sl] = (float)(svc_params->scaling_factor_num[sl] *
-          1.0 / svc_params->scaling_factor_den[sl]);
+      alloc_ratio[sl] = (float)(svc_params->scaling_factor_num[sl] * 1.0 /
+                                svc_params->scaling_factor_den[sl]);
       total += alloc_ratio[sl];
     }
   }
   for (sl = 0; sl < spatial_layers; ++sl) {
     enc_cfg->ss_target_bitrate[sl] = spatial_layer_target =
-        (unsigned int)(enc_cfg->rc_target_bitrate *
-            alloc_ratio[sl] / total);
+        (unsigned int)(enc_cfg->rc_target_bitrate * alloc_ratio[sl] / total);
     const int index = sl * temporal_layers;
     if (temporal_layering_mode == 3) {
-      enc_cfg->layer_target_bitrate[index] =
-          spatial_layer_target >> 1;
+      enc_cfg->layer_target_bitrate[index] = spatial_layer_target >> 1;
       enc_cfg->layer_target_bitrate[index + 1] =
           (spatial_layer_target >> 1) + (spatial_layer_target >> 2);
-      enc_cfg->layer_target_bitrate[index + 2] =
-          spatial_layer_target;
+      enc_cfg->layer_target_bitrate[index + 2] = spatial_layer_target;
     } else if (temporal_layering_mode == 2) {
-      enc_cfg->layer_target_bitrate[index] =
-          spatial_layer_target * 2 / 3;
-      enc_cfg->layer_target_bitrate[index + 1] =
-          spatial_layer_target;
+      enc_cfg->layer_target_bitrate[index] = spatial_layer_target * 2 / 3;
+      enc_cfg->layer_target_bitrate[index + 1] = spatial_layer_target;
     }
   }
 }
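
A worked example helps here, since `assign_layer_bitrates()` mixes two allocations: spatial shares proportional to the scaling factors, then cumulative temporal targets within each spatial share. A sketch for one hypothetical config — 2 spatial layers scaled 1/2 and 1/1, `temporal_layering_mode == 3`, 800 kbps overall:

```cpp
#include <cstdio>

int main() {
  const double alloc_ratio[2] = { 0.5, 1.0 };            // num/den per layer
  const double total = alloc_ratio[0] + alloc_ratio[1];  // 1.5
  const unsigned target_kbps = 800;
  for (int sl = 0; sl < 2; ++sl) {
    const unsigned spatial = (unsigned)(target_kbps * alloc_ratio[sl] / total);
    // Mode 3 stores cumulative targets: TL0 alone gets ~1/2 of the spatial
    // share, TL0+TL1 ~3/4, and all three temporal layers the full share.
    std::printf("SL%d = %u kbps -> TL targets %u / %u / %u\n", sl, spatial,
                spatial >> 1, (spatial >> 1) + (spatial >> 2), spatial);
  }
  // Prints: SL0 = 266 -> 133 / 199 / 266 and SL1 = 533 -> 266 / 399 / 533.
}
```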
@@ -856,14 +843,14 @@ TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc) {
     cfg_.rc_target_bitrate = i;
     ResetModel();
     assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers,
-        cfg_.ts_number_layers, cfg_.temporal_layering_mode,
-        cfg_.rc_target_bitrate);
+                          cfg_.ts_number_layers, cfg_.temporal_layering_mode,
+                          cfg_.rc_target_bitrate);
     ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
     ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85)
-            << " The datarate for the file exceeds the target by too much!";
+        << " The datarate for the file exceeds the target by too much!";
     ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15)
         << " The datarate for the file is lower than the target by too much!";
-    EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+    EXPECT_EQ(GetMismatchFrames(), (unsigned int)0);
   }
 }
 
@@ -892,19 +879,19 @@ TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc4threads) {
   // TODO(wonkap/marpan): No frame drop for now, we need to implement correct
   // frame dropping for SVC.
   cfg_.rc_dropframe_thresh = 0;
-  ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720,
-                                       30, 1, 0, 300);
+  ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720, 30,
+                                       1, 0, 300);
   cfg_.rc_target_bitrate = 800;
   ResetModel();
   assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers,
-      cfg_.ts_number_layers, cfg_.temporal_layering_mode,
-      cfg_.rc_target_bitrate);
+                        cfg_.ts_number_layers, cfg_.temporal_layering_mode,
+                        cfg_.rc_target_bitrate);
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
   ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85)
-          << " The datarate for the file exceeds the target by too much!";
+      << " The datarate for the file exceeds the target by too much!";
   ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15)
       << " The datarate for the file is lower than the target by too much!";
-  EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+  EXPECT_EQ(GetMismatchFrames(), (unsigned int)0);
 }
 
 VP10_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
diff --git a/test/dct16x16_test.cc b/test/dct16x16_test.cc
index 225815a46d0aa4dc1f17f994d04069f4dc175243..fc3fc23c7e1a757ca72f4ee01bc5b64653d20219 100644
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -62,16 +62,16 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) {
   double temp1, temp2;
 
   // step 1
-  step[ 0] = input[0] + input[15];
-  step[ 1] = input[1] + input[14];
-  step[ 2] = input[2] + input[13];
-  step[ 3] = input[3] + input[12];
-  step[ 4] = input[4] + input[11];
-  step[ 5] = input[5] + input[10];
-  step[ 6] = input[6] + input[ 9];
-  step[ 7] = input[7] + input[ 8];
-  step[ 8] = input[7] - input[ 8];
-  step[ 9] = input[6] - input[ 9];
+  step[0] = input[0] + input[15];
+  step[1] = input[1] + input[14];
+  step[2] = input[2] + input[13];
+  step[3] = input[3] + input[12];
+  step[4] = input[4] + input[11];
+  step[5] = input[5] + input[10];
+  step[6] = input[6] + input[9];
+  step[7] = input[7] + input[8];
+  step[8] = input[7] - input[8];
+  step[9] = input[6] - input[9];
   step[10] = input[5] - input[10];
   step[11] = input[4] - input[11];
   step[12] = input[3] - input[12];
@@ -89,13 +89,13 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) {
   output[6] = step[1] - step[6];
   output[7] = step[0] - step[7];
 
-  temp1 = step[ 8] * C7;
+  temp1 = step[8] * C7;
   temp2 = step[15] * C9;
-  output[ 8] = temp1 + temp2;
+  output[8] = temp1 + temp2;
 
-  temp1 = step[ 9] * C11;
+  temp1 = step[9] * C11;
   temp2 = step[14] * C5;
-  output[ 9] = temp1 - temp2;
+  output[9] = temp1 - temp2;
 
   temp1 = step[10] * C3;
   temp2 = step[13] * C13;
@@ -113,40 +113,40 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) {
   temp2 = step[13] * C3;
   output[13] = temp2 - temp1;
 
-  temp1 = step[ 9] * C5;
+  temp1 = step[9] * C5;
   temp2 = step[14] * C11;
   output[14] = temp2 + temp1;
 
-  temp1 = step[ 8] * C9;
+  temp1 = step[8] * C9;
   temp2 = step[15] * C7;
   output[15] = temp2 - temp1;
 
   // step 3
-  step[ 0] = output[0] + output[3];
-  step[ 1] = output[1] + output[2];
-  step[ 2] = output[1] - output[2];
-  step[ 3] = output[0] - output[3];
+  step[0] = output[0] + output[3];
+  step[1] = output[1] + output[2];
+  step[2] = output[1] - output[2];
+  step[3] = output[0] - output[3];
 
   temp1 = output[4] * C14;
   temp2 = output[7] * C2;
-  step[ 4] = temp1 + temp2;
+  step[4] = temp1 + temp2;
 
   temp1 = output[5] * C10;
   temp2 = output[6] * C6;
-  step[ 5] = temp1 + temp2;
+  step[5] = temp1 + temp2;
 
   temp1 = output[5] * C6;
   temp2 = output[6] * C10;
-  step[ 6] = temp2 - temp1;
+  step[6] = temp2 - temp1;
 
   temp1 = output[4] * C2;
   temp2 = output[7] * C14;
-  step[ 7] = temp2 - temp1;
+  step[7] = temp2 - temp1;
 
-  step[ 8] = output[ 8] + output[11];
-  step[ 9] = output[ 9] + output[10];
-  step[10] = output[ 9] - output[10];
-  step[11] = output[ 8] - output[11];
+  step[8] = output[8] + output[11];
+  step[9] = output[9] + output[10];
+  step[10] = output[9] - output[10];
+  step[11] = output[8] - output[11];
 
   step[12] = output[12] + output[15];
   step[13] = output[13] + output[14];
@@ -154,25 +154,25 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) {
   step[15] = output[12] - output[15];
 
   // step 4
-  output[ 0] = (step[ 0] + step[ 1]);
-  output[ 8] = (step[ 0] - step[ 1]);
+  output[0] = (step[0] + step[1]);
+  output[8] = (step[0] - step[1]);
 
   temp1 = step[2] * C12;
   temp2 = step[3] * C4;
   temp1 = temp1 + temp2;
-  output[ 4] = 2*(temp1 * C8);
+  output[4] = 2 * (temp1 * C8);
 
   temp1 = step[2] * C4;
   temp2 = step[3] * C12;
   temp1 = temp2 - temp1;
   output[12] = 2 * (temp1 * C8);
 
-  output[ 2] = 2 * ((step[4] + step[ 5]) * C8);
-  output[14] = 2 * ((step[7] - step[ 6]) * C8);
+  output[2] = 2 * ((step[4] + step[5]) * C8);
+  output[14] = 2 * ((step[7] - step[6]) * C8);
 
   temp1 = step[4] - step[5];
   temp2 = step[6] + step[7];
-  output[ 6] = (temp1 + temp2);
+  output[6] = (temp1 + temp2);
   output[10] = (temp1 - temp2);
 
   intermediate[8] = step[8] + step[14];
@@ -188,18 +188,18 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) {
   temp1 = temp2 + temp1;
   output[13] = 2 * (temp1 * C8);
 
-  output[ 9] = 2 * ((step[10] + step[11]) * C8);
+  output[9] = 2 * ((step[10] + step[11]) * C8);
 
   intermediate[11] = step[10] - step[11];
   intermediate[12] = step[12] + step[13];
   intermediate[13] = step[12] - step[13];
-  intermediate[14] = step[ 8] - step[14];
-  intermediate[15] = step[ 9] - step[15];
+  intermediate[14] = step[8] - step[14];
+  intermediate[15] = step[9] - step[15];
 
   output[15] = (intermediate[11] + intermediate[12]);
-  output[ 1] = -(intermediate[11] - intermediate[12]);
+  output[1] = -(intermediate[11] - intermediate[12]);
 
-  output[ 7] = 2 * (intermediate[13] * C8);
+  output[7] = 2 * (intermediate[13] * C8);
 
   temp1 = intermediate[14] * C12;
   temp2 = intermediate[15] * C4;
@@ -209,28 +209,24 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) {
   temp1 = intermediate[14] * C4;
   temp2 = intermediate[15] * C12;
   temp1 = temp2 + temp1;
-  output[ 5] = 2 * (temp1 * C8);
+  output[5] = 2 * (temp1 * C8);
 }
 
 void reference_16x16_dct_2d(int16_t input[256], double output[256]) {
   // First transform columns
   for (int i = 0; i < 16; ++i) {
     double temp_in[16], temp_out[16];
-    for (int j = 0; j < 16; ++j)
-      temp_in[j] = input[j * 16 + i];
+    for (int j = 0; j < 16; ++j) temp_in[j] = input[j * 16 + i];
     butterfly_16x16_dct_1d(temp_in, temp_out);
-    for (int j = 0; j < 16; ++j)
-      output[j * 16 + i] = temp_out[j];
+    for (int j = 0; j < 16; ++j) output[j * 16 + i] = temp_out[j];
   }
   // Then transform rows
   for (int i = 0; i < 16; ++i) {
     double temp_in[16], temp_out[16];
-    for (int j = 0; j < 16; ++j)
-      temp_in[j] = output[j + i * 16];
+    for (int j = 0; j < 16; ++j) temp_in[j] = output[j + i * 16];
     butterfly_16x16_dct_1d(temp_in, temp_out);
     // Scale by some magic number
-    for (int j = 0; j < 16; ++j)
-      output[j + i * 16] = temp_out[j]/2;
+    for (int j = 0; j < 16; ++j) output[j + i * 16] = temp_out[j] / 2;
   }
 }
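
`reference_16x16_dct_2d()` above (like its 32x32 counterpart later in this patch) relies on the separable-transform identity: a 2-D DCT is a 1-D DCT down each column followed by a 1-D DCT across each row. A generic sketch of that structure — `separable_2d` and its callback are illustrative, not libvpx API:

```cpp
#include <functional>

// Apply a 1-D transform to every column, then to every row, of an N x N
// block stored row-major; `in` and `out` must not alias in the first pass.
template <int N>
void separable_2d(const double *in, double *out,
                  const std::function<void(const double *, double *)> &t1d) {
  double tmp_in[N], tmp_out[N];
  for (int i = 0; i < N; ++i) {  // first pass: columns
    for (int j = 0; j < N; ++j) tmp_in[j] = in[j * N + i];
    t1d(tmp_in, tmp_out);
    for (int j = 0; j < N; ++j) out[j * N + i] = tmp_out[j];
  }
  for (int i = 0; i < N; ++i) {  // second pass: rows, in place
    for (int j = 0; j < N; ++j) tmp_in[j] = out[i * N + j];
    t1d(tmp_in, tmp_out);
    for (int j = 0; j < N; ++j) out[i * N + j] = tmp_out[j];
  }
}
```

`butterfly_16x16_dct_1d` plays the `t1d` role above; the trailing /2 (and /4 in the 32x32 file) appears to keep the floating-point reference on the same output scale as the integer transforms it is compared against.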
 
@@ -256,8 +252,7 @@ void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
   vpx_idct16x16_256_add_c(in, dest, stride);
 }
 
-void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
-                  int tx_type) {
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
   vp10_fht16x16_c(in, out, stride, tx_type);
 }
 
@@ -359,11 +354,10 @@ class Trans16x16TestBase {
         }
       }
 
-      ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
-                                          test_temp_block, pitch_));
+      ASM_REGISTER_STATE_CHECK(
+          RunFwdTxfm(test_input_block, test_temp_block, pitch_));
       if (bit_depth_ == VPX_BITS_8) {
-        ASM_REGISTER_STATE_CHECK(
-            RunInvTxfm(test_temp_block, dst, pitch_));
+        ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
@@ -374,18 +368,17 @@ class Trans16x16TestBase {
       for (int j = 0; j < kNumCoeffs; ++j) {
 #if CONFIG_VPX_HIGHBITDEPTH
         const uint32_t diff =
-            bit_depth_ == VPX_BITS_8 ?  dst[j] - src[j] : dst16[j] - src16[j];
+            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
         const uint32_t diff = dst[j] - src[j];
 #endif
         const uint32_t error = diff * diff;
-        if (max_error < error)
-          max_error = error;
+        if (max_error < error) max_error = error;
         total_error += error;
       }
     }
 
-    EXPECT_GE(1u  << 2 * (bit_depth_ - 8), max_error)
+    EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
         << "Error: 16x16 FHT/IHT has an individual round trip error > 1";
 
     EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
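
The round-trip bound in the first `EXPECT_GE` above widens with bit depth: `1u << 2 * (bit_depth_ - 8)` permits a per-pixel squared error of 1 at 8 bits, 16 at 10 bits, and 256 at 12 bits — one pixel step at each depth, since an 8-bit step spans 4 steps at 10 bits and 16 at 12. A quick check of that arithmetic:

```cpp
#include <cstdio>

int main() {
  for (int bd : { 8, 10, 12 }) {
    std::printf("bit depth %2d: max squared error %u\n", bd,
                1u << (2 * (bd - 8)));  // prints 1, 16, 256
  }
}
```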
@@ -426,16 +419,14 @@ class Trans16x16TestBase {
         input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
       }
       if (i == 0) {
-        for (int j = 0; j < kNumCoeffs; ++j)
-          input_extreme_block[j] = mask_;
+        for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_;
       } else if (i == 1) {
-        for (int j = 0; j < kNumCoeffs; ++j)
-          input_extreme_block[j] = -mask_;
+        for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_;
       }
 
       fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
-      ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
-                                          output_block, pitch_));
+      ASM_REGISTER_STATE_CHECK(
+          RunFwdTxfm(input_extreme_block, output_block, pitch_));
 
       // The minimum quant value is 4.
       for (int j = 0; j < kNumCoeffs; ++j) {
@@ -465,11 +456,9 @@ class Trans16x16TestBase {
         input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
       }
       if (i == 0)
-        for (int j = 0; j < kNumCoeffs; ++j)
-          input_extreme_block[j] = mask_;
+        for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_;
       if (i == 1)
-        for (int j = 0; j < kNumCoeffs; ++j)
-          input_extreme_block[j] = -mask_;
+        for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_;
 
       fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
 
@@ -492,17 +481,15 @@ class Trans16x16TestBase {
       } else {
         inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
                      tx_type_);
-        ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
-                                            CONVERT_TO_BYTEPTR(dst16), pitch_));
+        ASM_REGISTER_STATE_CHECK(
+            RunInvTxfm(output_ref_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
 #endif
       }
       if (bit_depth_ == VPX_BITS_8) {
-        for (int j = 0; j < kNumCoeffs; ++j)
-          EXPECT_EQ(ref[j], dst[j]);
+        for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref[j], dst[j]);
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
-        for (int j = 0; j < kNumCoeffs; ++j)
-          EXPECT_EQ(ref16[j], dst16[j]);
+        for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref16[j], dst16[j]);
 #endif
       }
     }
@@ -546,8 +533,8 @@ class Trans16x16TestBase {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
-        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
-                                            16));
+        ASM_REGISTER_STATE_CHECK(
+            RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), 16));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
       }
 
@@ -559,9 +546,8 @@ class Trans16x16TestBase {
         const uint32_t diff = dst[j] - src[j];
 #endif  // CONFIG_VPX_HIGHBITDEPTH
         const uint32_t error = diff * diff;
-        EXPECT_GE(1u, error)
-            << "Error: 16x16 IDCT has error " << error
-            << " at index " << j;
+        EXPECT_GE(1u, error) << "Error: 16x16 IDCT has error " << error
+                             << " at index " << j;
       }
     }
   }
@@ -603,8 +589,8 @@ class Trans16x16TestBase {
       } else {
 #if CONFIG_VPX_HIGHBITDEPTH
         ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
-        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
-                                 pitch_));
+        ASM_REGISTER_STATE_CHECK(
+            RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
       }
 
@@ -616,9 +602,8 @@ class Trans16x16TestBase {
         const uint32_t diff = dst[j] - ref[j];
 #endif  // CONFIG_VPX_HIGHBITDEPTH
         const uint32_t error = diff * diff;
-        EXPECT_EQ(0u, error)
-            << "Error: 16x16 IDCT Comparison has error " << error
-            << " at index " << j;
+        EXPECT_EQ(0u, error) << "Error: 16x16 IDCT Comparison has error "
+                             << error << " at index " << j;
       }
     }
   }
@@ -631,32 +616,25 @@ class Trans16x16TestBase {
   IhtFunc inv_txfm_ref;
 };
 
-class Trans16x16DCT
-    : public Trans16x16TestBase,
-      public ::testing::TestWithParam<Dct16x16Param> {
+class Trans16x16DCT : public Trans16x16TestBase,
+                      public ::testing::TestWithParam<Dct16x16Param> {
  public:
   virtual ~Trans16x16DCT() {}
 
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
     inv_txfm_ = GET_PARAM(1);
-    tx_type_  = GET_PARAM(2);
+    tx_type_ = GET_PARAM(2);
     bit_depth_ = GET_PARAM(3);
-    pitch_    = 16;
+    pitch_ = 16;
     fwd_txfm_ref = fdct16x16_ref;
     inv_txfm_ref = idct16x16_ref;
     mask_ = (1 << bit_depth_) - 1;
 #if CONFIG_VPX_HIGHBITDEPTH
     switch (bit_depth_) {
-      case VPX_BITS_10:
-        inv_txfm_ref = idct16x16_10_ref;
-        break;
-      case VPX_BITS_12:
-        inv_txfm_ref = idct16x16_12_ref;
-        break;
-      default:
-        inv_txfm_ref = idct16x16_ref;
-        break;
+      case VPX_BITS_10: inv_txfm_ref = idct16x16_10_ref; break;
+      case VPX_BITS_12: inv_txfm_ref = idct16x16_12_ref; break;
+      default: inv_txfm_ref = idct16x16_ref; break;
     }
 #else
     inv_txfm_ref = idct16x16_ref;
@@ -676,17 +654,11 @@ class Trans16x16DCT
   IdctFunc inv_txfm_;
 };
 
-TEST_P(Trans16x16DCT, AccuracyCheck) {
-  RunAccuracyCheck();
-}
+TEST_P(Trans16x16DCT, AccuracyCheck) { RunAccuracyCheck(); }
 
-TEST_P(Trans16x16DCT, CoeffCheck) {
-  RunCoeffCheck();
-}
+TEST_P(Trans16x16DCT, CoeffCheck) { RunCoeffCheck(); }
 
-TEST_P(Trans16x16DCT, MemCheck) {
-  RunMemCheck();
-}
+TEST_P(Trans16x16DCT, MemCheck) { RunMemCheck(); }
 
 TEST_P(Trans16x16DCT, QuantCheck) {
   // Use maximally allowed quantization step sizes for DC and AC
@@ -694,36 +666,27 @@ TEST_P(Trans16x16DCT, QuantCheck) {
   RunQuantCheck(1336, 1828);
 }
 
-TEST_P(Trans16x16DCT, InvAccuracyCheck) {
-  RunInvAccuracyCheck();
-}
+TEST_P(Trans16x16DCT, InvAccuracyCheck) { RunInvAccuracyCheck(); }
 
-class Trans16x16HT
-    : public Trans16x16TestBase,
-      public ::testing::TestWithParam<Ht16x16Param> {
+class Trans16x16HT : public Trans16x16TestBase,
+                     public ::testing::TestWithParam<Ht16x16Param> {
  public:
   virtual ~Trans16x16HT() {}
 
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
     inv_txfm_ = GET_PARAM(1);
-    tx_type_  = GET_PARAM(2);
+    tx_type_ = GET_PARAM(2);
     bit_depth_ = GET_PARAM(3);
-    pitch_    = 16;
+    pitch_ = 16;
     fwd_txfm_ref = fht16x16_ref;
     inv_txfm_ref = iht16x16_ref;
     mask_ = (1 << bit_depth_) - 1;
 #if CONFIG_VPX_HIGHBITDEPTH
     switch (bit_depth_) {
-      case VPX_BITS_10:
-        inv_txfm_ref = iht16x16_10;
-        break;
-      case VPX_BITS_12:
-        inv_txfm_ref = iht16x16_12;
-        break;
-      default:
-        inv_txfm_ref = iht16x16_ref;
-        break;
+      case VPX_BITS_10: inv_txfm_ref = iht16x16_10; break;
+      case VPX_BITS_12: inv_txfm_ref = iht16x16_12; break;
+      default: inv_txfm_ref = iht16x16_ref; break;
     }
 #else
     inv_txfm_ref = iht16x16_ref;
@@ -743,17 +706,11 @@ class Trans16x16HT
   IhtFunc inv_txfm_;
 };
 
-TEST_P(Trans16x16HT, AccuracyCheck) {
-  RunAccuracyCheck();
-}
+TEST_P(Trans16x16HT, AccuracyCheck) { RunAccuracyCheck(); }
 
-TEST_P(Trans16x16HT, CoeffCheck) {
-  RunCoeffCheck();
-}
+TEST_P(Trans16x16HT, CoeffCheck) { RunCoeffCheck(); }
 
-TEST_P(Trans16x16HT, MemCheck) {
-  RunMemCheck();
-}
+TEST_P(Trans16x16HT, MemCheck) { RunMemCheck(); }
 
 TEST_P(Trans16x16HT, QuantCheck) {
   // The encoder skips any non-DC intra prediction modes,
@@ -761,9 +718,8 @@ TEST_P(Trans16x16HT, QuantCheck) {
   RunQuantCheck(429, 729);
 }
 
-class InvTrans16x16DCT
-    : public Trans16x16TestBase,
-      public ::testing::TestWithParam<Idct16x16Param> {
+class InvTrans16x16DCT : public Trans16x16TestBase,
+                         public ::testing::TestWithParam<Idct16x16Param> {
  public:
   virtual ~InvTrans16x16DCT() {}
 
@@ -774,7 +730,7 @@ class InvTrans16x16DCT
     bit_depth_ = GET_PARAM(3);
     pitch_ = 16;
     mask_ = (1 << bit_depth_) - 1;
-}
+  }
   virtual void TearDown() { libvpx_test::ClearSystemState(); }
 
  protected:
@@ -802,10 +758,10 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
         make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
 #else
-INSTANTIATE_TEST_CASE_P(
-    C, Trans16x16DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(C, Trans16x16DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct16x16_c,
+                                                     &vpx_idct16x16_256_add_c,
+                                                     0, VPX_BITS_8)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -837,86 +793,77 @@ INSTANTIATE_TEST_CASE_P(
 #if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, Trans16x16DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_neon,
+                                 0, VPX_BITS_8)));
 #endif
 
 #if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct16x16_sse2,
-                   &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vpx_fdct16x16_sse2,
+                                 &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16HT,
-    ::testing::Values(
-        make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 0,
-                   VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 1,
-                   VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 2,
-                   VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 3,
-                   VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vp10_fht16x16_sse2,
+                                 &vp10_iht16x16_256_add_sse2, 0, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_sse2,
+                                 &vp10_iht16x16_256_add_sse2, 1, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_sse2,
+                                 &vp10_iht16x16_256_add_sse2, 2, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_sse2,
+                                 &vp10_iht16x16_256_add_sse2, 3, VPX_BITS_8)));
 #endif  // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16DCT,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct16x16_sse2,
-                   &idct16x16_10, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct16x16_c,
-                   &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct16x16_sse2,
-                   &idct16x16_12, 0, VPX_BITS_12),
-        make_tuple(&vpx_highbd_fdct16x16_c,
-                   &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
-        make_tuple(&vpx_fdct16x16_sse2,
-                   &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+        make_tuple(&vpx_highbd_fdct16x16_sse2, &idct16x16_10, 0, VPX_BITS_10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_256_add_10_sse2, 0,
+                   VPX_BITS_10),
+        make_tuple(&vpx_highbd_fdct16x16_sse2, &idct16x16_12, 0, VPX_BITS_12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_256_add_12_sse2, 0,
+                   VPX_BITS_12),
+        make_tuple(&vpx_fdct16x16_sse2, &vpx_idct16x16_256_add_c, 0,
+                   VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16HT,
-    ::testing::Values(
-        make_tuple(&vp10_fht16x16_sse2,
-                   &vp10_iht16x16_256_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_sse2,
-                   &vp10_iht16x16_256_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_sse2,
-                   &vp10_iht16x16_256_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c, 3,
-                   VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+                                 0, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+                                 1, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+                                 2, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+                                 3, VPX_BITS_8)));
 // Optimizations take effect at a threshold of 3155, so we use a value close to
 // that to test both branches.
 INSTANTIATE_TEST_CASE_P(
     SSE2, InvTrans16x16DCT,
-    ::testing::Values(
-        make_tuple(&idct16x16_10_add_10_c,
-                   &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
-        make_tuple(&idct16x16_10,
-                   &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
-        make_tuple(&idct16x16_10_add_12_c,
-                   &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
-        make_tuple(&idct16x16_12,
-                   &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+    ::testing::Values(make_tuple(&idct16x16_10_add_10_c,
+                                 &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+                      make_tuple(&idct16x16_10, &idct16x16_256_add_10_sse2,
+                                 3167, VPX_BITS_10),
+                      make_tuple(&idct16x16_10_add_12_c,
+                                 &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+                      make_tuple(&idct16x16_12, &idct16x16_256_add_12_sse2,
+                                 3167, VPX_BITS_12)));
 #endif  // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    MSA, Trans16x16DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct16x16_msa,
-                   &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(MSA, Trans16x16DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct16x16_msa,
+                                                     &vpx_idct16x16_256_add_msa,
+                                                     0, VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans16x16HT,
-    ::testing::Values(
-        make_tuple(&vp10_fht16x16_msa,
-                   &vp10_iht16x16_256_add_msa, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_msa,
-                   &vp10_iht16x16_256_add_msa, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_msa,
-                   &vp10_iht16x16_256_add_msa, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_msa,
-                   &vp10_iht16x16_256_add_msa, 3, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+                                 0, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+                                 1, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+                                 2, VPX_BITS_8),
+                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+                                 3, VPX_BITS_8)));
 #endif  // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/dct32x32_test.cc b/test/dct32x32_test.cc
index a574a6450d590cac8216d02a1da97a08ceb82838..a45fab300e387d0ed7a5d7d2d1a39217e748d824 100644
--- a/test/dct32x32_test.cc
+++ b/test/dct32x32_test.cc
@@ -46,8 +46,7 @@ void reference_32x32_dct_1d(const double in[32], double out[32]) {
     out[k] = 0.0;
     for (int n = 0; n < 32; n++)
       out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 64.0);
-    if (k == 0)
-      out[k] = out[k] * kInvSqrt2;
+    if (k == 0) out[k] = out[k] * kInvSqrt2;
   }
 }
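
For reference, the function above is the standard unnormalized DCT-II with N = 32 (so the cosine argument pi*(2n+1)*k/64 is pi*(2n+1)*k/(2N)), and the DC term is scaled by 1/sqrt(2) because the k = 0 basis row otherwise has twice the squared norm of the others:

```latex
X_k = \sum_{n=0}^{N-1} x_n \cos\frac{\pi (2n+1) k}{2N}, \qquad N = 32,
\qquad X_0 \mathrel{*}= \tfrac{1}{\sqrt{2}} .
```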
 
@@ -56,21 +55,17 @@ void reference_32x32_dct_2d(const int16_t input[kNumCoeffs],
   // First transform columns
   for (int i = 0; i < 32; ++i) {
     double temp_in[32], temp_out[32];
-    for (int j = 0; j < 32; ++j)
-      temp_in[j] = input[j*32 + i];
+    for (int j = 0; j < 32; ++j) temp_in[j] = input[j * 32 + i];
     reference_32x32_dct_1d(temp_in, temp_out);
-    for (int j = 0; j < 32; ++j)
-      output[j * 32 + i] = temp_out[j];
+    for (int j = 0; j < 32; ++j) output[j * 32 + i] = temp_out[j];
   }
   // Then transform rows
   for (int i = 0; i < 32; ++i) {
     double temp_in[32], temp_out[32];
-    for (int j = 0; j < 32; ++j)
-      temp_in[j] = output[j + i*32];
+    for (int j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
     reference_32x32_dct_1d(temp_in, temp_out);
     // Scale by some magic number
-    for (int j = 0; j < 32; ++j)
-      output[j + i * 32] = temp_out[j] / 4;
+    for (int j = 0; j < 32; ++j) output[j + i * 32] = temp_out[j] / 4;
   }
 }
 
@@ -100,8 +95,8 @@ class Trans32x32Test : public ::testing::TestWithParam<Trans32x32Param> {
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
     inv_txfm_ = GET_PARAM(1);
-    version_  = GET_PARAM(2);  // 0: high precision forward transform
-                               // 1: low precision version for rd loop
+    version_ = GET_PARAM(2);  // 0: high precision forward transform
+                              // 1: low precision version for rd loop
     bit_depth_ = GET_PARAM(3);
     mask_ = (1 << bit_depth_) - 1;
   }
@@ -151,8 +146,8 @@ TEST_P(Trans32x32Test, AccuracyCheck) {
       ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32));
 #if CONFIG_VPX_HIGHBITDEPTH
     } else {
-      ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block,
-                                         CONVERT_TO_BYTEPTR(dst16), 32));
+      ASM_REGISTER_STATE_CHECK(
+          inv_txfm_(test_temp_block, CONVERT_TO_BYTEPTR(dst16), 32));
 #endif
     }
 
@@ -164,8 +159,7 @@ TEST_P(Trans32x32Test, AccuracyCheck) {
       const uint32_t diff = dst[j] - src[j];
 #endif
       const uint32_t error = diff * diff;
-      if (max_error < error)
-        max_error = error;
+      if (max_error < error) max_error = error;
       total_error += error;
     }
   }
@@ -224,11 +218,9 @@ TEST_P(Trans32x32Test, MemCheck) {
       input_extreme_block[j] = rnd.Rand8() & 1 ? mask_ : -mask_;
     }
     if (i == 0) {
-      for (int j = 0; j < kNumCoeffs; ++j)
-        input_extreme_block[j] = mask_;
+      for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_;
     } else if (i == 1) {
-      for (int j = 0; j < kNumCoeffs; ++j)
-        input_extreme_block[j] = -mask_;
+      for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_;
     }
 
     const int stride = 32;
@@ -302,9 +294,8 @@ TEST_P(Trans32x32Test, InverseAccuracy) {
       const int diff = dst[j] - src[j];
 #endif
       const int error = diff * diff;
-      EXPECT_GE(1, error)
-          << "Error: 32x32 IDCT has error " << error
-          << " at index " << j;
+      EXPECT_GE(1, error) << "Error: 32x32 IDCT has error " << error
+                          << " at index " << j;
     }
   }
 }
@@ -315,46 +306,38 @@ using std::tr1::make_tuple;
 INSTANTIATE_TEST_CASE_P(
     C, Trans32x32Test,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct32x32_c,
-                   &idct32x32_10, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct32x32_rd_c,
-                   &idct32x32_10, 1, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct32x32_c,
-                   &idct32x32_12, 0, VPX_BITS_12),
-        make_tuple(&vpx_highbd_fdct32x32_rd_c,
-                   &idct32x32_12, 1, VPX_BITS_12),
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
-        make_tuple(&vpx_fdct32x32_rd_c,
-                   &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8)));
+        make_tuple(&vpx_highbd_fdct32x32_c, &idct32x32_10, 0, VPX_BITS_10),
+        make_tuple(&vpx_highbd_fdct32x32_rd_c, &idct32x32_10, 1, VPX_BITS_10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &idct32x32_12, 0, VPX_BITS_12),
+        make_tuple(&vpx_highbd_fdct32x32_rd_c, &idct32x32_12, 1, VPX_BITS_12),
+        make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
+        make_tuple(&vpx_fdct32x32_rd_c, &vpx_idct32x32_1024_add_c, 1,
+                   VPX_BITS_8)));
 #else
 INSTANTIATE_TEST_CASE_P(
     C, Trans32x32Test,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
-        make_tuple(&vpx_fdct32x32_rd_c,
-                   &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, 0,
+                                 VPX_BITS_8),
+                      make_tuple(&vpx_fdct32x32_rd_c, &vpx_idct32x32_1024_add_c,
+                                 1, VPX_BITS_8)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, Trans32x32Test,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_neon, 0, VPX_BITS_8),
-        make_tuple(&vpx_fdct32x32_rd_c,
-                   &vpx_idct32x32_1024_add_neon, 1, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_neon,
+                                 0, VPX_BITS_8),
+                      make_tuple(&vpx_fdct32x32_rd_c,
+                                 &vpx_idct32x32_1024_add_neon, 1, VPX_BITS_8)));
 #endif  // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans32x32Test,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_sse2,
-                   &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
-        make_tuple(&vpx_fdct32x32_rd_sse2,
-                   &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_sse2,
+                                 &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
+                      make_tuple(&vpx_fdct32x32_rd_sse2,
+                                 &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
 #endif  // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
@@ -376,20 +359,18 @@ INSTANTIATE_TEST_CASE_P(
 #if HAVE_AVX2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     AVX2, Trans32x32Test,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_avx2,
-                   &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
-        make_tuple(&vpx_fdct32x32_rd_avx2,
-                   &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_avx2,
+                                 &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
+                      make_tuple(&vpx_fdct32x32_rd_avx2,
+                                 &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
 #endif  // HAVE_AVX2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans32x32Test,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_msa,
-                   &vpx_idct32x32_1024_add_msa, 0, VPX_BITS_8),
-        make_tuple(&vpx_fdct32x32_rd_msa,
-                   &vpx_idct32x32_1024_add_msa, 1, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_msa,
+                                 &vpx_idct32x32_1024_add_msa, 0, VPX_BITS_8),
+                      make_tuple(&vpx_fdct32x32_rd_msa,
+                                 &vpx_idct32x32_1024_add_msa, 1, VPX_BITS_8)));
 #endif  // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
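
The hunks above all re-wrap one gtest idiom: a Trans32x32Test instantiation per SIMD flavor, with each make_tuple() row binding a forward transform, an inverse transform, a rate-distortion flag, and a bit depth. A minimal self-contained sketch of that shape follows; every name in it is invented for illustration, and it uses std::tuple where the tree's bundled gtest still spells it std::tr1::tuple.

// Sketch only: invented names showing the Values/make_tuple pattern
// reformatted above. The bundled gtest predates the *_TEST_SUITE_P
// spellings, hence INSTANTIATE_TEST_CASE_P.
#include <gtest/gtest.h>
#include <tuple>

typedef int (*TxfmFunc)(int);  // stand-in for the fdct/idct pointers

static int ref_impl(int x) { return x + 1; }
static int opt_impl(int x) { return 1 + x; }

typedef std::tuple<TxfmFunc, TxfmFunc, int /*bit depth*/> TxfmParam;

class TxfmTest : public ::testing::TestWithParam<TxfmParam> {};

TEST_P(TxfmTest, ImplsAgree) {
  const TxfmFunc ref = std::get<0>(GetParam());
  const TxfmFunc opt = std::get<1>(GetParam());
  const int bd = std::get<2>(GetParam());
  EXPECT_EQ(ref(bd), opt(bd));
}

// One independent test instance per tuple; the first argument ("C",
// "SSE2", "NEON", ...) becomes the instance-name prefix, as above.
INSTANTIATE_TEST_CASE_P(
    C, TxfmTest, ::testing::Values(std::make_tuple(&ref_impl, &opt_impl, 8),
                                   std::make_tuple(&ref_impl, &opt_impl, 10)));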
diff --git a/test/decode_api_test.cc b/test/decode_api_test.cc
index a819da5626db099bab39e4cfb222ed56fa0a7a33..6571154fa0e36a5d6b29a0f6286b6f3262e70777 100644
--- a/test/decode_api_test.cc
+++ b/test/decode_api_test.cc
@@ -25,7 +25,7 @@ TEST(DecodeAPI, InvalidParams) {
     &vpx_codec_vp10_dx_algo,
 #endif
   };
-  uint8_t buf[1] = {0};
+  uint8_t buf[1] = { 0 };
   vpx_codec_ctx_t dec;
 
   EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_dec_init(NULL, NULL, NULL, 0));
@@ -48,8 +48,7 @@ TEST(DecodeAPI, InvalidParams) {
               vpx_codec_decode(&dec, buf, NELEMENTS(buf), NULL, 0));
     EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
               vpx_codec_decode(&dec, NULL, NELEMENTS(buf), NULL, 0));
-    EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
-              vpx_codec_decode(&dec, buf, 0, NULL, 0));
+    EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_decode(&dec, buf, 0, NULL, 0));
 
     EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&dec));
   }
diff --git a/test/decode_perf_test.cc b/test/decode_perf_test.cc
index 000f9c85c4c7c9d54a784295101ff4ae7e47126e..d8bfcbe007689a7c989cdd14f5093a1b5e698f93 100644
--- a/test/decode_perf_test.cc
+++ b/test/decode_perf_test.cc
@@ -70,8 +70,7 @@ const DecodePerfParam kVP9DecodePerfVectors[] = {
    power/temp/min max frame decode times/etc
  */
 
-class DecodePerfTest : public ::testing::TestWithParam<DecodePerfParam> {
-};
+class DecodePerfTest : public ::testing::TestWithParam<DecodePerfParam> {};
 
 TEST_P(DecodePerfTest, PerfTest) {
   const char *const video_name = GET_PARAM(VIDEO_NAME);
@@ -92,8 +91,7 @@ TEST_P(DecodePerfTest, PerfTest) {
   }
 
   vpx_usec_timer_mark(&t);
-  const double elapsed_secs = double(vpx_usec_timer_elapsed(&t))
-                              / kUsecsInSec;
+  const double elapsed_secs = double(vpx_usec_timer_elapsed(&t)) / kUsecsInSec;
   const unsigned frames = video.frame_number();
   const double fps = double(frames) / elapsed_secs;
 
@@ -111,17 +109,13 @@ TEST_P(DecodePerfTest, PerfTest) {
 INSTANTIATE_TEST_CASE_P(VP9, DecodePerfTest,
                         ::testing::ValuesIn(kVP9DecodePerfVectors));
 
-class VP9NewEncodeDecodePerfTest :
-    public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class VP9NewEncodeDecodePerfTest
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
  protected:
   VP9NewEncodeDecodePerfTest()
-      : EncoderTest(GET_PARAM(0)),
-        encoding_mode_(GET_PARAM(1)),
-        speed_(0),
-        outfile_(0),
-        out_frames_(0) {
-  }
+      : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)), speed_(0),
+        outfile_(0), out_frames_(0) {}
 
   virtual ~VP9NewEncodeDecodePerfTest() {}
 
@@ -182,9 +176,7 @@ class VP9NewEncodeDecodePerfTest :
 
   virtual bool DoDecode() { return false; }
 
-  void set_speed(unsigned int speed) {
-    speed_ = speed;
-  }
+  void set_speed(unsigned int speed) { speed_ = speed; }
 
  private:
   libvpx_test::TestMode encoding_mode_;
@@ -196,10 +188,7 @@ class VP9NewEncodeDecodePerfTest :
 struct EncodePerfTestVideo {
   EncodePerfTestVideo(const char *name_, uint32_t width_, uint32_t height_,
                       uint32_t bitrate_, int frames_)
-      : name(name_),
-        width(width_),
-        height(height_),
-        bitrate(bitrate_),
+      : name(name_), width(width_), height(height_), bitrate(bitrate_),
         frames(frames_) {}
   const char *name;
   uint32_t width;
@@ -225,10 +214,8 @@ TEST_P(VP9NewEncodeDecodePerfTest, PerfTest) {
 
   const char *video_name = kVP9EncodePerfTestVectors[i].name;
   libvpx_test::I420VideoSource video(
-      video_name,
-      kVP9EncodePerfTestVectors[i].width,
-      kVP9EncodePerfTestVectors[i].height,
-      timebase.den, timebase.num, 0,
+      video_name, kVP9EncodePerfTestVectors[i].width,
+      kVP9EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0,
       kVP9EncodePerfTestVectors[i].frames);
   set_speed(2);
 
@@ -268,6 +255,6 @@ TEST_P(VP9NewEncodeDecodePerfTest, PerfTest) {
   printf("}\n");
 }
 
-VP10_INSTANTIATE_TEST_CASE(
-  VP9NewEncodeDecodePerfTest, ::testing::Values(::libvpx_test::kTwoPassGood));
+VP10_INSTANTIATE_TEST_CASE(VP9NewEncodeDecodePerfTest,
+                           ::testing::Values(::libvpx_test::kTwoPassGood));
 }  // namespace
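
Worth noting while the timing line is re-flowed: the perf figure is plain wall-clock throughput, frames decoded divided by elapsed seconds, with kUsecsInSec converting the microsecond timer. For readers without the vpx_usec_timer helpers, the equivalent measurement with only standard C++ facilities looks like this (a sketch, not what the file does):

// Standard-library equivalent of the timer arithmetic above; the real
// test uses libvpx's vpx_usec_timer instead of <chrono>.
#include <chrono>
#include <cstdio>

int main() {
  const auto start = std::chrono::steady_clock::now();
  unsigned frames = 0;
  // ... the decode loop would run here, incrementing `frames` ...
  const auto stop = std::chrono::steady_clock::now();
  const double elapsed_secs =
      std::chrono::duration<double>(stop - start).count();
  if (elapsed_secs > 0.0)
    std::printf("decode fps: %f\n", frames / elapsed_secs);
  return 0;
}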
diff --git a/test/decode_test_driver.cc b/test/decode_test_driver.cc
index ad861c31575fe8aebead48eaccb2d876bd1366e7..3417e57b6eb99a16aa8b6c1f48fd191b533219c3 100644
--- a/test/decode_test_driver.cc
+++ b/test/decode_test_driver.cc
@@ -21,9 +21,8 @@ const char kVP8Name[] = "WebM Project VP8";
 
 vpx_codec_err_t Decoder::PeekStream(const uint8_t *cxdata, size_t size,
                                     vpx_codec_stream_info_t *stream_info) {
-  return vpx_codec_peek_stream_info(CodecInterface(),
-                                    cxdata, static_cast<unsigned int>(size),
-                                    stream_info);
+  return vpx_codec_peek_stream_info(
+      CodecInterface(), cxdata, static_cast<unsigned int>(size), stream_info);
 }
 
 vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
@@ -35,9 +34,8 @@ vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size,
   vpx_codec_err_t res_dec;
   InitOnce();
   API_REGISTER_STATE_CHECK(
-      res_dec = vpx_codec_decode(&decoder_,
-                                 cxdata, static_cast<unsigned int>(size),
-                                 user_priv, 0));
+      res_dec = vpx_codec_decode(
+          &decoder_, cxdata, static_cast<unsigned int>(size), user_priv, 0));
   return res_dec;
 }
 
@@ -67,7 +65,7 @@ void DecoderTest::HandlePeekResult(Decoder *const decoder,
 
 void DecoderTest::RunLoop(CompressedVideoSource *video,
                           const vpx_codec_dec_cfg_t &dec_cfg) {
-  Decoder* const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
+  Decoder *const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
   ASSERT_TRUE(decoder != NULL);
   bool end_of_file = false;
 
@@ -80,16 +78,14 @@ void DecoderTest::RunLoop(CompressedVideoSource *video,
     stream_info.sz = sizeof(stream_info);
 
     if (video->cxdata() != NULL) {
-      const vpx_codec_err_t res_peek = decoder->PeekStream(video->cxdata(),
-                                                           video->frame_size(),
-                                                           &stream_info);
+      const vpx_codec_err_t res_peek = decoder->PeekStream(
+          video->cxdata(), video->frame_size(), &stream_info);
       HandlePeekResult(decoder, video, res_peek);
       ASSERT_FALSE(::testing::Test::HasFailure());
 
-      vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
-                                                     video->frame_size());
-      if (!HandleDecodeResult(res_dec, *video, decoder))
-        break;
+      vpx_codec_err_t res_dec =
+          decoder->DecodeFrame(video->cxdata(), video->frame_size());
+      if (!HandleDecodeResult(res_dec, *video, decoder)) break;
     } else {
       // Signal end of the file to the decoder.
       const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
@@ -116,8 +112,6 @@ void DecoderTest::set_cfg(const vpx_codec_dec_cfg_t &dec_cfg) {
   memcpy(&cfg_, &dec_cfg, sizeof(cfg_));
 }
 
-void DecoderTest::set_flags(const vpx_codec_flags_t flags) {
-  flags_ = flags;
-}
+void DecoderTest::set_flags(const vpx_codec_flags_t flags) { flags_ = flags; }
 
 }  // namespace libvpx_test
diff --git a/test/decode_test_driver.h b/test/decode_test_driver.h
index f566c53c7d30e5d129dfd0abd804249044d56cb3..553e81ab3f255949005af56cd87a3b7e09808021 100644
--- a/test/decode_test_driver.h
+++ b/test/decode_test_driver.h
@@ -26,13 +26,11 @@ class DxDataIterator {
   explicit DxDataIterator(vpx_codec_ctx_t *decoder)
       : decoder_(decoder), iter_(NULL) {}
 
-  const vpx_image_t *Next() {
-    return vpx_codec_get_frame(decoder_, &iter_);
-  }
+  const vpx_image_t *Next() { return vpx_codec_get_frame(decoder_, &iter_); }
 
  private:
-  vpx_codec_ctx_t  *decoder_;
-  vpx_codec_iter_t  iter_;
+  vpx_codec_ctx_t *decoder_;
+  vpx_codec_iter_t iter_;
 };
 
 // Provides a simplified interface to manage one video decoding.
@@ -47,13 +45,14 @@ class Decoder {
 
   Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag,
           unsigned long deadline)  // NOLINT
-      : cfg_(cfg), flags_(flag), deadline_(deadline), init_done_(false) {
+      : cfg_(cfg),
+        flags_(flag),
+        deadline_(deadline),
+        init_done_(false) {
     memset(&decoder_, 0, sizeof(decoder_));
   }
 
-  virtual ~Decoder() {
-    vpx_codec_destroy(&decoder_);
-  }
+  virtual ~Decoder() { vpx_codec_destroy(&decoder_); }
 
   vpx_codec_err_t PeekStream(const uint8_t *cxdata, size_t size,
                              vpx_codec_stream_info_t *stream_info);
@@ -63,17 +62,11 @@ class Decoder {
   vpx_codec_err_t DecodeFrame(const uint8_t *cxdata, size_t size,
                               void *user_priv);
 
-  DxDataIterator GetDxData() {
-    return DxDataIterator(&decoder_);
-  }
+  DxDataIterator GetDxData() { return DxDataIterator(&decoder_); }
 
-  void set_deadline(unsigned long deadline) {
-    deadline_ = deadline;
-  }
+  void set_deadline(unsigned long deadline) { deadline_ = deadline; }
 
-  void Control(int ctrl_id, int arg) {
-    Control(ctrl_id, arg, VPX_CODEC_OK);
-  }
+  void Control(int ctrl_id, int arg) { Control(ctrl_id, arg, VPX_CODEC_OK); }
 
   void Control(int ctrl_id, const void *arg) {
     InitOnce();
@@ -87,7 +80,7 @@ class Decoder {
     ASSERT_EQ(expected_value, res) << DecodeError();
   }
 
-  const char* DecodeError() {
+  const char *DecodeError() {
     const char *detail = vpx_codec_error_detail(&decoder_);
     return detail ? detail : vpx_codec_error(&decoder_);
   }
@@ -97,38 +90,35 @@ class Decoder {
       vpx_get_frame_buffer_cb_fn_t cb_get,
       vpx_release_frame_buffer_cb_fn_t cb_release, void *user_priv) {
     InitOnce();
-    return vpx_codec_set_frame_buffer_functions(
-        &decoder_, cb_get, cb_release, user_priv);
+    return vpx_codec_set_frame_buffer_functions(&decoder_, cb_get, cb_release,
+                                                user_priv);
   }
 
-  const char* GetDecoderName() const {
+  const char *GetDecoderName() const {
     return vpx_codec_iface_name(CodecInterface());
   }
 
   bool IsVP8() const;
 
-  vpx_codec_ctx_t * GetDecoder() {
-    return &decoder_;
-  }
+  vpx_codec_ctx_t *GetDecoder() { return &decoder_; }
 
  protected:
-  virtual vpx_codec_iface_t* CodecInterface() const = 0;
+  virtual vpx_codec_iface_t *CodecInterface() const = 0;
 
   void InitOnce() {
     if (!init_done_) {
-      const vpx_codec_err_t res = vpx_codec_dec_init(&decoder_,
-                                                     CodecInterface(),
-                                                     &cfg_, flags_);
+      const vpx_codec_err_t res =
+          vpx_codec_dec_init(&decoder_, CodecInterface(), &cfg_, flags_);
       ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
       init_done_ = true;
     }
   }
 
-  vpx_codec_ctx_t     decoder_;
+  vpx_codec_ctx_t decoder_;
   vpx_codec_dec_cfg_t cfg_;
-  vpx_codec_flags_t   flags_;
-  unsigned int        deadline_;
-  bool                init_done_;
+  vpx_codec_flags_t flags_;
+  unsigned int deadline_;
+  bool init_done_;
 };
 
 // Common test functionality for all Decoder tests.
@@ -143,37 +133,35 @@ class DecoderTest {
   virtual void set_flags(const vpx_codec_flags_t flags);
 
   // Hook to be called before decompressing every frame.
-  virtual void PreDecodeFrameHook(const CompressedVideoSource& /*video*/,
-                                  Decoder* /*decoder*/) {}
+  virtual void PreDecodeFrameHook(const CompressedVideoSource & /*video*/,
+                                  Decoder * /*decoder*/) {}
 
   // Hook to be called to handle decode result. Return true to continue.
   virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
-                                  const CompressedVideoSource& /*video*/,
+                                  const CompressedVideoSource & /*video*/,
                                   Decoder *decoder) {
     EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
     return VPX_CODEC_OK == res_dec;
   }
 
   // Hook to be called on every decompressed frame.
-  virtual void DecompressedFrameHook(const vpx_image_t& /*img*/,
+  virtual void DecompressedFrameHook(const vpx_image_t & /*img*/,
                                      const unsigned int /*frame_number*/) {}
 
   // Hook to be called on peek result
-  virtual void HandlePeekResult(Decoder* const decoder,
+  virtual void HandlePeekResult(Decoder *const decoder,
                                 CompressedVideoSource *video,
                                 const vpx_codec_err_t res_peek);
 
  protected:
   explicit DecoderTest(const CodecFactory *codec)
-      : codec_(codec),
-        cfg_(),
-        flags_(0) {}
+      : codec_(codec), cfg_(), flags_(0) {}
 
   virtual ~DecoderTest() {}
 
   const CodecFactory *codec_;
   vpx_codec_dec_cfg_t cfg_;
-  vpx_codec_flags_t   flags_;
+  vpx_codec_flags_t flags_;
 };
 
 }  // namespace libvpx_test
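
A note on why the Decoder constructor above only memsets the context: vpx_codec_dec_init() is deferred to InitOnce(), so Peek/Decode/Control each pay the initialization exactly once on first use and construction itself cannot fail. The idiom in isolation (illustrative names, no libvpx dependency):

// Lazy one-time initialization, mirroring Decoder::InitOnce() above.
class LazyThing {
 public:
  LazyThing() : init_done_(false) {}  // cheap; no fallible work here

  void Use() {
    InitOnce();  // the first real call pays the setup cost
    // ... operate on the initialized state ...
  }

 private:
  void InitOnce() {
    if (!init_done_) {
      // expensive, possibly fallible setup goes here
      init_done_ = true;
    }
  }
  bool init_done_;
};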
diff --git a/test/denoiser_sse2_test.cc b/test/denoiser_sse2_test.cc
index efcd90fc362978e0d636f03bd4e8dec774300138..0a0fee79920bea3272c3055de900ac2e71a1170e 100644
--- a/test/denoiser_sse2_test.cc
+++ b/test/denoiser_sse2_test.cc
@@ -33,9 +33,7 @@ class VP9DenoiserTest : public ::testing::TestWithParam<BLOCK_SIZE> {
  public:
   virtual ~VP9DenoiserTest() {}
 
-  virtual void SetUp() {
-    bs_ = GetParam();
-  }
+  virtual void SetUp() { bs_ = GetParam(); }
 
   virtual void TearDown() { libvpx_test::ClearSystemState(); }
 
@@ -68,19 +66,19 @@ TEST_P(VP9DenoiserTest, BitexactCheck) {
       sig_block[j] = rnd.Rand8();
       // The pixels in mc_avg_block are generated by adding a random
       // number in range [-19, 19] to corresponding pixels in sig_block.
-      temp = sig_block[j] + ((rnd.Rand8() % 2 == 0) ? -1 : 1) *
-             (rnd.Rand8() % 20);
+      temp =
+          sig_block[j] + ((rnd.Rand8() % 2 == 0) ? -1 : 1) * (rnd.Rand8() % 20);
       // Clip.
       mc_avg_block[j] = (temp < 0) ? 0 : ((temp > 255) ? 255 : temp);
     }
 
-    ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_c(
-        sig_block, 64, mc_avg_block, 64, avg_block_c,
-        64, 0, bs_, motion_magnitude_random));
+    ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_c(sig_block, 64, mc_avg_block,
+                                                   64, avg_block_c, 64, 0, bs_,
+                                                   motion_magnitude_random));
 
     ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_sse2(
-        sig_block, 64, mc_avg_block, 64, avg_block_sse2,
-        64, 0, bs_, motion_magnitude_random));
+        sig_block, 64, mc_avg_block, 64, avg_block_sse2, 64, 0, bs_,
+        motion_magnitude_random));
 
     // Test bitexactness.
     for (int h = 0; h < (4 << b_height_log2_lookup[bs_]); ++h) {
@@ -92,10 +90,10 @@ TEST_P(VP9DenoiserTest, BitexactCheck) {
 }
 
 // Test for all block size.
-INSTANTIATE_TEST_CASE_P(
-    SSE2, VP9DenoiserTest,
-    ::testing::Values(BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8,
-                      BLOCK_8X16, BLOCK_16X8, BLOCK_16X16, BLOCK_16X32,
-                      BLOCK_32X16, BLOCK_32X32, BLOCK_32X64, BLOCK_64X32,
-                      BLOCK_64X64));
+INSTANTIATE_TEST_CASE_P(SSE2, VP9DenoiserTest,
+                        ::testing::Values(BLOCK_4X4, BLOCK_4X8, BLOCK_8X4,
+                                          BLOCK_8X8, BLOCK_8X16, BLOCK_16X8,
+                                          BLOCK_16X16, BLOCK_16X32, BLOCK_32X16,
+                                          BLOCK_32X32, BLOCK_32X64, BLOCK_64X32,
+                                          BLOCK_64X64));
 }  // namespace
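
BitexactCheck above is the house recipe for validating a SIMD kernel: generate random denoiser-shaped input, run the C reference and the optimized candidate on identical buffers, and require byte-identical output rather than any tolerance. Stripped of the vp9 specifics, the recipe reduces to the following sketch (filter_c/filter_simd are invented stand-ins):

// Skeleton of the bit-exactness check used above.
#include <cassert>
#include <cstring>

static void filter_c(const unsigned char *in, unsigned char *out, int n) {
  for (int i = 0; i < n; ++i) out[i] = (unsigned char)(in[i] >> 1);
}

static void filter_simd(const unsigned char *in, unsigned char *out, int n) {
  filter_c(in, out, n);  // a real test would call the SIMD version here
}

static void BitexactCheck(const unsigned char *in, int n) {
  unsigned char ref[4096], opt[4096];
  assert(n <= 4096);
  filter_c(in, ref, n);
  filter_simd(in, opt, n);
  assert(memcmp(ref, opt, n) == 0);  // exact match, no tolerance
}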
diff --git a/test/encode_perf_test.cc b/test/encode_perf_test.cc
index 69be9ea646db1a0f7c3df105fe3230d05e4e8978..2411dcd266cba069e637b4340b7feb641f0d2930 100644
--- a/test/encode_perf_test.cc
+++ b/test/encode_perf_test.cc
@@ -26,10 +26,7 @@ const double kUsecsInSec = 1000000.0;
 struct EncodePerfTestVideo {
   EncodePerfTestVideo(const char *name_, uint32_t width_, uint32_t height_,
                       uint32_t bitrate_, int frames_)
-      : name(name_),
-        width(width_),
-        height(height_),
-        bitrate(bitrate_),
+      : name(name_), width(width_), height(height_), bitrate(bitrate_),
         frames(frames_) {}
   const char *name;
   uint32_t width;
@@ -45,8 +42,8 @@ const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = {
   EncodePerfTestVideo("macmarcostationary_640_480_30.yuv", 640, 480, 200, 718),
   EncodePerfTestVideo("niklas_640_480_30.yuv", 640, 480, 200, 471),
   EncodePerfTestVideo("tacomanarrows_640_480_30.yuv", 640, 480, 200, 300),
-  EncodePerfTestVideo("tacomasmallcameramovement_640_480_30.yuv",
-                      640, 480, 200, 300),
+  EncodePerfTestVideo("tacomasmallcameramovement_640_480_30.yuv", 640, 480, 200,
+                      300),
   EncodePerfTestVideo("thaloundeskmtg_640_480_30.yuv", 640, 480, 200, 300),
   EncodePerfTestVideo("niklas_1280_720_30.yuv", 1280, 720, 600, 470),
 };
@@ -61,12 +58,8 @@ class VP9EncodePerfTest
       public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
  protected:
   VP9EncodePerfTest()
-      : EncoderTest(GET_PARAM(0)),
-        min_psnr_(kMaxPsnr),
-        nframes_(0),
-        encoding_mode_(GET_PARAM(1)),
-        speed_(0),
-        threads_(1) {}
+      : EncoderTest(GET_PARAM(0)), min_psnr_(kMaxPsnr), nframes_(0),
+        encoding_mode_(GET_PARAM(1)), speed_(0), threads_(1) {}
 
   virtual ~VP9EncodePerfTest() {}
 
@@ -107,24 +100,18 @@ class VP9EncodePerfTest
 
   virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
     if (pkt->data.psnr.psnr[0] < min_psnr_) {
-      min_psnr_= pkt->data.psnr.psnr[0];
+      min_psnr_ = pkt->data.psnr.psnr[0];
     }
   }
 
   // for performance reasons don't decode
   virtual bool DoDecode() { return 0; }
 
-  double min_psnr() const {
-    return min_psnr_;
-  }
+  double min_psnr() const { return min_psnr_; }
 
-  void set_speed(unsigned int speed) {
-    speed_ = speed;
-  }
+  void set_speed(unsigned int speed) { speed_ = speed; }
 
-  void set_threads(unsigned int threads) {
-    threads_ = threads;
-  }
+  void set_threads(unsigned int threads) { threads_ = threads; }
 
  private:
   double min_psnr_;
@@ -157,10 +144,8 @@ TEST_P(VP9EncodePerfTest, PerfTest) {
         const unsigned frames = kVP9EncodePerfTestVectors[i].frames;
         const char *video_name = kVP9EncodePerfTestVectors[i].name;
         libvpx_test::I420VideoSource video(
-            video_name,
-            kVP9EncodePerfTestVectors[i].width,
-            kVP9EncodePerfTestVectors[i].height,
-            timebase.den, timebase.num, 0,
+            video_name, kVP9EncodePerfTestVectors[i].width,
+            kVP9EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0,
             kVP9EncodePerfTestVectors[i].frames);
         set_speed(kEncodePerfTestSpeeds[j]);
 
@@ -197,6 +182,6 @@ TEST_P(VP9EncodePerfTest, PerfTest) {
   }
 }
 
-VP10_INSTANTIATE_TEST_CASE(
-    VP9EncodePerfTest, ::testing::Values(::libvpx_test::kRealTime));
+VP10_INSTANTIATE_TEST_CASE(VP9EncodePerfTest,
+                           ::testing::Values(::libvpx_test::kRealTime));
 }  // namespace
diff --git a/test/encode_test_driver.cc b/test/encode_test_driver.cc
index 65aac32b7834f9e0d4594a7077057a99e8742931..9c753a01952c610b7ab2902fe66bbae084245804 100644
--- a/test/encode_test_driver.cc
+++ b/test/encode_test_driver.cc
@@ -30,8 +30,7 @@ void Encoder::InitEncoder(VideoSource *video) {
     cfg_.g_timebase = video->timebase();
     cfg_.rc_twopass_stats_in = stats_->buf();
 
-    res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
-                             init_flags_);
+    res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_, init_flags_);
     ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
 
 #if CONFIG_VP10_ENCODER
@@ -56,8 +55,7 @@ void Encoder::EncodeFrame(VideoSource *video, const unsigned long frame_flags) {
   CxDataIterator iter = GetCxData();
 
   while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) {
-    if (pkt->kind != VPX_CODEC_STATS_PKT)
-      continue;
+    if (pkt->kind != VPX_CODEC_STATS_PKT) continue;
 
     stats_->Append(*pkt);
   }
@@ -77,15 +75,15 @@ void Encoder::EncodeFrameInternal(const VideoSource &video,
   }
 
   // Encode the frame
-  API_REGISTER_STATE_CHECK(
-      res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(),
-                             frame_flags, deadline_));
+  API_REGISTER_STATE_CHECK(res = vpx_codec_encode(&encoder_, img, video.pts(),
+                                                  video.duration(), frame_flags,
+                                                  deadline_));
   ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
 }
 
 void Encoder::Flush() {
-  const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0,
-                                               deadline_);
+  const vpx_codec_err_t res =
+      vpx_codec_encode(&encoder_, NULL, 0, 0, 0, deadline_);
   if (!encoder_.priv)
     ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
   else
@@ -100,22 +98,15 @@ void EncoderTest::InitializeConfig() {
 
 void EncoderTest::SetMode(TestMode mode) {
   switch (mode) {
-    case kRealTime:
-      deadline_ = VPX_DL_REALTIME;
-      break;
+    case kRealTime: deadline_ = VPX_DL_REALTIME; break;
 
     case kOnePassGood:
-    case kTwoPassGood:
-      deadline_ = VPX_DL_GOOD_QUALITY;
-      break;
+    case kTwoPassGood: deadline_ = VPX_DL_GOOD_QUALITY; break;
 
     case kOnePassBest:
-    case kTwoPassBest:
-      deadline_ = VPX_DL_BEST_QUALITY;
-      break;
+    case kTwoPassBest: deadline_ = VPX_DL_BEST_QUALITY; break;
 
-    default:
-      ASSERT_TRUE(false) << "Unexpected mode " << mode;
+    default: ASSERT_TRUE(false) << "Unexpected mode " << mode;
   }
 
   if (mode == kTwoPassGood || mode == kTwoPassBest)
@@ -125,35 +116,35 @@ void EncoderTest::SetMode(TestMode mode) {
 }
 // The function should return "true" most of the time, therefore no early
 // break-out is implemented within the match checking process.
-static bool compare_img(const vpx_image_t *img1,
-                        const vpx_image_t *img2) {
-  bool match = (img1->fmt == img2->fmt) &&
-               (img1->cs == img2->cs) &&
-               (img1->d_w == img2->d_w) &&
-               (img1->d_h == img2->d_h);
-
-  const unsigned int width_y  = img1->d_w;
+static bool compare_img(const vpx_image_t *img1, const vpx_image_t *img2) {
+  bool match = (img1->fmt == img2->fmt) && (img1->cs == img2->cs) &&
+               (img1->d_w == img2->d_w) && (img1->d_h == img2->d_h);
+
+  const unsigned int width_y = img1->d_w;
   const unsigned int height_y = img1->d_h;
   unsigned int i;
   for (i = 0; i < height_y; ++i)
     match = (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y],
                     img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y],
-                    width_y) == 0) && match;
-  const unsigned int width_uv  = (img1->d_w + 1) >> 1;
+                    width_y) == 0) &&
+            match;
+  const unsigned int width_uv = (img1->d_w + 1) >> 1;
   const unsigned int height_uv = (img1->d_h + 1) >> 1;
-  for (i = 0; i <  height_uv; ++i)
+  for (i = 0; i < height_uv; ++i)
     match = (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U],
                     img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U],
-                    width_uv) == 0) && match;
+                    width_uv) == 0) &&
+            match;
   for (i = 0; i < height_uv; ++i)
     match = (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V],
                     img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V],
-                    width_uv) == 0) && match;
+                    width_uv) == 0) &&
+            match;
   return match;
 }
 
-void EncoderTest::MismatchHook(const vpx_image_t* /*img1*/,
-                               const vpx_image_t* /*img2*/) {
+void EncoderTest::MismatchHook(const vpx_image_t * /*img1*/,
+                               const vpx_image_t * /*img2*/) {
   ASSERT_TRUE(0) << "Encode/Decode mismatch found";
 }
 
@@ -174,8 +165,8 @@ void EncoderTest::RunLoop(VideoSource *video) {
       cfg_.g_pass = VPX_RC_LAST_PASS;
 
     BeginPassHook(pass);
-    Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
-                                                   &stats_);
+    Encoder *const encoder =
+        codec_->CreateEncoder(cfg_, deadline_, init_flags_, &stats_);
     ASSERT_TRUE(encoder != NULL);
 
     video->Begin();
@@ -187,7 +178,7 @@ void EncoderTest::RunLoop(VideoSource *video) {
     // NOTE: fragment decoder and partition encoder are only supported by VP8.
     if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION)
       dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS;
-    Decoder* const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0);
+    Decoder *const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0);
     bool again;
     for (again = true; again; video->Next()) {
       again = (video->img() != NULL);
@@ -208,10 +199,9 @@ void EncoderTest::RunLoop(VideoSource *video) {
             has_cxdata = true;
             if (decoder && DoDecode()) {
               vpx_codec_err_t res_dec = decoder->DecodeFrame(
-                  (const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
+                  (const uint8_t *)pkt->data.frame.buf, pkt->data.frame.sz);
 
-              if (!HandleDecodeResult(res_dec, *video, decoder))
-                break;
+              if (!HandleDecodeResult(res_dec, *video, decoder)) break;
 
               has_dxdata = true;
             }
@@ -220,20 +210,16 @@ void EncoderTest::RunLoop(VideoSource *video) {
             FramePktHook(pkt);
             break;
 
-          case VPX_CODEC_PSNR_PKT:
-            PSNRPktHook(pkt);
-            break;
+          case VPX_CODEC_PSNR_PKT: PSNRPktHook(pkt); break;
 
-          default:
-            break;
+          default: break;
         }
       }
 
       // Flush the decoder when there are no more fragments.
       if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
         const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
-        if (!HandleDecodeResult(res_dec, *video, decoder))
-          break;
+        if (!HandleDecodeResult(res_dec, *video, decoder)) break;
       }
 
       if (has_dxdata && has_cxdata) {
@@ -246,21 +232,17 @@ void EncoderTest::RunLoop(VideoSource *video) {
             MismatchHook(img_enc, img_dec);
           }
         }
-        if (img_dec)
-          DecompressedFrameHook(*img_dec, video->pts());
+        if (img_dec) DecompressedFrameHook(*img_dec, video->pts());
       }
-      if (!Continue())
-        break;
+      if (!Continue()) break;
     }
 
     EndPassHook();
 
-    if (decoder)
-      delete decoder;
+    if (decoder) delete decoder;
     delete encoder;
 
-    if (!Continue())
-      break;
+    if (!Continue()) break;
   }
 }
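
The compare_img() rewrite above keeps the deliberate no-early-exit structure its comment calls out: each row's memcmp() result is ANDed into `match`, so the common all-equal case runs straight through without data-dependent branches. The core of that idiom as a standalone helper (invented name):

// Row-by-row plane comparison without early exit, as in compare_img().
#include <cstring>

static bool rows_equal(const unsigned char *a, int stride_a,
                       const unsigned char *b, int stride_b,
                       unsigned int width, unsigned int height) {
  bool match = true;
  for (unsigned int i = 0; i < height; ++i)
    match = (memcmp(a + i * stride_a, b + i * stride_b, width) == 0) && match;
  return match;
}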
 
diff --git a/test/encode_test_driver.h b/test/encode_test_driver.h
index e444a92919b26bf9c77e3682a300db7318245e8d..9f27e0daf0be4a1c99141ce1ed61c1994a1476fa 100644
--- a/test/encode_test_driver.h
+++ b/test/encode_test_driver.h
@@ -33,19 +33,17 @@ enum TestMode {
   kTwoPassGood,
   kTwoPassBest
 };
-#define ALL_TEST_MODES ::testing::Values(::libvpx_test::kRealTime, \
-                                         ::libvpx_test::kOnePassGood, \
-                                         ::libvpx_test::kOnePassBest, \
-                                         ::libvpx_test::kTwoPassGood, \
-                                         ::libvpx_test::kTwoPassBest)
+#define ALL_TEST_MODES                                                        \
+  ::testing::Values(::libvpx_test::kRealTime, ::libvpx_test::kOnePassGood,    \
+                    ::libvpx_test::kOnePassBest, ::libvpx_test::kTwoPassGood, \
+                    ::libvpx_test::kTwoPassBest)
 
-#define ONE_PASS_TEST_MODES ::testing::Values(::libvpx_test::kRealTime, \
-                                              ::libvpx_test::kOnePassGood, \
-                                              ::libvpx_test::kOnePassBest)
-
-#define TWO_PASS_TEST_MODES ::testing::Values(::libvpx_test::kTwoPassGood, \
-                                              ::libvpx_test::kTwoPassBest)
+#define ONE_PASS_TEST_MODES                                                \
+  ::testing::Values(::libvpx_test::kRealTime, ::libvpx_test::kOnePassGood, \
+                    ::libvpx_test::kOnePassBest)
 
+#define TWO_PASS_TEST_MODES \
+  ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kTwoPassBest)
 
 // Provides an object to handle the libvpx get_cx_data() iteration pattern
 class CxDataIterator {
@@ -58,8 +56,8 @@ class CxDataIterator {
   }
 
  private:
-  vpx_codec_ctx_t  *encoder_;
-  vpx_codec_iter_t  iter_;
+  vpx_codec_ctx_t *encoder_;
+  vpx_codec_iter_t iter_;
 };
 
 // Implements an in-memory store for libvpx twopass statistics
@@ -75,15 +73,12 @@ class TwopassStatsStore {
     return buf;
   }
 
-  void Reset() {
-    buffer_.clear();
-  }
+  void Reset() { buffer_.clear(); }
 
  protected:
-  std::string  buffer_;
+  std::string buffer_;
 };
 
-
 // Provides a simplified interface to manage one video encoding pass, given
 // a configuration and video source.
 //
@@ -97,13 +92,9 @@ class Encoder {
     memset(&encoder_, 0, sizeof(encoder_));
   }
 
-  virtual ~Encoder() {
-    vpx_codec_destroy(&encoder_);
-  }
+  virtual ~Encoder() { vpx_codec_destroy(&encoder_); }
 
-  CxDataIterator GetCxData() {
-    return CxDataIterator(&encoder_);
-  }
+  CxDataIterator GetCxData() { return CxDataIterator(&encoder_); }
 
   void InitEncoder(VideoSource *video);
 
@@ -115,9 +106,7 @@ class Encoder {
   void EncodeFrame(VideoSource *video, const unsigned long frame_flags);
 
   // Convenience wrapper for EncodeFrame()
-  void EncodeFrame(VideoSource *video) {
-    EncodeFrame(video, 0);
-  }
+  void EncodeFrame(VideoSource *video) { EncodeFrame(video, 0); }
 
   void Control(int ctrl_id, int arg) {
     const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
@@ -156,12 +145,10 @@ class Encoder {
     cfg_ = *cfg;
   }
 
-  void set_deadline(unsigned long deadline) {
-    deadline_ = deadline;
-  }
+  void set_deadline(unsigned long deadline) { deadline_ = deadline; }
 
  protected:
-  virtual vpx_codec_iface_t* CodecInterface() const = 0;
+  virtual vpx_codec_iface_t *CodecInterface() const = 0;
 
   const char *EncoderError() {
     const char *detail = vpx_codec_error_detail(&encoder_);
@@ -175,11 +162,11 @@ class Encoder {
   // Flush the encoder on EOS
   void Flush();
 
-  vpx_codec_ctx_t      encoder_;
-  vpx_codec_enc_cfg_t  cfg_;
-  unsigned long        deadline_;
-  unsigned long        init_flags_;
-  TwopassStatsStore   *stats_;
+  vpx_codec_ctx_t encoder_;
+  vpx_codec_enc_cfg_t cfg_;
+  unsigned long deadline_;
+  unsigned long init_flags_;
+  TwopassStatsStore *stats_;
 };
 
 // Common test functionality for all Encoder tests.
@@ -221,36 +208,35 @@ class EncoderTest {
   virtual void EndPassHook() {}
 
   // Hook to be called before encoding a frame.
-  virtual void PreEncodeFrameHook(VideoSource* /*video*/) {}
-  virtual void PreEncodeFrameHook(VideoSource* /*video*/,
-                                  Encoder* /*encoder*/) {}
+  virtual void PreEncodeFrameHook(VideoSource * /*video*/) {}
+  virtual void PreEncodeFrameHook(VideoSource * /*video*/,
+                                  Encoder * /*encoder*/) {}
 
   // Hook to be called on every compressed data packet.
-  virtual void FramePktHook(const vpx_codec_cx_pkt_t* /*pkt*/) {}
+  virtual void FramePktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {}
 
   // Hook to be called on every PSNR packet.
-  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t* /*pkt*/) {}
+  virtual void PSNRPktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {}
 
   // Hook to determine whether the encode loop should continue.
   virtual bool Continue() const {
     return !(::testing::Test::HasFatalFailure() || abort_);
   }
 
-  const CodecFactory   *codec_;
+  const CodecFactory *codec_;
   // Hook to determine whether to decode frame after encoding
   virtual bool DoDecode() const { return 1; }
 
   // Hook to handle encode/decode mismatch
-  virtual void MismatchHook(const vpx_image_t *img1,
-                            const vpx_image_t *img2);
+  virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2);
 
   // Hook to be called on every decompressed frame.
-  virtual void DecompressedFrameHook(const vpx_image_t& /*img*/,
+  virtual void DecompressedFrameHook(const vpx_image_t & /*img*/,
                                      vpx_codec_pts_t /*pts*/) {}
 
   // Hook to be called to handle decode result. Return true to continue.
   virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
-                                  const VideoSource& /*video*/,
+                                  const VideoSource & /*video*/,
                                   Decoder *decoder) {
     EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
     return VPX_CODEC_OK == res_dec;
@@ -262,15 +248,15 @@ class EncoderTest {
     return pkt;
   }
 
-  bool                 abort_;
-  vpx_codec_enc_cfg_t  cfg_;
-  vpx_codec_dec_cfg_t  dec_cfg_;
-  unsigned int         passes_;
-  unsigned long        deadline_;
-  TwopassStatsStore    stats_;
-  unsigned long        init_flags_;
-  unsigned long        frame_flags_;
-  vpx_codec_pts_t      last_pts_;
+  bool abort_;
+  vpx_codec_enc_cfg_t cfg_;
+  vpx_codec_dec_cfg_t dec_cfg_;
+  unsigned int passes_;
+  unsigned long deadline_;
+  TwopassStatsStore stats_;
+  unsigned long init_flags_;
+  unsigned long frame_flags_;
+  vpx_codec_pts_t last_pts_;
 };
 
 }  // namespace libvpx_test
diff --git a/test/encoder_parms_get_to_decoder.cc b/test/encoder_parms_get_to_decoder.cc
index 98fb5786e351e486411177aaae914834afeb5a8f..3dd5f86b1dcfdc2c440370e901932c888b8df993 100644
--- a/test/encoder_parms_get_to_decoder.cc
+++ b/test/encoder_parms_get_to_decoder.cc
@@ -29,7 +29,7 @@ struct EncodePerfTestVideo {
 };
 
 const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = {
-  {"niklas_1280_720_30.y4m", 1280, 720, 600, 10},
+  { "niklas_1280_720_30.y4m", 1280, 720, 600, 10 },
 };
 
 struct EncodeParameters {
@@ -45,10 +45,10 @@ struct EncodeParameters {
 };
 
 const EncodeParameters kVP9EncodeParameterSet[] = {
-  {0, 0, 0, 1, 0, VPX_CR_STUDIO_RANGE, VPX_CS_BT_601},
-  {0, 0, 0, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_709},
-  {0, 0, 1, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_2020},
-  {0, 2, 0, 0, 1, VPX_CR_STUDIO_RANGE, VPX_CS_UNKNOWN, { 640, 480 }},
+  { 0, 0, 0, 1, 0, VPX_CR_STUDIO_RANGE, VPX_CS_BT_601 },
+  { 0, 0, 0, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_709 },
+  { 0, 0, 1, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_2020 },
+  { 0, 2, 0, 0, 1, VPX_CR_STUDIO_RANGE, VPX_CS_UNKNOWN, { 640, 480 } },
   // TODO(JBB): Test profiles (requires more work).
 };
 
@@ -144,6 +144,6 @@ TEST_P(VpxEncoderParmsGetToDecoder, BitstreamParms) {
 }
 
 VP10_INSTANTIATE_TEST_CASE(VpxEncoderParmsGetToDecoder,
-                          ::testing::ValuesIn(kVP9EncodeParameterSet),
-                          ::testing::ValuesIn(kVP9EncodePerfTestVectors));
+                           ::testing::ValuesIn(kVP9EncodeParameterSet),
+                           ::testing::ValuesIn(kVP9EncodePerfTestVectors));
 }  // namespace
diff --git a/test/end_to_end_test.cc b/test/end_to_end_test.cc
index 747d2e0a1de63f08e6e1ede3677318632bd474cb..96621dbc27888319b54ba81e36e76de0158bb7f5 100644
--- a/test/end_to_end_test.cc
+++ b/test/end_to_end_test.cc
@@ -18,21 +18,17 @@
 
 namespace {
 
-const unsigned int kWidth  = 160;
+const unsigned int kWidth = 160;
 const unsigned int kHeight = 90;
 const unsigned int kFramerate = 50;
 const unsigned int kFrames = 10;
 const int kBitrate = 500;
 // List of psnr thresholds for speed settings 0-7 and 5 encoding modes
 const double kPsnrThreshold[][5] = {
-  { 36.0, 37.0, 37.0, 37.0, 37.0 },
-  { 35.0, 36.0, 36.0, 36.0, 36.0 },
-  { 34.0, 35.0, 35.0, 35.0, 35.0 },
-  { 33.0, 34.0, 34.0, 34.0, 34.0 },
-  { 32.0, 33.0, 33.0, 33.0, 33.0 },
-  { 31.0, 32.0, 32.0, 32.0, 32.0 },
-  { 30.0, 31.0, 31.0, 31.0, 31.0 },
-  { 29.0, 30.0, 30.0, 30.0, 30.0 },
+  { 36.0, 37.0, 37.0, 37.0, 37.0 }, { 35.0, 36.0, 36.0, 36.0, 36.0 },
+  { 34.0, 35.0, 35.0, 35.0, 35.0 }, { 33.0, 34.0, 34.0, 34.0, 34.0 },
+  { 32.0, 33.0, 33.0, 33.0, 33.0 }, { 31.0, 32.0, 32.0, 32.0, 32.0 },
+  { 30.0, 31.0, 31.0, 31.0, 31.0 }, { 29.0, 30.0, 30.0, 30.0, 30.0 },
 };
 
 typedef struct {
@@ -44,31 +40,30 @@ typedef struct {
 } TestVideoParam;
 
 const TestVideoParam kTestVectors[] = {
-  {"park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0},
-  {"park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1},
-  {"park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1},
-  {"park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1},
+  { "park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0 },
+  { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1 },
+  { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
+  { "park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1 },
 #if CONFIG_VPX_HIGHBITDEPTH
-  {"park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2},
-  {"park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3},
-  {"park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3},
-  {"park_joy_90p_10_440.yuv", 10, VPX_IMG_FMT_I44016, VPX_BITS_10, 3},
-  {"park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, VPX_BITS_12, 2},
-  {"park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3},
-  {"park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3},
-  {"park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3},
+  { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2 },
+  { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3 },
+  { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3 },
+  { "park_joy_90p_10_440.yuv", 10, VPX_IMG_FMT_I44016, VPX_BITS_10, 3 },
+  { "park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, VPX_BITS_12, 2 },
+  { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3 },
+  { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3 },
+  { "park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3 },
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 };
 
 // Encoding modes tested
 const libvpx_test::TestMode kEncodingModeVectors[] = {
-  ::libvpx_test::kTwoPassGood,
-  ::libvpx_test::kOnePassGood,
+  ::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood,
   ::libvpx_test::kRealTime,
 };
 
 // Speed settings tested
-const int kCpuUsedVectors[] = {1, 2, 3, 5, 6};
+const int kCpuUsedVectors[] = { 1, 2, 3, 5, 6 };
 
 int is_extension_y4m(const char *filename) {
   const char *dot = strrchr(filename, '.');
@@ -80,17 +75,13 @@ int is_extension_y4m(const char *filename) {
 
 class EndToEndTestLarge
     : public ::libvpx_test::EncoderTest,
-      public ::libvpx_test::CodecTestWith3Params<libvpx_test::TestMode, \
+      public ::libvpx_test::CodecTestWith3Params<libvpx_test::TestMode,
                                                  TestVideoParam, int> {
  protected:
   EndToEndTestLarge()
-      : EncoderTest(GET_PARAM(0)),
-        test_video_param_(GET_PARAM(2)),
-        cpu_used_(GET_PARAM(3)),
-        psnr_(0.0),
-        nframes_(0),
-        encoding_mode_(GET_PARAM(1)) {
-  }
+      : EncoderTest(GET_PARAM(0)), test_video_param_(GET_PARAM(2)),
+        cpu_used_(GET_PARAM(3)), psnr_(0.0), nframes_(0),
+        encoding_mode_(GET_PARAM(1)) {}
 
   virtual ~EndToEndTestLarge() {}
 
@@ -136,8 +127,7 @@ class EndToEndTestLarge
   }
 
   double GetAveragePsnr() const {
-    if (nframes_)
-      return psnr_ / nframes_;
+    if (nframes_) return psnr_ / nframes_;
     return 0.0;
   }
 
@@ -161,28 +151,26 @@ TEST_P(EndToEndTestLarge, EndtoEndPSNRTest) {
   cfg_.g_input_bit_depth = test_video_param_.input_bit_depth;
   cfg_.g_bit_depth = test_video_param_.bit_depth;
   init_flags_ = VPX_CODEC_USE_PSNR;
-  if (cfg_.g_bit_depth > 8)
-    init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
+  if (cfg_.g_bit_depth > 8) init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
 
   libvpx_test::VideoSource *video;
   if (is_extension_y4m(test_video_param_.filename)) {
-    video = new libvpx_test::Y4mVideoSource(test_video_param_.filename,
-                                            0, kFrames);
+    video =
+        new libvpx_test::Y4mVideoSource(test_video_param_.filename, 0, kFrames);
   } else {
     video = new libvpx_test::YUVVideoSource(test_video_param_.filename,
-                                            test_video_param_.fmt,
-                                            kWidth, kHeight,
-                                            kFramerate, 1, 0, kFrames);
+                                            test_video_param_.fmt, kWidth,
+                                            kHeight, kFramerate, 1, 0, kFrames);
   }
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(video));
   const double psnr = GetAveragePsnr();
   EXPECT_GT(psnr, GetPsnrThreshold());
-  delete(video);
+  delete (video);
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-# if CONFIG_VP10_ENCODER
+#if CONFIG_VP10_ENCODER
 // TODO(angiebird): many fail in high bitdepth mode.
 INSTANTIATE_TEST_CASE_P(
     DISABLED_VP10, EndToEndTestLarge,
@@ -192,12 +180,11 @@ INSTANTIATE_TEST_CASE_P(
         ::testing::ValuesIn(kEncodingModeVectors),
         ::testing::ValuesIn(kTestVectors),
         ::testing::ValuesIn(kCpuUsedVectors)));
-# endif  // CONFIG_VP10_ENCODER
+#endif  // CONFIG_VP10_ENCODER
 #else
-VP10_INSTANTIATE_TEST_CASE(
-    EndToEndTestLarge,
-    ::testing::ValuesIn(kEncodingModeVectors),
-    ::testing::ValuesIn(kTestVectors),
-    ::testing::ValuesIn(kCpuUsedVectors));
+VP10_INSTANTIATE_TEST_CASE(EndToEndTestLarge,
+                           ::testing::ValuesIn(kEncodingModeVectors),
+                           ::testing::ValuesIn(kTestVectors),
+                           ::testing::ValuesIn(kCpuUsedVectors));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 }  // namespace
diff --git a/test/error_block_test.cc b/test/error_block_test.cc
index 8a9359124d1be805eeb1354cbdfa972f28d0f0d6..72c47147b721088dd6cc541c22d973b169352aff 100644
--- a/test/error_block_test.cc
+++ b/test/error_block_test.cc
@@ -32,20 +32,18 @@ const int kNumIterations = 1000;
 
 typedef int64_t (*ErrorBlockFunc)(const tran_low_t *coeff,
                                   const tran_low_t *dqcoeff,
-                                  intptr_t block_size,
-                                  int64_t *ssz, int bps);
+                                  intptr_t block_size, int64_t *ssz, int bps);
 
 typedef std::tr1::tuple<ErrorBlockFunc, ErrorBlockFunc, vpx_bit_depth_t>
-                        ErrorBlockParam;
+    ErrorBlockParam;
 
-class ErrorBlockTest
-  : public ::testing::TestWithParam<ErrorBlockParam> {
+class ErrorBlockTest : public ::testing::TestWithParam<ErrorBlockParam> {
  public:
   virtual ~ErrorBlockTest() {}
   virtual void SetUp() {
-    error_block_op_     = GET_PARAM(0);
+    error_block_op_ = GET_PARAM(0);
     ref_error_block_op_ = GET_PARAM(1);
-    bit_depth_  = GET_PARAM(2);
+    bit_depth_ = GET_PARAM(2);
   }
 
   virtual void TearDown() { libvpx_test::ClearSystemState(); }
@@ -76,18 +74,18 @@ TEST_P(ErrorBlockTest, OperationCheck) {
       // can be used for optimization, so generate test input precisely.
       if (rnd(2)) {
         // Positive number
-        coeff[j]   = rnd(1 << msb);
+        coeff[j] = rnd(1 << msb);
         dqcoeff[j] = rnd(1 << msb);
       } else {
         // Negative number
-        coeff[j]   = -rnd(1 << msb);
+        coeff[j] = -rnd(1 << msb);
         dqcoeff[j] = -rnd(1 << msb);
       }
     }
-    ref_ret = ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz,
-                                  bit_depth_);
-    ASM_REGISTER_STATE_CHECK(ret = error_block_op_(coeff, dqcoeff, block_size,
-                                                   &ssz, bit_depth_));
+    ref_ret =
+        ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, bit_depth_);
+    ASM_REGISTER_STATE_CHECK(
+        ret = error_block_op_(coeff, dqcoeff, block_size, &ssz, bit_depth_));
     err_count += (ref_ret != ret) | (ref_ssz != ssz);
     if (err_count && !err_count_total) {
       first_failure = i;
@@ -117,35 +115,35 @@ TEST_P(ErrorBlockTest, ExtremeValues) {
     int k = (i / 9) % 9;
 
     // Change the maximum coeff value, to test different bit boundaries
-    if ( k == 8 && (i % 9) == 0 ) {
+    if (k == 8 && (i % 9) == 0) {
       max_val >>= 1;
     }
     block_size = 16 << (i % 9);  // All block sizes from 4x4, 8x4 ..64x64
     for (int j = 0; j < block_size; j++) {
       if (k < 4) {
         // Test at positive maximum values
-        coeff[j]   = k % 2 ? max_val : 0;
+        coeff[j] = k % 2 ? max_val : 0;
         dqcoeff[j] = (k >> 1) % 2 ? max_val : 0;
       } else if (k < 8) {
         // Test at negative maximum values
-        coeff[j]   = k % 2 ? -max_val : 0;
+        coeff[j] = k % 2 ? -max_val : 0;
         dqcoeff[j] = (k >> 1) % 2 ? -max_val : 0;
       } else {
         if (rnd(2)) {
           // Positive number
-          coeff[j]   = rnd(1 << 14);
+          coeff[j] = rnd(1 << 14);
           dqcoeff[j] = rnd(1 << 14);
         } else {
           // Negative number
-          coeff[j]   = -rnd(1 << 14);
+          coeff[j] = -rnd(1 << 14);
           dqcoeff[j] = -rnd(1 << 14);
         }
       }
     }
-    ref_ret = ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz,
-                                  bit_depth_);
-    ASM_REGISTER_STATE_CHECK(ret = error_block_op_(coeff, dqcoeff, block_size,
-                                                   &ssz, bit_depth_));
+    ref_ret =
+        ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, bit_depth_);
+    ASM_REGISTER_STATE_CHECK(
+        ret = error_block_op_(coeff, dqcoeff, block_size, &ssz, bit_depth_));
     err_count += (ref_ret != ret) | (ref_ssz != ssz);
     if (err_count && !err_count_total) {
       first_failure = i;
@@ -161,49 +159,48 @@ using std::tr1::make_tuple;
 
 #if CONFIG_USE_X86INC
 int64_t wrap_vp10_highbd_block_error_8bit_c(const tran_low_t *coeff,
-                                           const tran_low_t *dqcoeff,
-                                           intptr_t block_size,
-                                           int64_t *ssz, int bps) {
+                                            const tran_low_t *dqcoeff,
+                                            intptr_t block_size, int64_t *ssz,
+                                            int bps) {
   assert(bps == 8);
   return vp10_highbd_block_error_8bit_c(coeff, dqcoeff, block_size, ssz);
 }
 
 #if HAVE_SSE2
 int64_t wrap_vp10_highbd_block_error_8bit_sse2(const tran_low_t *coeff,
-                                              const tran_low_t *dqcoeff,
-                                              intptr_t block_size,
-                                              int64_t *ssz, int bps) {
+                                               const tran_low_t *dqcoeff,
+                                               intptr_t block_size,
+                                               int64_t *ssz, int bps) {
   assert(bps == 8);
   return vp10_highbd_block_error_8bit_sse2(coeff, dqcoeff, block_size, ssz);
 }
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, ErrorBlockTest,
-    ::testing::Values(
-        make_tuple(&vp10_highbd_block_error_sse2,
-                   &vp10_highbd_block_error_c, VPX_BITS_10),
-        make_tuple(&vp10_highbd_block_error_sse2,
-                   &vp10_highbd_block_error_c, VPX_BITS_12),
-        make_tuple(&vp10_highbd_block_error_sse2,
-                   &vp10_highbd_block_error_c, VPX_BITS_8),
-        make_tuple(&wrap_vp10_highbd_block_error_8bit_sse2,
-                   &wrap_vp10_highbd_block_error_8bit_c, VPX_BITS_8)));
+    ::testing::Values(make_tuple(&vp10_highbd_block_error_sse2,
+                                 &vp10_highbd_block_error_c, VPX_BITS_10),
+                      make_tuple(&vp10_highbd_block_error_sse2,
+                                 &vp10_highbd_block_error_c, VPX_BITS_12),
+                      make_tuple(&vp10_highbd_block_error_sse2,
+                                 &vp10_highbd_block_error_c, VPX_BITS_8),
+                      make_tuple(&wrap_vp10_highbd_block_error_8bit_sse2,
+                                 &wrap_vp10_highbd_block_error_8bit_c,
+                                 VPX_BITS_8)));
 #endif  // HAVE_SSE2
 
 #if HAVE_AVX
 int64_t wrap_vp10_highbd_block_error_8bit_avx(const tran_low_t *coeff,
                                               const tran_low_t *dqcoeff,
-                                              intptr_t block_size,
-                                              int64_t *ssz, int bps) {
+                                              intptr_t block_size, int64_t *ssz,
+                                              int bps) {
   assert(bps == 8);
   return vp10_highbd_block_error_8bit_avx(coeff, dqcoeff, block_size, ssz);
 }
 
-INSTANTIATE_TEST_CASE_P(
-    AVX, ErrorBlockTest,
-    ::testing::Values(
-        make_tuple(&wrap_vp10_highbd_block_error_8bit_avx,
-                   &wrap_vp10_highbd_block_error_8bit_c, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(AVX, ErrorBlockTest,
+                        ::testing::Values(make_tuple(
+                            &wrap_vp10_highbd_block_error_8bit_avx,
+                            &wrap_vp10_highbd_block_error_8bit_c, VPX_BITS_8)));
 #endif  // HAVE_AVX
 
 #endif  // CONFIG_USE_X86INC
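
The wrap_vp10_highbd_block_error_8bit_* functions whose parameter lists are re-indented above exist only to adapt four-argument 8-bit kernels to the five-argument ErrorBlockFunc type, asserting bps is 8 and otherwise discarding it. The adapter idiom under invented names:

// Adapting a narrower kernel to a wider function-pointer type, as the
// wrap_* helpers above do. All names here are illustrative.
#include <cassert>
#include <cstdint>

// Mirrors the role of ErrorBlockFunc in the test above.
typedef int64_t (*ErrorFunc)(const int16_t *a, const int16_t *b, intptr_t n,
                             int64_t *ssz, int bps);

static int64_t error_8bit(const int16_t *a, const int16_t *b, intptr_t n,
                          int64_t *ssz) {
  int64_t err = 0, sq = 0;
  for (intptr_t i = 0; i < n; ++i) {
    const int64_t d = (int64_t)a[i] - b[i];
    err += d * d;                 // sum of squared differences
    sq += (int64_t)a[i] * a[i];   // sum of squared reference coefficients
  }
  *ssz = sq;
  return err;
}

// Pins the extra parameter so the 8-bit kernel matches ErrorFunc.
static int64_t wrap_error_8bit(const int16_t *a, const int16_t *b, intptr_t n,
                               int64_t *ssz, int bps) {
  assert(bps == 8);
  return error_8bit(a, b, n, ssz);
}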
diff --git a/test/error_resilience_test.cc b/test/error_resilience_test.cc
index b1a60fda424928b6d0d7a0d8d193fb56691a1d00..5e578e844adca11b88194082c7edc67d08650063 100644
--- a/test/error_resilience_test.cc
+++ b/test/error_resilience_test.cc
@@ -19,16 +19,13 @@ namespace {
 const int kMaxErrorFrames = 12;
 const int kMaxDroppableFrames = 12;
 
-class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, bool> {
+class ErrorResilienceTestLarge
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, bool> {
  protected:
   ErrorResilienceTestLarge()
-      : EncoderTest(GET_PARAM(0)),
-        svc_support_(GET_PARAM(2)),
-        psnr_(0.0),
-        nframes_(0),
-        mismatch_psnr_(0.0),
-        mismatch_nframes_(0),
+      : EncoderTest(GET_PARAM(0)), svc_support_(GET_PARAM(2)), psnr_(0.0),
+        nframes_(0), mismatch_psnr_(0.0), mismatch_nframes_(0),
         encoding_mode_(GET_PARAM(1)) {
     Reset();
   }
@@ -66,81 +63,70 @@ class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest,
   // LAST is updated on base/layer 0, GOLDEN  updated on layer 1.
   // Non-zero pattern_switch parameter means pattern will switch to
   // not using LAST for frame_num >= pattern_switch.
-  int SetFrameFlags(int frame_num,
-                    int num_temp_layers,
-                    int pattern_switch) {
+  int SetFrameFlags(int frame_num, int num_temp_layers, int pattern_switch) {
     int frame_flags = 0;
     if (num_temp_layers == 2) {
-        if (frame_num % 2 == 0) {
-          if (frame_num < pattern_switch || pattern_switch == 0) {
-            // Layer 0: predict from LAST and ARF, update LAST.
-            frame_flags = VP8_EFLAG_NO_REF_GF |
-                          VP8_EFLAG_NO_UPD_GF |
-                          VP8_EFLAG_NO_UPD_ARF;
-          } else {
-            // Layer 0: predict from GF and ARF, update GF.
-            frame_flags = VP8_EFLAG_NO_REF_LAST |
-                          VP8_EFLAG_NO_UPD_LAST |
-                          VP8_EFLAG_NO_UPD_ARF;
-          }
+      if (frame_num % 2 == 0) {
+        if (frame_num < pattern_switch || pattern_switch == 0) {
+          // Layer 0: predict from LAST and ARF, update LAST.
+          frame_flags =
+              VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
         } else {
-          if (frame_num < pattern_switch || pattern_switch == 0) {
-            // Layer 1: predict from L, GF, and ARF, update GF.
-            frame_flags = VP8_EFLAG_NO_UPD_ARF |
-                          VP8_EFLAG_NO_UPD_LAST;
-          } else {
-            // Layer 1: predict from GF and ARF, update GF.
-            frame_flags = VP8_EFLAG_NO_REF_LAST |
-                          VP8_EFLAG_NO_UPD_LAST |
-                          VP8_EFLAG_NO_UPD_ARF;
-          }
+          // Layer 0: predict from GF and ARF, update GF.
+          frame_flags = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_LAST |
+                        VP8_EFLAG_NO_UPD_ARF;
+        }
+      } else {
+        if (frame_num < pattern_switch || pattern_switch == 0) {
+          // Layer 1: predict from L, GF, and ARF, update GF.
+          frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
+        } else {
+          // Layer 1: predict from GF and ARF, update GF.
+          frame_flags = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_LAST |
+                        VP8_EFLAG_NO_UPD_ARF;
         }
+      }
     }
     return frame_flags;
   }
 
   virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
                                   ::libvpx_test::Encoder *encoder) {
-    frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST |
-                      VP8_EFLAG_NO_UPD_GF |
-                      VP8_EFLAG_NO_UPD_ARF);
+    frame_flags_ &=
+        ~(VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF);
     // For temporal layer case.
     if (cfg_.ts_number_layers > 1) {
-      frame_flags_ = SetFrameFlags(video->frame(),
-                                   cfg_.ts_number_layers,
-                                   pattern_switch_);
+      frame_flags_ =
+          SetFrameFlags(video->frame(), cfg_.ts_number_layers, pattern_switch_);
       for (unsigned int i = 0; i < droppable_nframes_; ++i) {
         if (droppable_frames_[i] == video->frame()) {
-          std::cout << "Encoding droppable frame: "
-                    << droppable_frames_[i] << "\n";
+          std::cout << "Encoding droppable frame: " << droppable_frames_[i]
+                    << "\n";
         }
       }
     } else {
-       if (droppable_nframes_ > 0 &&
-         (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
-         for (unsigned int i = 0; i < droppable_nframes_; ++i) {
-           if (droppable_frames_[i] == video->frame()) {
-             std::cout << "Encoding droppable frame: "
-                       << droppable_frames_[i] << "\n";
-             frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
-                              VP8_EFLAG_NO_UPD_GF |
-                              VP8_EFLAG_NO_UPD_ARF);
-             return;
-           }
-         }
-       }
+      if (droppable_nframes_ > 0 &&
+          (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+        for (unsigned int i = 0; i < droppable_nframes_; ++i) {
+          if (droppable_frames_[i] == video->frame()) {
+            std::cout << "Encoding droppable frame: " << droppable_frames_[i]
+                      << "\n";
+            frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
+                             VP8_EFLAG_NO_UPD_ARF);
+            return;
+          }
+        }
+      }
     }
   }
 
   double GetAveragePsnr() const {
-    if (nframes_)
-      return psnr_ / nframes_;
+    if (nframes_) return psnr_ / nframes_;
     return 0.0;
   }
 
   double GetAverageMismatchPsnr() const {
-    if (mismatch_nframes_)
-      return mismatch_psnr_ / mismatch_nframes_;
+    if (mismatch_nframes_) return mismatch_psnr_ / mismatch_nframes_;
     return 0.0;
   }
 
@@ -158,8 +144,7 @@ class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest,
     return 1;
   }
 
-  virtual void MismatchHook(const vpx_image_t *img1,
-                            const vpx_image_t *img2) {
+  virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) {
     double mismatch_psnr = compute_psnr(img1, img2);
     mismatch_psnr_ += mismatch_psnr;
     ++mismatch_nframes_;
@@ -186,13 +171,9 @@ class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest,
       droppable_frames_[i] = list[i];
   }
 
-  unsigned int GetMismatchFrames() {
-    return mismatch_nframes_;
-  }
+  unsigned int GetMismatchFrames() { return mismatch_nframes_; }
 
-  void SetPatternSwitch(int frame_switch) {
-     pattern_switch_ = frame_switch;
-   }
+  void SetPatternSwitch(int frame_switch) { pattern_switch_ = frame_switch; }
 
   bool svc_support_;
 
@@ -265,15 +246,14 @@ TEST_P(ErrorResilienceTestLarge, DropFramesWithoutRecovery) {
   // In addition to isolated loss/drop, add a long consecutive series
   // (of size 9) of dropped frames.
   unsigned int num_droppable_frames = 11;
-  unsigned int droppable_frame_list[] = {5, 16, 22, 23, 24, 25, 26, 27, 28,
-                                         29, 30};
+  unsigned int droppable_frame_list[] = { 5,  16, 22, 23, 24, 25,
+                                          26, 27, 28, 29, 30 };
   SetDroppableFrames(num_droppable_frames, droppable_frame_list);
   SetErrorFrames(num_droppable_frames, droppable_frame_list);
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
   // Test that no mismatches have been found
-  std::cout << "             Mismatch frames: "
-            << GetMismatchFrames() << "\n";
-  EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+  std::cout << "             Mismatch frames: " << GetMismatchFrames() << "\n";
+  EXPECT_EQ(GetMismatchFrames(), (unsigned int)0);
 
   // Reset the previously set error/droppable frames.
   Reset();
@@ -306,8 +286,7 @@ TEST_P(ErrorResilienceTestLarge, DropFramesWithoutRecovery) {
 // layer, so successful decoding is expected.
 TEST_P(ErrorResilienceTestLarge, 2LayersDropEnhancement) {
   // This test doesn't run if SVC is not supported.
-  if (!svc_support_)
-    return;
+  if (!svc_support_) return;
 
   const vpx_rational timebase = { 33333333, 1000000000 };
   cfg_.g_timebase = timebase;
@@ -337,14 +316,13 @@ TEST_P(ErrorResilienceTestLarge, 2LayersDropEnhancement) {
   // The odd frames are the enhancement layer for the 2-layer pattern, so set
   // those frames as droppable. Drop the last 7 frames.
   unsigned int num_droppable_frames = 7;
-  unsigned int droppable_frame_list[] = {27, 29, 31, 33, 35, 37, 39};
+  unsigned int droppable_frame_list[] = { 27, 29, 31, 33, 35, 37, 39 };
   SetDroppableFrames(num_droppable_frames, droppable_frame_list);
   SetErrorFrames(num_droppable_frames, droppable_frame_list);
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
   // Test that no mismatches have been found
-  std::cout << "             Mismatch frames: "
-            << GetMismatchFrames() << "\n";
-  EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+  std::cout << "             Mismatch frames: " << GetMismatchFrames() << "\n";
+  EXPECT_EQ(GetMismatchFrames(), (unsigned int)0);
 
   // Reset the previously set error/droppable frames.
   Reset();
@@ -355,8 +333,7 @@ TEST_P(ErrorResilienceTestLarge, 2LayersDropEnhancement) {
 // sequence, the LAST ref is not used anymore.
 TEST_P(ErrorResilienceTestLarge, 2LayersNoRefLast) {
   // This test doesn't run if SVC is not supported.
-  if (!svc_support_)
-    return;
+  if (!svc_support_) return;
 
   const vpx_rational timebase = { 33333333, 1000000000 };
   cfg_.g_timebase = timebase;
@@ -385,20 +362,19 @@ TEST_P(ErrorResilienceTestLarge, 2LayersNoRefLast) {
 
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
   // Test that no mismatches have been found
-  std::cout << "             Mismatch frames: "
-            << GetMismatchFrames() << "\n";
-  EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+  std::cout << "             Mismatch frames: " << GetMismatchFrames() << "\n";
+  EXPECT_EQ(GetMismatchFrames(), (unsigned int)0);
 
   // Reset the previously set error/droppable frames.
   Reset();
 }
 
-class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class ErrorResilienceTestLargeCodecControls
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
  protected:
   ErrorResilienceTestLargeCodecControls()
-      : EncoderTest(GET_PARAM(0)),
-        encoding_mode_(GET_PARAM(1)) {
+      : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)) {
     Reset();
   }
 
@@ -437,8 +413,8 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
     if (num_temp_layers == 2) {
       if (frame_num % 2 == 0) {
         // Layer 0: predict from L and ARF, update L.
-        frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
-                      VP8_EFLAG_NO_UPD_ARF;
+        frame_flags =
+            VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
       } else {
         // Layer 1: predict from L, G, and ARF; update G.
         frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
@@ -451,9 +427,9 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
                       VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
       } else if ((frame_num - 2) % 4 == 0) {
         // Layer 1: predict from L and G, update G.
-        frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
-                      VP8_EFLAG_NO_REF_ARF;
-      }  else if ((frame_num - 1) % 2 == 0) {
+        frame_flags =
+            VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_REF_ARF;
+      } else if ((frame_num - 1) % 2 == 0) {
         // Layer 2: predict from L, G, ARF; update ARF.
         frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST;
       }
@@ -467,7 +443,7 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
       if (frame_num % 2 == 0) {
         layer_id = 0;
       } else {
-         layer_id = 1;
+        layer_id = 1;
       }
     } else if (num_temp_layers == 3) {
       if (frame_num % 4 == 0) {
@@ -484,16 +460,16 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
   virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
                                   libvpx_test::Encoder *encoder) {
     if (cfg_.ts_number_layers > 1) {
-        int layer_id = SetLayerId(video->frame(), cfg_.ts_number_layers);
-        int frame_flags = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
-        if (video->frame() > 0) {
-          encoder->Control(VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
-          encoder->Control(VP8E_SET_FRAME_FLAGS, frame_flags);
-        }
-       const vpx_rational_t tb = video->timebase();
-       timebase_ = static_cast<double>(tb.num) / tb.den;
-       duration_ = 0;
-       return;
+      int layer_id = SetLayerId(video->frame(), cfg_.ts_number_layers);
+      int frame_flags = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
+      if (video->frame() > 0) {
+        encoder->Control(VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
+        encoder->Control(VP8E_SET_FRAME_FLAGS, frame_flags);
+      }
+      const vpx_rational_t tb = video->timebase();
+      timebase_ = static_cast<double>(tb.num) / tb.den;
+      duration_ = 0;
+      return;
     }
   }
 
@@ -519,26 +495,28 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
 
   virtual void EndPassHook(void) {
     duration_ = (last_pts_ + 1) * timebase_;
-    if (cfg_.ts_number_layers  > 1) {
+    if (cfg_.ts_number_layers > 1) {
       for (int layer = 0; layer < static_cast<int>(cfg_.ts_number_layers);
-          ++layer) {
+           ++layer) {
         if (bits_total_[layer]) {
           // Effective file datarate:
-          effective_datarate_[layer] = (bits_total_[layer] / 1000.0) / duration_;
+          effective_datarate_[layer] =
+              (bits_total_[layer] / 1000.0) / duration_;
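+          // (bits / 1000) / seconds gives kilobits per second, the same
+          // units as ts_target_bitrate.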
         }
       }
     }
   }
 
   double effective_datarate_[3];
-   private:
-    libvpx_test::TestMode encoding_mode_;
-    vpx_codec_pts_t last_pts_;
-    double timebase_;
-    int64_t bits_total_[3];
-    double duration_;
-    int tot_frame_number_;
-  };
+
+ private:
+  libvpx_test::TestMode encoding_mode_;
+  vpx_codec_pts_t last_pts_;
+  double timebase_;
+  int64_t bits_total_[3];
+  double duration_;
+  int tot_frame_number_;
+};
 
 // Check the two codec controls used for (1) setting the temporal layer id
 // and (2) setting the encoder flags.
@@ -582,10 +560,12 @@ TEST_P(ErrorResilienceTestLargeCodecControls, CodecControl3TemporalLayers) {
     for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
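+      // Require each layer's measured datarate to stay within 25% of its
+      // target on either side.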
       ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.75)
           << " The datarate for the file is lower than target by too much, "
-              "for layer: " << j;
+             "for layer: "
+          << j;
       ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.25)
           << " The datarate for the file is greater than target by too much, "
-              "for layer: " << j;
+             "for layer: "
+          << j;
     }
   }
 }
diff --git a/test/ethread_test.cc b/test/ethread_test.cc
index b221ba0d8729be90f4b04432fa39f7c1ddf04e97..b358cd4ec14be07c24632d15ff1926e76368333c 100644
--- a/test/ethread_test.cc
+++ b/test/ethread_test.cc
@@ -23,11 +23,8 @@ class VPxEncoderThreadTest
       public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
  protected:
   VPxEncoderThreadTest()
-      : EncoderTest(GET_PARAM(0)),
-        encoder_initialized_(false),
-        tiles_(2),
-        encoding_mode_(GET_PARAM(1)),
-        set_cpu_used_(GET_PARAM(2)) {
+      : EncoderTest(GET_PARAM(0)), encoder_initialized_(false), tiles_(2),
+        encoding_mode_(GET_PARAM(1)), set_cpu_used_(GET_PARAM(2)) {
     init_flags_ = VPX_CODEC_USE_PSNR;
     vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
     cfg.w = 1280;
@@ -36,9 +33,7 @@ class VPxEncoderThreadTest
 
     md5_.clear();
   }
-  virtual ~VPxEncoderThreadTest() {
-    delete decoder_;
-  }
+  virtual ~VPxEncoderThreadTest() { delete decoder_; }
 
   virtual void SetUp() {
     InitializeConfig();
@@ -83,7 +78,7 @@ class VPxEncoderThreadTest
 
   virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
     const vpx_codec_err_t res = decoder_->DecodeFrame(
-        reinterpret_cast<uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);
+        reinterpret_cast<uint8_t *>(pkt->data.frame.buf), pkt->data.frame.sz);
     if (res != VPX_CODEC_OK) {
       abort_ = true;
       ASSERT_EQ(VPX_CODEC_OK, res);
@@ -129,8 +124,8 @@ TEST_P(VPxEncoderThreadTest, EncoderResultTest) {
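+  // The multi-threaded encode must be bit-exact with the single-threaded one.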
   ASSERT_EQ(single_thr_md5, multi_thr_md5);
 }
 
-VP10_INSTANTIATE_TEST_CASE(
-    VPxEncoderThreadTest,
-    ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood),
-    ::testing::Range(1, 3));
+VP10_INSTANTIATE_TEST_CASE(VPxEncoderThreadTest,
+                           ::testing::Values(::libvpx_test::kTwoPassGood,
+                                             ::libvpx_test::kOnePassGood),
+                           ::testing::Range(1, 3));
 }  // namespace
diff --git a/test/fdct4x4_test.cc b/test/fdct4x4_test.cc
index 8d8f56959e26ffabf16e76515e7013bd4baa8336..261fb29e0a5802acc44e0ff93b50a69e07ac5ff4 100644
--- a/test/fdct4x4_test.cc
+++ b/test/fdct4x4_test.cc
@@ -39,8 +39,7 @@ typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
 typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct4x4Param;
 typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht4x4Param;
 
-void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
-                 int tx_type) {
+void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
   vpx_fdct4x4_c(in, out, stride);
 }
 
@@ -48,8 +47,7 @@ void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
   vp10_fht4x4_c(in, out, stride, tx_type);
 }
 
-void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
-                 int tx_type) {
+void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
   vp10_fwht4x4_c(in, out, stride);
 }
 
@@ -128,14 +126,14 @@ class Trans4x4TestBase {
         }
       }
 
-      ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
-                                          test_temp_block, pitch_));
+      ASM_REGISTER_STATE_CHECK(
+          RunFwdTxfm(test_input_block, test_temp_block, pitch_));
       if (bit_depth_ == VPX_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
-        ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
-                                            CONVERT_TO_BYTEPTR(dst16), pitch_));
+        ASM_REGISTER_STATE_CHECK(
+            RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
 #endif
       }
 
@@ -148,15 +146,13 @@ class Trans4x4TestBase {
         const uint32_t diff = dst[j] - src[j];
 #endif
         const uint32_t error = diff * diff;
-        if (max_error < error)
-          max_error = error;
+        if (max_error < error) max_error = error;
         total_error += error;
       }
     }
 
     EXPECT_GE(static_cast<uint32_t>(limit), max_error)
-        << "Error: 4x4 FHT/IHT has an individual round trip error > "
-        << limit;
+        << "Error: 4x4 FHT/IHT has an individual round trip error > " << limit;
 
     EXPECT_GE(count_test_block * limit, total_error)
         << "Error: 4x4 FHT/IHT has average round trip error > " << limit
@@ -197,16 +193,14 @@ class Trans4x4TestBase {
         input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
       }
       if (i == 0) {
-        for (int j = 0; j < kNumCoeffs; ++j)
-          input_extreme_block[j] = mask_;
+        for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_;
       } else if (i == 1) {
-        for (int j = 0; j < kNumCoeffs; ++j)
-          input_extreme_block[j] = -mask_;
+        for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_;
       }
 
       fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
-      ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
-                                          output_block, pitch_));
+      ASM_REGISTER_STATE_CHECK(
+          RunFwdTxfm(input_extreme_block, output_block, pitch_));
 
       // The minimum quant value is 4.
       for (int j = 0; j < kNumCoeffs; ++j) {
@@ -251,8 +245,8 @@ class Trans4x4TestBase {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
-        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
-                                            pitch_));
+        ASM_REGISTER_STATE_CHECK(
+            RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
 #endif
       }
 
@@ -265,8 +259,7 @@ class Trans4x4TestBase {
 #endif
         const uint32_t error = diff * diff;
         EXPECT_GE(static_cast<uint32_t>(limit), error)
-            << "Error: 4x4 IDCT has error " << error
-            << " at index " << j;
+            << "Error: 4x4 IDCT has error " << error << " at index " << j;
       }
     }
   }
@@ -278,17 +271,16 @@ class Trans4x4TestBase {
   int mask_;
 };
 
-class Trans4x4DCT
-    : public Trans4x4TestBase,
-      public ::testing::TestWithParam<Dct4x4Param> {
+class Trans4x4DCT : public Trans4x4TestBase,
+                    public ::testing::TestWithParam<Dct4x4Param> {
  public:
   virtual ~Trans4x4DCT() {}
 
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
     inv_txfm_ = GET_PARAM(1);
-    tx_type_  = GET_PARAM(2);
-    pitch_    = 4;
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 4;
     fwd_txfm_ref = fdct4x4_ref;
     bit_depth_ = GET_PARAM(3);
     mask_ = (1 << bit_depth_) - 1;
@@ -307,33 +299,24 @@ class Trans4x4DCT
   IdctFunc inv_txfm_;
 };
 
-TEST_P(Trans4x4DCT, AccuracyCheck) {
-  RunAccuracyCheck(1);
-}
+TEST_P(Trans4x4DCT, AccuracyCheck) { RunAccuracyCheck(1); }
 
-TEST_P(Trans4x4DCT, CoeffCheck) {
-  RunCoeffCheck();
-}
+TEST_P(Trans4x4DCT, CoeffCheck) { RunCoeffCheck(); }
 
-TEST_P(Trans4x4DCT, MemCheck) {
-  RunMemCheck();
-}
+TEST_P(Trans4x4DCT, MemCheck) { RunMemCheck(); }
 
-TEST_P(Trans4x4DCT, InvAccuracyCheck) {
-  RunInvAccuracyCheck(1);
-}
+TEST_P(Trans4x4DCT, InvAccuracyCheck) { RunInvAccuracyCheck(1); }
 
-class Trans4x4HT
-    : public Trans4x4TestBase,
-      public ::testing::TestWithParam<Ht4x4Param> {
+class Trans4x4HT : public Trans4x4TestBase,
+                   public ::testing::TestWithParam<Ht4x4Param> {
  public:
   virtual ~Trans4x4HT() {}
 
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
     inv_txfm_ = GET_PARAM(1);
-    tx_type_  = GET_PARAM(2);
-    pitch_    = 4;
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 4;
     fwd_txfm_ref = fht4x4_ref;
     bit_depth_ = GET_PARAM(3);
     mask_ = (1 << bit_depth_) - 1;
@@ -353,33 +336,24 @@ class Trans4x4HT
   IhtFunc inv_txfm_;
 };
 
-TEST_P(Trans4x4HT, AccuracyCheck) {
-  RunAccuracyCheck(1);
-}
+TEST_P(Trans4x4HT, AccuracyCheck) { RunAccuracyCheck(1); }
 
-TEST_P(Trans4x4HT, CoeffCheck) {
-  RunCoeffCheck();
-}
+TEST_P(Trans4x4HT, CoeffCheck) { RunCoeffCheck(); }
 
-TEST_P(Trans4x4HT, MemCheck) {
-  RunMemCheck();
-}
+TEST_P(Trans4x4HT, MemCheck) { RunMemCheck(); }
 
-TEST_P(Trans4x4HT, InvAccuracyCheck) {
-  RunInvAccuracyCheck(1);
-}
+TEST_P(Trans4x4HT, InvAccuracyCheck) { RunInvAccuracyCheck(1); }
 
-class Trans4x4WHT
-    : public Trans4x4TestBase,
-      public ::testing::TestWithParam<Dct4x4Param> {
+class Trans4x4WHT : public Trans4x4TestBase,
+                    public ::testing::TestWithParam<Dct4x4Param> {
  public:
   virtual ~Trans4x4WHT() {}
 
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
     inv_txfm_ = GET_PARAM(1);
-    tx_type_  = GET_PARAM(2);
-    pitch_    = 4;
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 4;
     fwd_txfm_ref = fwht4x4_ref;
     bit_depth_ = GET_PARAM(3);
     mask_ = (1 << bit_depth_) - 1;
@@ -398,21 +372,13 @@ class Trans4x4WHT
   IdctFunc inv_txfm_;
 };
 
-TEST_P(Trans4x4WHT, AccuracyCheck) {
-  RunAccuracyCheck(0);
-}
+TEST_P(Trans4x4WHT, AccuracyCheck) { RunAccuracyCheck(0); }
 
-TEST_P(Trans4x4WHT, CoeffCheck) {
-  RunCoeffCheck();
-}
+TEST_P(Trans4x4WHT, CoeffCheck) { RunCoeffCheck(); }
 
-TEST_P(Trans4x4WHT, MemCheck) {
-  RunMemCheck();
-}
+TEST_P(Trans4x4WHT, MemCheck) { RunMemCheck(); }
 
-TEST_P(Trans4x4WHT, InvAccuracyCheck) {
-  RunInvAccuracyCheck(0);
-}
+TEST_P(Trans4x4WHT, InvAccuracyCheck) { RunInvAccuracyCheck(0); }
 using std::tr1::make_tuple;
 
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -423,10 +389,10 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12),
         make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
 #else
-INSTANTIATE_TEST_CASE_P(
-    C, Trans4x4DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(C, Trans4x4DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct4x4_c,
+                                                     &vpx_idct4x4_16_add_c, 0,
+                                                     VPX_BITS_8)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -463,18 +429,17 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(&vp10_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
         make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
 #else
-INSTANTIATE_TEST_CASE_P(
-    C, Trans4x4WHT,
-    ::testing::Values(
-        make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(C, Trans4x4WHT,
+                        ::testing::Values(make_tuple(&vp10_fwht4x4_c,
+                                                     &vpx_iwht4x4_16_add_c, 0,
+                                                     VPX_BITS_8)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    NEON, Trans4x4DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct4x4_c,
-                   &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(NEON, Trans4x4DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct4x4_c,
+                                                     &vpx_idct4x4_16_add_neon,
+                                                     0, VPX_BITS_8)));
 #endif  // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
@@ -489,46 +454,44 @@ INSTANTIATE_TEST_CASE_P(
 
 #if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VPX_HIGHBITDEPTH && \
     !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    MMX, Trans4x4WHT,
-    ::testing::Values(
-        make_tuple(&vp10_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(MMX, Trans4x4WHT,
+                        ::testing::Values(make_tuple(&vp10_fwht4x4_mmx,
+                                                     &vpx_iwht4x4_16_add_c, 0,
+                                                     VPX_BITS_8)));
 #endif
 
 #if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && \
     !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    SSE2, Trans4x4WHT,
-    ::testing::Values(
-        make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4WHT,
+                        ::testing::Values(make_tuple(&vp10_fwht4x4_c,
+                                                     &vpx_iwht4x4_16_add_sse2,
+                                                     0, VPX_BITS_8)));
 #endif
 
 #if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    SSE2, Trans4x4DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct4x4_sse2,
-                   &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct4x4_sse2,
+                                                     &vpx_idct4x4_16_add_sse2,
+                                                     0, VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4HT,
     ::testing::Values(
         make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0, VPX_BITS_8),
         make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1, VPX_BITS_8),
         make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_sse2,
-                   &vp10_iht4x4_16_add_sse2, 3, VPX_BITS_8)));
+        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3,
+                   VPX_BITS_8)));
 #endif  // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4DCT,
     ::testing::Values(
-        make_tuple(&vpx_highbd_fdct4x4_c,    &idct4x4_10_sse2, 0, VPX_BITS_10),
+        make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10),
         make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct4x4_c,    &idct4x4_12_sse2, 0, VPX_BITS_12),
+        make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12),
         make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12),
-        make_tuple(&vpx_fdct4x4_sse2,      &vpx_idct4x4_16_add_c, 0,
-                   VPX_BITS_8)));
+        make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4HT,
@@ -540,10 +503,10 @@ INSTANTIATE_TEST_CASE_P(
 #endif  // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    MSA, Trans4x4DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(MSA, Trans4x4DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct4x4_msa,
+                                                     &vpx_idct4x4_16_add_msa, 0,
+                                                     VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans4x4HT,
     ::testing::Values(
diff --git a/test/fdct8x8_test.cc b/test/fdct8x8_test.cc
index 66aa67bd398ac4232ec8b3f6bef9a6ea5147e478..5b832b9114fb5eab0ccb05b6922f3b11c477342d 100644
--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -53,8 +53,7 @@ void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
     out[k] = 0.0;
     for (int n = 0; n < 8; n++)
       out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
-    if (k == 0)
-      out[k] = out[k] * kInvSqrt2;
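+    // The DC term (k == 0) carries an extra 1/sqrt(2) factor, matching the
+    // orthonormal DCT-II normalization.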
+    if (k == 0) out[k] = out[k] * kInvSqrt2;
   }
 }
 
@@ -63,25 +62,20 @@ void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
   // First transform columns
   for (int i = 0; i < 8; ++i) {
     double temp_in[8], temp_out[8];
-    for (int j = 0; j < 8; ++j)
-      temp_in[j] = input[j*8 + i];
+    for (int j = 0; j < 8; ++j) temp_in[j] = input[j * 8 + i];
     reference_8x8_dct_1d(temp_in, temp_out, 1);
-    for (int j = 0; j < 8; ++j)
-      output[j * 8 + i] = temp_out[j];
+    for (int j = 0; j < 8; ++j) output[j * 8 + i] = temp_out[j];
   }
   // Then transform rows
   for (int i = 0; i < 8; ++i) {
     double temp_in[8], temp_out[8];
-    for (int j = 0; j < 8; ++j)
-      temp_in[j] = output[j + i*8];
+    for (int j = 0; j < 8; ++j) temp_in[j] = output[j + i * 8];
     reference_8x8_dct_1d(temp_in, temp_out, 1);
     // Scale by 2 so the reference matches the scaling of the transform
     // under test.
-    for (int j = 0; j < 8; ++j)
-      output[j + i * 8] = temp_out[j] * 2;
+    for (int j = 0; j < 8; ++j) output[j + i * 8] = temp_out[j] * 2;
   }
 }
 
-
 void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
   vpx_fdct8x8_c(in, out, stride);
 }
@@ -175,8 +169,7 @@ class FwdTrans8x8TestBase {
           << 1. * max_diff / count_test_block * 100 << "%"
           << " for input range [-255, 255] at index " << j
           << " count0: " << count_sign_block[j][0]
-          << " count1: " << count_sign_block[j][1]
-          << " diff: " << diff;
+          << " count1: " << count_sign_block[j][1] << " diff: " << diff;
     }
 
     memset(count_sign_block, 0, sizeof(count_sign_block));
@@ -184,8 +177,8 @@ class FwdTrans8x8TestBase {
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-mask_ / 16, mask_ / 16].
       for (int j = 0; j < 64; ++j)
-        test_input_block[j] = ((rnd.Rand16() & mask_) >> 4) -
-                              ((rnd.Rand16() & mask_) >> 4);
+        test_input_block[j] =
+            ((rnd.Rand16() & mask_) >> 4) - ((rnd.Rand16() & mask_) >> 4);
       ASM_REGISTER_STATE_CHECK(
           RunFwdTxfm(test_input_block, test_output_block, pitch_));
 
@@ -205,8 +198,7 @@ class FwdTrans8x8TestBase {
           << 1. * max_diff / count_test_block * 100 << "%"
           << " for input range [-15, 15] at index " << j
           << " count0: " << count_sign_block[j][0]
-          << " count1: " << count_sign_block[j][1]
-          << " diff: " << diff;
+          << " count1: " << count_sign_block[j][1] << " diff: " << diff;
     }
   }
 
@@ -243,19 +235,18 @@ class FwdTrans8x8TestBase {
       ASM_REGISTER_STATE_CHECK(
           RunFwdTxfm(test_input_block, test_temp_block, pitch_));
       for (int j = 0; j < 64; ++j) {
-          if (test_temp_block[j] > 0) {
-            test_temp_block[j] += 2;
-            test_temp_block[j] /= 4;
-            test_temp_block[j] *= 4;
-          } else {
-            test_temp_block[j] -= 2;
-            test_temp_block[j] /= 4;
-            test_temp_block[j] *= 4;
-          }
+        if (test_temp_block[j] > 0) {
+          test_temp_block[j] += 2;
+          test_temp_block[j] /= 4;
+          test_temp_block[j] *= 4;
+        } else {
+          test_temp_block[j] -= 2;
+          test_temp_block[j] /= 4;
+          test_temp_block[j] *= 4;
+        }
       }
       if (bit_depth_ == VPX_BITS_8) {
-        ASM_REGISTER_STATE_CHECK(
-            RunInvTxfm(test_temp_block, dst, pitch_));
+        ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
@@ -271,19 +262,18 @@ class FwdTrans8x8TestBase {
         const int diff = dst[j] - src[j];
 #endif
         const int error = diff * diff;
-        if (max_error < error)
-          max_error = error;
+        if (max_error < error) max_error = error;
         total_error += error;
       }
     }
 
     EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
-      << "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
-      << " roundtrip error > 1";
+        << "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
+        << " roundtrip error > 1";
 
-    EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
-      << "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
-      << "error > 1/5 per block";
+    EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8)) / 5, total_error)
+        << "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
+        << "error > 1/5 per block";
   }
 
   void RunExtremalCheck() {
@@ -339,8 +329,7 @@ class FwdTrans8x8TestBase {
       ASM_REGISTER_STATE_CHECK(
           fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
       if (bit_depth_ == VPX_BITS_8) {
-        ASM_REGISTER_STATE_CHECK(
-            RunInvTxfm(test_temp_block, dst, pitch_));
+        ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
@@ -356,8 +345,7 @@ class FwdTrans8x8TestBase {
         const int diff = dst[j] - src[j];
 #endif
         const int error = diff * diff;
-        if (max_error < error)
-          max_error = error;
+        if (max_error < error) max_error = error;
         total_error += error;
 
         const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
@@ -368,7 +356,7 @@ class FwdTrans8x8TestBase {
           << "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
           << "an individual roundtrip error > 1";
 
-      EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
+      EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8)) / 5, total_error)
           << "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
           << " roundtrip error > 1/5 per block";
 
@@ -416,8 +404,8 @@ class FwdTrans8x8TestBase {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
-        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
-                                            pitch_));
+        ASM_REGISTER_STATE_CHECK(
+            RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
 #endif
       }
 
@@ -430,8 +418,7 @@ class FwdTrans8x8TestBase {
 #endif
         const uint32_t error = diff * diff;
         EXPECT_GE(1u << 2 * (bit_depth_ - 8), error)
-            << "Error: 8x8 IDCT has error " << error
-            << " at index " << j;
+            << "Error: 8x8 IDCT has error " << error << " at index " << j;
       }
     }
   }
@@ -459,13 +446,12 @@ class FwdTrans8x8TestBase {
         const uint32_t diff = coeff[j] - coeff_r[j];
         const uint32_t error = diff * diff;
         EXPECT_GE(9u << 2 * (bit_depth_ - 8), error)
-            << "Error: 8x8 DCT has error " << error
-            << " at index " << j;
+            << "Error: 8x8 DCT has error " << error << " at index " << j;
       }
     }
   }
 
-void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+  void CompareInvReference(IdctFunc ref_txfm, int thresh) {
     ACMRandom rnd(ACMRandom::DeterministicSeed());
     const int count_test_block = 10000;
     const int eob = 12;
@@ -482,7 +468,7 @@ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
       for (int j = 0; j < kNumCoeffs; ++j) {
         if (j < eob) {
          // Random values less than the threshold; the sign alternates
          // between test blocks.
-          coeff[scan[j]] = rnd(thresh) * (1-2*(i%2));
+          coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
         } else {
           coeff[scan[j]] = 0;
         }
@@ -502,8 +488,8 @@ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
         ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
-        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
-                                            pitch_));
+        ASM_REGISTER_STATE_CHECK(
+            RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
 #endif
       }
 
@@ -515,9 +501,8 @@ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
         const uint32_t diff = dst[j] - ref[j];
 #endif
         const uint32_t error = diff * diff;
-        EXPECT_EQ(0u, error)
-            << "Error: 8x8 IDCT has error " << error
-            << " at index " << j;
+        EXPECT_EQ(0u, error) << "Error: 8x8 IDCT has error " << error
+                             << " at index " << j;
       }
     }
   }
@@ -528,17 +513,16 @@ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
   int mask_;
 };
 
-class FwdTrans8x8DCT
-    : public FwdTrans8x8TestBase,
-      public ::testing::TestWithParam<Dct8x8Param> {
+class FwdTrans8x8DCT : public FwdTrans8x8TestBase,
+                       public ::testing::TestWithParam<Dct8x8Param> {
  public:
   virtual ~FwdTrans8x8DCT() {}
 
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
     inv_txfm_ = GET_PARAM(1);
-    tx_type_  = GET_PARAM(2);
-    pitch_    = 8;
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 8;
     fwd_txfm_ref = fdct8x8_ref;
     bit_depth_ = GET_PARAM(3);
     mask_ = (1 << bit_depth_) - 1;
@@ -558,37 +542,26 @@ class FwdTrans8x8DCT
   IdctFunc inv_txfm_;
 };
 
-TEST_P(FwdTrans8x8DCT, SignBiasCheck) {
-  RunSignBiasCheck();
-}
+TEST_P(FwdTrans8x8DCT, SignBiasCheck) { RunSignBiasCheck(); }
 
-TEST_P(FwdTrans8x8DCT, RoundTripErrorCheck) {
-  RunRoundTripErrorCheck();
-}
+TEST_P(FwdTrans8x8DCT, RoundTripErrorCheck) { RunRoundTripErrorCheck(); }
 
-TEST_P(FwdTrans8x8DCT, ExtremalCheck) {
-  RunExtremalCheck();
-}
+TEST_P(FwdTrans8x8DCT, ExtremalCheck) { RunExtremalCheck(); }
 
-TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) {
-  RunFwdAccuracyCheck();
-}
+TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) { RunFwdAccuracyCheck(); }
 
-TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) {
-  RunInvAccuracyCheck();
-}
+TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) { RunInvAccuracyCheck(); }
 
-class FwdTrans8x8HT
-    : public FwdTrans8x8TestBase,
-      public ::testing::TestWithParam<Ht8x8Param> {
+class FwdTrans8x8HT : public FwdTrans8x8TestBase,
+                      public ::testing::TestWithParam<Ht8x8Param> {
  public:
   virtual ~FwdTrans8x8HT() {}
 
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
     inv_txfm_ = GET_PARAM(1);
-    tx_type_  = GET_PARAM(2);
-    pitch_    = 8;
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 8;
     fwd_txfm_ref = fht8x8_ref;
     bit_depth_ = GET_PARAM(3);
     mask_ = (1 << bit_depth_) - 1;
@@ -608,21 +581,14 @@ class FwdTrans8x8HT
   IhtFunc inv_txfm_;
 };
 
-TEST_P(FwdTrans8x8HT, SignBiasCheck) {
-  RunSignBiasCheck();
-}
+TEST_P(FwdTrans8x8HT, SignBiasCheck) { RunSignBiasCheck(); }
 
-TEST_P(FwdTrans8x8HT, RoundTripErrorCheck) {
-  RunRoundTripErrorCheck();
-}
+TEST_P(FwdTrans8x8HT, RoundTripErrorCheck) { RunRoundTripErrorCheck(); }
 
-TEST_P(FwdTrans8x8HT, ExtremalCheck) {
-  RunExtremalCheck();
-}
+TEST_P(FwdTrans8x8HT, ExtremalCheck) { RunExtremalCheck(); }
 
-class InvTrans8x8DCT
-    : public FwdTrans8x8TestBase,
-      public ::testing::TestWithParam<Idct8x8Param> {
+class InvTrans8x8DCT : public FwdTrans8x8TestBase,
+                       public ::testing::TestWithParam<Idct8x8Param> {
  public:
   virtual ~InvTrans8x8DCT() {}
 
@@ -662,10 +628,10 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
         make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
 #else
-INSTANTIATE_TEST_CASE_P(
-    C, FwdTrans8x8DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(C, FwdTrans8x8DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct8x8_c,
+                                                     &vpx_idct8x8_64_add_c, 0,
+                                                     VPX_BITS_8)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -695,11 +661,10 @@ INSTANTIATE_TEST_CASE_P(
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    NEON, FwdTrans8x8DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct8x8_neon, &vpx_idct8x8_64_add_neon, 0,
-                   VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(NEON, FwdTrans8x8DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct8x8_neon,
+                                                     &vpx_idct8x8_64_add_neon,
+                                                     0, VPX_BITS_8)));
 #endif  // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
@@ -713,34 +678,33 @@ INSTANTIATE_TEST_CASE_P(
 #endif  // HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    SSE2, FwdTrans8x8DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_sse2, 0,
-                   VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct8x8_sse2,
+                                                     &vpx_idct8x8_64_add_sse2,
+                                                     0, VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8HT,
     ::testing::Values(
         make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 0, VPX_BITS_8),
         make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 1, VPX_BITS_8),
         make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2,
-                   &vp10_iht8x8_64_add_sse2, 3, VPX_BITS_8)));
+        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 3,
+                   VPX_BITS_8)));
 #endif  // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
-        make_tuple(&vpx_highbd_fdct8x8_c,
-                   &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct8x8_sse2,
-                   &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
-        make_tuple(&vpx_highbd_fdct8x8_c,
-                   &idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
-        make_tuple(&vpx_highbd_fdct8x8_sse2,
-                   &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
+    ::testing::Values(make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0,
+                                 VPX_BITS_8),
+                      make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_64_add_10_sse2,
+                                 12, VPX_BITS_10),
+                      make_tuple(&vpx_highbd_fdct8x8_sse2,
+                                 &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+                      make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_64_add_12_sse2,
+                                 12, VPX_BITS_12),
+                      make_tuple(&vpx_highbd_fdct8x8_sse2,
+                                 &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8HT,
@@ -755,30 +719,27 @@ INSTANTIATE_TEST_CASE_P(
 INSTANTIATE_TEST_CASE_P(
     SSE2, InvTrans8x8DCT,
     ::testing::Values(
-        make_tuple(&idct8x8_10_add_10_c,
-                   &idct8x8_10_add_10_sse2, 6225, VPX_BITS_10),
-        make_tuple(&idct8x8_10,
-                   &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
-        make_tuple(&idct8x8_10_add_12_c,
-                   &idct8x8_10_add_12_sse2, 6225, VPX_BITS_12),
-        make_tuple(&idct8x8_12,
-                   &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
+        make_tuple(&idct8x8_10_add_10_c, &idct8x8_10_add_10_sse2, 6225,
+                   VPX_BITS_10),
+        make_tuple(&idct8x8_10, &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
+        make_tuple(&idct8x8_10_add_12_c, &idct8x8_10_add_12_sse2, 6225,
+                   VPX_BITS_12),
+        make_tuple(&idct8x8_12, &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
 #endif  // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
     !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    SSSE3, FwdTrans8x8DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct8x8_ssse3, &vpx_idct8x8_64_add_ssse3, 0,
-                   VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(SSSE3, FwdTrans8x8DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct8x8_ssse3,
+                                                     &vpx_idct8x8_64_add_ssse3,
+                                                     0, VPX_BITS_8)));
 #endif
 
 #if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
-    MSA, FwdTrans8x8DCT,
-    ::testing::Values(
-        make_tuple(&vpx_fdct8x8_msa, &vpx_idct8x8_64_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(MSA, FwdTrans8x8DCT,
+                        ::testing::Values(make_tuple(&vpx_fdct8x8_msa,
+                                                     &vpx_idct8x8_64_add_msa, 0,
+                                                     VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     MSA, FwdTrans8x8HT,
     ::testing::Values(
diff --git a/test/frame_size_tests.cc b/test/frame_size_tests.cc
index d630e050be2c836de8555b8932433936a3e46fd8..cc72d8e514c613d44e65e438b21e33b36e22b23d 100644
--- a/test/frame_size_tests.cc
+++ b/test/frame_size_tests.cc
@@ -13,12 +13,11 @@
 
 namespace {
 
-class VP9FrameSizeTestsLarge
-    : public ::libvpx_test::EncoderTest,
-      public ::testing::Test {
+class VP9FrameSizeTestsLarge : public ::libvpx_test::EncoderTest,
+                               public ::testing::Test {
  protected:
-  VP9FrameSizeTestsLarge() : EncoderTest(&::libvpx_test::kVP10),
-                             expected_res_(VPX_CODEC_OK) {}
+  VP9FrameSizeTestsLarge()
+      : EncoderTest(&::libvpx_test::kVP10), expected_res_(VPX_CODEC_OK) {}
   virtual ~VP9FrameSizeTestsLarge() {}
 
   virtual void SetUp() {
@@ -27,7 +26,7 @@ class VP9FrameSizeTestsLarge
   }
 
   virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
-                                  const libvpx_test::VideoSource& /*video*/,
+                                  const libvpx_test::VideoSource & /*video*/,
                                   libvpx_test::Decoder *decoder) {
     EXPECT_EQ(expected_res_, res_dec) << decoder->DecodeError();
     return !::testing::Test::HasFailure();
@@ -67,13 +66,13 @@ TEST_F(VP9FrameSizeTestsLarge, ValidSizes) {
   expected_res_ = VPX_CODEC_OK;
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 #else
-  // This test produces a pretty large single frame allocation,  (roughly
-  // 25 megabits). The encoder allocates a good number of these frames
-  // one for each lag in frames (for 2 pass), and then one for each possible
-  // reference buffer (8) - we can end up with up to 30 buffers of roughly this
-  // size or almost 1 gig of memory.
-  // In total the allocations will exceed 2GiB which may cause a failure with
-  // mingw + wine, use a smaller size in that case.
+// This test produces a pretty large single frame allocation (roughly
+// 25 megabytes). The encoder allocates a good number of these frames:
+// one for each lag in frames (for two-pass), and then one for each possible
+// reference buffer (8) - we can end up with up to 30 buffers of roughly this
+// size, or almost a gigabyte of memory.
+// In total the allocations will exceed 2 GiB, which may cause a failure with
+// mingw + wine; use a smaller size in that case.
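+// (For example, a 4096x4096 I420 frame is 4096 * 4096 * 3 / 2 bytes, about
+// 25 MB; 30 such buffers come to roughly 750 MB.)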
 #if defined(_WIN32) && !defined(_WIN64) || defined(__OS2__)
   video.SetSize(4096, 3072);
 #else
diff --git a/test/i420_video_source.h b/test/i420_video_source.h
index 0a184805c2f5d0bcfed0f871fab8ea744f7c3108..49573823b4f5701c246f4d43aeb51b9ee155aad5 100644
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -21,14 +21,11 @@ namespace libvpx_test {
 // so that we can do actual file encodes.
 class I420VideoSource : public YUVVideoSource {
  public:
-  I420VideoSource(const std::string &file_name,
-                  unsigned int width, unsigned int height,
-                  int rate_numerator, int rate_denominator,
+  I420VideoSource(const std::string &file_name, unsigned int width,
+                  unsigned int height, int rate_numerator, int rate_denominator,
                   unsigned int start, int limit)
-      : YUVVideoSource(file_name, VPX_IMG_FMT_I420,
-                       width, height,
-                       rate_numerator, rate_denominator,
-                       start, limit) {}
+      : YUVVideoSource(file_name, VPX_IMG_FMT_I420, width, height,
+                       rate_numerator, rate_denominator, start, limit) {}
 };
 
 }  // namespace libvpx_test
diff --git a/test/idct8x8_test.cc b/test/idct8x8_test.cc
index 7f9d751d650a3ac471553285e96e359f1432e40b..b7256f2c5e7e626ec7b7638843d3a860982b1310 100644
--- a/test/idct8x8_test.cc
+++ b/test/idct8x8_test.cc
@@ -37,9 +37,8 @@ void reference_dct_1d(double input[8], double output[8]) {
   for (int k = 0; k < 8; k++) {
     output[k] = 0.0;
     for (int n = 0; n < 8; n++)
-      output[k] += input[n]*cos(kPi*(2*n+1)*k/16.0);
-    if (k == 0)
-      output[k] = output[k]*kInvSqrt2;
+      output[k] += input[n] * cos(kPi * (2 * n + 1) * k / 16.0);
+    if (k == 0) output[k] = output[k] * kInvSqrt2;
   }
 }
 
@@ -47,24 +46,19 @@ void reference_dct_2d(int16_t input[64], double output[64]) {
   // First transform columns
   for (int i = 0; i < 8; ++i) {
     double temp_in[8], temp_out[8];
-    for (int j = 0; j < 8; ++j)
-      temp_in[j] = input[j*8 + i];
+    for (int j = 0; j < 8; ++j) temp_in[j] = input[j * 8 + i];
     reference_dct_1d(temp_in, temp_out);
-    for (int j = 0; j < 8; ++j)
-      output[j*8 + i] = temp_out[j];
+    for (int j = 0; j < 8; ++j) output[j * 8 + i] = temp_out[j];
   }
   // Then transform rows
   for (int i = 0; i < 8; ++i) {
     double temp_in[8], temp_out[8];
-    for (int j = 0; j < 8; ++j)
-      temp_in[j] = output[j + i*8];
+    for (int j = 0; j < 8; ++j) temp_in[j] = output[j + i * 8];
     reference_dct_1d(temp_in, temp_out);
-    for (int j = 0; j < 8; ++j)
-      output[j + i*8] = temp_out[j];
+    for (int j = 0; j < 8; ++j) output[j + i * 8] = temp_out[j];
   }
   // Scale by 2 so the reference matches the scaling of the transform
   // under test.
-  for (int i = 0; i < 64; ++i)
-    output[i] *= 2;
+  for (int i = 0; i < 64; ++i) output[i] *= 2;
 }
 
 TEST(VP9Idct8x8Test, AccuracyCheck) {
@@ -81,19 +75,16 @@ TEST(VP9Idct8x8Test, AccuracyCheck) {
       dst[j] = rnd.Rand8();
     }
     // Initialize a test block with input range [-255, 255].
-    for (int j = 0; j < 64; ++j)
-      input[j] = src[j] - dst[j];
+    for (int j = 0; j < 64; ++j) input[j] = src[j] - dst[j];
 
     reference_dct_2d(input, output_r);
-    for (int j = 0; j < 64; ++j)
-      coeff[j] = round(output_r[j]);
+    for (int j = 0; j < 64; ++j) coeff[j] = round(output_r[j]);
     vpx_idct8x8_64_add_c(coeff, dst, 8);
     for (int j = 0; j < 64; ++j) {
       const int diff = dst[j] - src[j];
       const int error = diff * diff;
-      EXPECT_GE(1, error)
-          << "Error: 8x8 FDCT/IDCT has error " << error
-          << " at index " << j;
+      EXPECT_GE(1, error) << "Error: 8x8 FDCT/IDCT has error " << error
+                          << " at index " << j;
     }
   }
 }
diff --git a/test/intrapred_test.cc b/test/intrapred_test.cc
index 21abaa8cca7c24fa4c2dce5ba9dcbd27904c7245..535a568cd6236d502fdc8d10e53c5ca3ca4f8c44 100644
--- a/test/intrapred_test.cc
+++ b/test/intrapred_test.cc
@@ -43,14 +43,14 @@ class VP9IntraPredBase {
         *error_count += ref_dst_[x + y * stride_] != dst_[x + y * stride_];
         if (*error_count == 1) {
           ASSERT_EQ(ref_dst_[x + y * stride_], dst_[x + y * stride_])
-              << " Failed on Test Case Number "<< test_case_number;
+              << " Failed on Test Case Number " << test_case_number;
         }
       }
     }
   }
 
-  void RunTest(uint16_t* left_col, uint16_t* above_data,
-               uint16_t* dst, uint16_t* ref_dst) {
+  void RunTest(uint16_t *left_col, uint16_t *above_data, uint16_t *dst,
+               uint16_t *ref_dst) {
     ACMRandom rnd(ACMRandom::DeterministicSeed());
     left_col_ = left_col;
     dst_ = dst;
@@ -59,7 +59,7 @@ class VP9IntraPredBase {
     int error_count = 0;
     for (int i = 0; i < count_test_block; ++i) {
       // Fill edges with random data; try saturated values first.
-      for (int x = -1; x <= block_size_*2; x++) {
+      for (int x = -1; x <= block_size_ * 2; x++) {
         if (i == 0) {
           above_row_[x] = mask_;
         } else {
@@ -88,30 +88,28 @@ class VP9IntraPredBase {
   int mask_;
 };
 
-typedef void (*intra_pred_fn_t)(
-      uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
-      const uint16_t *left, int bps);
-typedef std::tr1::tuple<intra_pred_fn_t,
-                        intra_pred_fn_t, int, int> intra_pred_params_t;
-class VP9IntraPredTest
-    : public VP9IntraPredBase,
-      public ::testing::TestWithParam<intra_pred_params_t> {
-
+typedef void (*intra_pred_fn_t)(uint16_t *dst, ptrdiff_t stride,
+                                const uint16_t *above, const uint16_t *left,
+                                int bps);
+typedef std::tr1::tuple<intra_pred_fn_t, intra_pred_fn_t, int, int>
+    intra_pred_params_t;
+class VP9IntraPredTest : public VP9IntraPredBase,
+                         public ::testing::TestWithParam<intra_pred_params_t> {
   virtual void SetUp() {
-    pred_fn_    = GET_PARAM(0);
-    ref_fn_     = GET_PARAM(1);
+    pred_fn_ = GET_PARAM(0);
+    ref_fn_ = GET_PARAM(1);
     block_size_ = GET_PARAM(2);
-    bit_depth_  = GET_PARAM(3);
-    stride_     = block_size_ * 3;
-    mask_       = (1 << bit_depth_) - 1;
+    bit_depth_ = GET_PARAM(3);
+    stride_ = block_size_ * 3;
+    mask_ = (1 << bit_depth_) - 1;
   }
 
   virtual void Predict(PREDICTION_MODE mode) {
     const uint16_t *const_above_row = above_row_;
     const uint16_t *const_left_col = left_col_;
     ref_fn_(ref_dst_, stride_, const_above_row, const_left_col, bit_depth_);
-    ASM_REGISTER_STATE_CHECK(pred_fn_(dst_, stride_, const_above_row,
-                                      const_left_col, bit_depth_));
+    ASM_REGISTER_STATE_CHECK(
+        pred_fn_(dst_, stride_, const_above_row, const_left_col, bit_depth_));
   }
   intra_pred_fn_t pred_fn_;
   intra_pred_fn_t ref_fn_;
@@ -120,8 +118,8 @@ class VP9IntraPredTest
 
 TEST_P(VP9IntraPredTest, IntraPredTests) {
   // The maximum block size is 32.
-  DECLARE_ALIGNED(16, uint16_t, left_col[2*32]);
-  DECLARE_ALIGNED(16, uint16_t, above_data[2*32+32]);
+  DECLARE_ALIGNED(16, uint16_t, left_col[2 * 32]);
+  DECLARE_ALIGNED(16, uint16_t, above_data[2 * 32 + 32]);
   DECLARE_ALIGNED(16, uint16_t, dst[3 * 32 * 32]);
   DECLARE_ALIGNED(16, uint16_t, ref_dst[3 * 32 * 32]);
   RunTest(left_col, above_data, dst, ref_dst);
@@ -132,98 +130,86 @@ using std::tr1::make_tuple;
 #if HAVE_SSE2
 #if CONFIG_VPX_HIGHBITDEPTH
 #if CONFIG_USE_X86INC
-INSTANTIATE_TEST_CASE_P(SSE2_TO_C_8, VP9IntraPredTest,
-                        ::testing::Values(
-                            make_tuple(&vpx_highbd_dc_predictor_32x32_sse2,
-                                       &vpx_highbd_dc_predictor_32x32_c, 32, 8),
-                            make_tuple(&vpx_highbd_tm_predictor_16x16_sse2,
-                                       &vpx_highbd_tm_predictor_16x16_c, 16, 8),
-                            make_tuple(&vpx_highbd_tm_predictor_32x32_sse2,
-                                       &vpx_highbd_tm_predictor_32x32_c, 32, 8),
-                            make_tuple(&vpx_highbd_dc_predictor_4x4_sse2,
-                                       &vpx_highbd_dc_predictor_4x4_c, 4, 8),
-                            make_tuple(&vpx_highbd_dc_predictor_8x8_sse2,
-                                       &vpx_highbd_dc_predictor_8x8_c, 8, 8),
-                            make_tuple(&vpx_highbd_dc_predictor_16x16_sse2,
-                                       &vpx_highbd_dc_predictor_16x16_c, 16, 8),
-                            make_tuple(&vpx_highbd_v_predictor_4x4_sse2,
-                                       &vpx_highbd_v_predictor_4x4_c, 4, 8),
-                            make_tuple(&vpx_highbd_v_predictor_8x8_sse2,
-                                       &vpx_highbd_v_predictor_8x8_c, 8, 8),
-                            make_tuple(&vpx_highbd_v_predictor_16x16_sse2,
-                                       &vpx_highbd_v_predictor_16x16_c, 16, 8),
-                            make_tuple(&vpx_highbd_v_predictor_32x32_sse2,
-                                       &vpx_highbd_v_predictor_32x32_c, 32, 8),
-                            make_tuple(&vpx_highbd_tm_predictor_4x4_sse2,
-                                       &vpx_highbd_tm_predictor_4x4_c, 4, 8),
-                            make_tuple(&vpx_highbd_tm_predictor_8x8_sse2,
-                                       &vpx_highbd_tm_predictor_8x8_c, 8, 8)));
-
-INSTANTIATE_TEST_CASE_P(SSE2_TO_C_10, VP9IntraPredTest,
-                        ::testing::Values(
-                            make_tuple(&vpx_highbd_dc_predictor_32x32_sse2,
-                                       &vpx_highbd_dc_predictor_32x32_c, 32,
-                                       10),
-                            make_tuple(&vpx_highbd_tm_predictor_16x16_sse2,
-                                       &vpx_highbd_tm_predictor_16x16_c, 16,
-                                       10),
-                            make_tuple(&vpx_highbd_tm_predictor_32x32_sse2,
-                                       &vpx_highbd_tm_predictor_32x32_c, 32,
-                                       10),
-                            make_tuple(&vpx_highbd_dc_predictor_4x4_sse2,
-                                       &vpx_highbd_dc_predictor_4x4_c, 4, 10),
-                            make_tuple(&vpx_highbd_dc_predictor_8x8_sse2,
-                                       &vpx_highbd_dc_predictor_8x8_c, 8, 10),
-                            make_tuple(&vpx_highbd_dc_predictor_16x16_sse2,
-                                       &vpx_highbd_dc_predictor_16x16_c, 16,
-                                       10),
-                            make_tuple(&vpx_highbd_v_predictor_4x4_sse2,
-                                       &vpx_highbd_v_predictor_4x4_c, 4, 10),
-                            make_tuple(&vpx_highbd_v_predictor_8x8_sse2,
-                                       &vpx_highbd_v_predictor_8x8_c, 8, 10),
-                            make_tuple(&vpx_highbd_v_predictor_16x16_sse2,
-                                       &vpx_highbd_v_predictor_16x16_c, 16,
-                                       10),
-                            make_tuple(&vpx_highbd_v_predictor_32x32_sse2,
-                                       &vpx_highbd_v_predictor_32x32_c, 32,
-                                       10),
-                            make_tuple(&vpx_highbd_tm_predictor_4x4_sse2,
-                                       &vpx_highbd_tm_predictor_4x4_c, 4, 10),
-                            make_tuple(&vpx_highbd_tm_predictor_8x8_sse2,
-                                       &vpx_highbd_tm_predictor_8x8_c, 8, 10)));
-
-INSTANTIATE_TEST_CASE_P(SSE2_TO_C_12, VP9IntraPredTest,
-                        ::testing::Values(
-                            make_tuple(&vpx_highbd_dc_predictor_32x32_sse2,
-                                       &vpx_highbd_dc_predictor_32x32_c, 32,
-                                       12),
-                            make_tuple(&vpx_highbd_tm_predictor_16x16_sse2,
-                                       &vpx_highbd_tm_predictor_16x16_c, 16,
-                                       12),
-                            make_tuple(&vpx_highbd_tm_predictor_32x32_sse2,
-                                       &vpx_highbd_tm_predictor_32x32_c, 32,
-                                       12),
-                            make_tuple(&vpx_highbd_dc_predictor_4x4_sse2,
-                                       &vpx_highbd_dc_predictor_4x4_c, 4, 12),
-                            make_tuple(&vpx_highbd_dc_predictor_8x8_sse2,
-                                       &vpx_highbd_dc_predictor_8x8_c, 8, 12),
-                            make_tuple(&vpx_highbd_dc_predictor_16x16_sse2,
-                                       &vpx_highbd_dc_predictor_16x16_c, 16,
-                                       12),
-                            make_tuple(&vpx_highbd_v_predictor_4x4_sse2,
-                                       &vpx_highbd_v_predictor_4x4_c, 4, 12),
-                            make_tuple(&vpx_highbd_v_predictor_8x8_sse2,
-                                       &vpx_highbd_v_predictor_8x8_c, 8, 12),
-                            make_tuple(&vpx_highbd_v_predictor_16x16_sse2,
-                                       &vpx_highbd_v_predictor_16x16_c, 16,
-                                       12),
-                            make_tuple(&vpx_highbd_v_predictor_32x32_sse2,
-                                       &vpx_highbd_v_predictor_32x32_c, 32,
-                                       12),
-                            make_tuple(&vpx_highbd_tm_predictor_4x4_sse2,
-                                       &vpx_highbd_tm_predictor_4x4_c, 4, 12),
-                            make_tuple(&vpx_highbd_tm_predictor_8x8_sse2,
-                                       &vpx_highbd_tm_predictor_8x8_c, 8, 12)));
+INSTANTIATE_TEST_CASE_P(
+    SSE2_TO_C_8, VP9IntraPredTest,
+    ::testing::Values(make_tuple(&vpx_highbd_dc_predictor_32x32_sse2,
+                                 &vpx_highbd_dc_predictor_32x32_c, 32, 8),
+                      make_tuple(&vpx_highbd_tm_predictor_16x16_sse2,
+                                 &vpx_highbd_tm_predictor_16x16_c, 16, 8),
+                      make_tuple(&vpx_highbd_tm_predictor_32x32_sse2,
+                                 &vpx_highbd_tm_predictor_32x32_c, 32, 8),
+                      make_tuple(&vpx_highbd_dc_predictor_4x4_sse2,
+                                 &vpx_highbd_dc_predictor_4x4_c, 4, 8),
+                      make_tuple(&vpx_highbd_dc_predictor_8x8_sse2,
+                                 &vpx_highbd_dc_predictor_8x8_c, 8, 8),
+                      make_tuple(&vpx_highbd_dc_predictor_16x16_sse2,
+                                 &vpx_highbd_dc_predictor_16x16_c, 16, 8),
+                      make_tuple(&vpx_highbd_v_predictor_4x4_sse2,
+                                 &vpx_highbd_v_predictor_4x4_c, 4, 8),
+                      make_tuple(&vpx_highbd_v_predictor_8x8_sse2,
+                                 &vpx_highbd_v_predictor_8x8_c, 8, 8),
+                      make_tuple(&vpx_highbd_v_predictor_16x16_sse2,
+                                 &vpx_highbd_v_predictor_16x16_c, 16, 8),
+                      make_tuple(&vpx_highbd_v_predictor_32x32_sse2,
+                                 &vpx_highbd_v_predictor_32x32_c, 32, 8),
+                      make_tuple(&vpx_highbd_tm_predictor_4x4_sse2,
+                                 &vpx_highbd_tm_predictor_4x4_c, 4, 8),
+                      make_tuple(&vpx_highbd_tm_predictor_8x8_sse2,
+                                 &vpx_highbd_tm_predictor_8x8_c, 8, 8)));
+
+INSTANTIATE_TEST_CASE_P(
+    SSE2_TO_C_10, VP9IntraPredTest,
+    ::testing::Values(make_tuple(&vpx_highbd_dc_predictor_32x32_sse2,
+                                 &vpx_highbd_dc_predictor_32x32_c, 32, 10),
+                      make_tuple(&vpx_highbd_tm_predictor_16x16_sse2,
+                                 &vpx_highbd_tm_predictor_16x16_c, 16, 10),
+                      make_tuple(&vpx_highbd_tm_predictor_32x32_sse2,
+                                 &vpx_highbd_tm_predictor_32x32_c, 32, 10),
+                      make_tuple(&vpx_highbd_dc_predictor_4x4_sse2,
+                                 &vpx_highbd_dc_predictor_4x4_c, 4, 10),
+                      make_tuple(&vpx_highbd_dc_predictor_8x8_sse2,
+                                 &vpx_highbd_dc_predictor_8x8_c, 8, 10),
+                      make_tuple(&vpx_highbd_dc_predictor_16x16_sse2,
+                                 &vpx_highbd_dc_predictor_16x16_c, 16, 10),
+                      make_tuple(&vpx_highbd_v_predictor_4x4_sse2,
+                                 &vpx_highbd_v_predictor_4x4_c, 4, 10),
+                      make_tuple(&vpx_highbd_v_predictor_8x8_sse2,
+                                 &vpx_highbd_v_predictor_8x8_c, 8, 10),
+                      make_tuple(&vpx_highbd_v_predictor_16x16_sse2,
+                                 &vpx_highbd_v_predictor_16x16_c, 16, 10),
+                      make_tuple(&vpx_highbd_v_predictor_32x32_sse2,
+                                 &vpx_highbd_v_predictor_32x32_c, 32, 10),
+                      make_tuple(&vpx_highbd_tm_predictor_4x4_sse2,
+                                 &vpx_highbd_tm_predictor_4x4_c, 4, 10),
+                      make_tuple(&vpx_highbd_tm_predictor_8x8_sse2,
+                                 &vpx_highbd_tm_predictor_8x8_c, 8, 10)));
+
+INSTANTIATE_TEST_CASE_P(
+    SSE2_TO_C_12, VP9IntraPredTest,
+    ::testing::Values(make_tuple(&vpx_highbd_dc_predictor_32x32_sse2,
+                                 &vpx_highbd_dc_predictor_32x32_c, 32, 12),
+                      make_tuple(&vpx_highbd_tm_predictor_16x16_sse2,
+                                 &vpx_highbd_tm_predictor_16x16_c, 16, 12),
+                      make_tuple(&vpx_highbd_tm_predictor_32x32_sse2,
+                                 &vpx_highbd_tm_predictor_32x32_c, 32, 12),
+                      make_tuple(&vpx_highbd_dc_predictor_4x4_sse2,
+                                 &vpx_highbd_dc_predictor_4x4_c, 4, 12),
+                      make_tuple(&vpx_highbd_dc_predictor_8x8_sse2,
+                                 &vpx_highbd_dc_predictor_8x8_c, 8, 12),
+                      make_tuple(&vpx_highbd_dc_predictor_16x16_sse2,
+                                 &vpx_highbd_dc_predictor_16x16_c, 16, 12),
+                      make_tuple(&vpx_highbd_v_predictor_4x4_sse2,
+                                 &vpx_highbd_v_predictor_4x4_c, 4, 12),
+                      make_tuple(&vpx_highbd_v_predictor_8x8_sse2,
+                                 &vpx_highbd_v_predictor_8x8_c, 8, 12),
+                      make_tuple(&vpx_highbd_v_predictor_16x16_sse2,
+                                 &vpx_highbd_v_predictor_16x16_c, 16, 12),
+                      make_tuple(&vpx_highbd_v_predictor_32x32_sse2,
+                                 &vpx_highbd_v_predictor_32x32_c, 32, 12),
+                      make_tuple(&vpx_highbd_tm_predictor_4x4_sse2,
+                                 &vpx_highbd_tm_predictor_4x4_c, 4, 12),
+                      make_tuple(&vpx_highbd_tm_predictor_8x8_sse2,
+                                 &vpx_highbd_tm_predictor_8x8_c, 8, 12)));
 
 #endif  // CONFIG_USE_X86INC
 #endif  // CONFIG_VPX_HIGHBITDEPTH
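
Each tuple above pairs an SSE2 predictor with its C reference, plus the block
size and bit depth; the fixture runs both over identical neighbor pixels and
requires bit-exact output. A minimal sketch of that comparison, assuming a
simplified signature modeled on the vpx_highbd_*_predictor functions
(PredictorsMatch and its buffers are illustrative, not the actual
VP9IntraPredTest fixture):

// Sketch: run an optimized high-bitdepth predictor against its C reference
// and require bit-exact output. |above| and |left| must hold at least
// |block_size| neighbor samples each.
#include <cstddef>
#include <cstdint>
#include <cstring>

typedef void (*HighbdPredFunc)(uint16_t *dst, ptrdiff_t stride,
                               const uint16_t *above, const uint16_t *left,
                               int bd);

bool PredictorsMatch(HighbdPredFunc simd, HighbdPredFunc ref, int block_size,
                     int bit_depth, const uint16_t *above,
                     const uint16_t *left) {
  const ptrdiff_t stride = 32;  // widest block instantiated above
  uint16_t out_simd[32 * 32] = { 0 };
  uint16_t out_ref[32 * 32] = { 0 };
  simd(out_simd, stride, above, left, bit_depth);
  ref(out_ref, stride, above, left, bit_depth);
  for (int r = 0; r < block_size; ++r) {
    if (memcmp(&out_simd[r * stride], &out_ref[r * stride],
               block_size * sizeof(uint16_t)) != 0) {
      return false;
    }
  }
  return true;
}
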
diff --git a/test/ivf_video_source.h b/test/ivf_video_source.h
index 824a39d7e669c672b708314f7ef42bf3bb36a617..b87624a11fc70d50d455e25d1b3494fc941b215a 100644
--- a/test/ivf_video_source.h
+++ b/test/ivf_video_source.h
@@ -29,19 +29,13 @@ static unsigned int MemGetLe32(const uint8_t *mem) {
 class IVFVideoSource : public CompressedVideoSource {
  public:
   explicit IVFVideoSource(const std::string &file_name)
-      : file_name_(file_name),
-        input_file_(NULL),
-        compressed_frame_buf_(NULL),
-        frame_sz_(0),
-        frame_(0),
-        end_of_file_(false) {
-  }
+      : file_name_(file_name), input_file_(NULL), compressed_frame_buf_(NULL),
+        frame_sz_(0), frame_(0), end_of_file_(false) {}
 
   virtual ~IVFVideoSource() {
     delete[] compressed_frame_buf_;
 
-    if (input_file_)
-      fclose(input_file_);
+    if (input_file_) fclose(input_file_);
   }
 
   virtual void Init() {
@@ -54,15 +48,16 @@ class IVFVideoSource : public CompressedVideoSource {
   virtual void Begin() {
     input_file_ = OpenTestDataFile(file_name_);
     ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. Filename: "
-        << file_name_;
+                                     << file_name_;
 
     // Read file header
     uint8_t file_hdr[kIvfFileHdrSize];
     ASSERT_EQ(kIvfFileHdrSize, fread(file_hdr, 1, kIvfFileHdrSize, input_file_))
         << "File header read failed.";
     // Check file header
-    ASSERT_TRUE(file_hdr[0] == 'D' && file_hdr[1] == 'K' && file_hdr[2] == 'I'
-                && file_hdr[3] == 'F') << "Input is not an IVF file.";
+    ASSERT_TRUE(file_hdr[0] == 'D' && file_hdr[1] == 'K' &&
+                file_hdr[2] == 'I' && file_hdr[3] == 'F')
+        << "Input is not an IVF file.";
 
     FillFrame();
   }
@@ -76,8 +71,8 @@ class IVFVideoSource : public CompressedVideoSource {
     ASSERT_TRUE(input_file_ != NULL);
     uint8_t frame_hdr[kIvfFrameHdrSize];
     // Check frame header and read a frame from input_file.
-    if (fread(frame_hdr, 1, kIvfFrameHdrSize, input_file_)
-        != kIvfFrameHdrSize) {
+    if (fread(frame_hdr, 1, kIvfFrameHdrSize, input_file_) !=
+        kIvfFrameHdrSize) {
       end_of_file_ = true;
     } else {
       end_of_file_ = false;
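
Begin() above validates the 32-byte IVF file header (the 'DKIF' magic) before
FillFrame() starts pulling per-frame headers. For reference, a sketch of the
fields being checked, assuming the standard IVF container layout (the offsets
come from the IVF spec, not from this header file):

// Fixed-layout IVF file header as validated in Begin(); all multi-byte fields
// are little-endian, read here with helpers matching those in the test header.
#include <cstdint>

static uint16_t MemGetLe16(const uint8_t *mem) {
  return static_cast<uint16_t>(mem[0] | (mem[1] << 8));
}
static uint32_t MemGetLe32(const uint8_t *mem) {
  return mem[0] | (mem[1] << 8) | (mem[2] << 16) |
         (static_cast<uint32_t>(mem[3]) << 24);
}

struct IvfFileHeader {
  uint32_t fourcc;       // codec FourCC at offset 8, e.g. VP80/VP90
  uint16_t width;        // offset 12
  uint16_t height;       // offset 14
  uint32_t frame_count;  // offset 24
};

// Returns true and fills |out| only when |hdr| begins with 'DKIF'.
bool ParseIvfFileHeader(const uint8_t hdr[32], IvfFileHeader *out) {
  if (hdr[0] != 'D' || hdr[1] != 'K' || hdr[2] != 'I' || hdr[3] != 'F')
    return false;
  out->fourcc = MemGetLe32(hdr + 8);
  out->width = MemGetLe16(hdr + 12);
  out->height = MemGetLe16(hdr + 14);
  out->frame_count = MemGetLe32(hdr + 24);
  return true;
}
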
diff --git a/test/lossless_test.cc b/test/lossless_test.cc
index 9dd43cb2ae9dfcb9d2477a0039d04aab9af07708..b3f23076a51e3f6d7d09bc0cbfc43b0437ddbce1 100644
--- a/test/lossless_test.cc
+++ b/test/lossless_test.cc
@@ -21,15 +21,13 @@ namespace {
 
 const int kMaxPsnr = 100;
 
-class LosslessTest : public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class LosslessTest
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
  protected:
   LosslessTest()
-      : EncoderTest(GET_PARAM(0)),
-        psnr_(kMaxPsnr),
-        nframes_(0),
-        encoding_mode_(GET_PARAM(1)) {
-  }
+      : EncoderTest(GET_PARAM(0)), psnr_(kMaxPsnr), nframes_(0),
+        encoding_mode_(GET_PARAM(1)) {}
 
   virtual ~LosslessTest() {}
 
@@ -55,13 +53,10 @@ class LosslessTest : public ::libvpx_test::EncoderTest,
   }
 
   virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
-    if (pkt->data.psnr.psnr[0] < psnr_)
-      psnr_= pkt->data.psnr.psnr[0];
+    if (pkt->data.psnr.psnr[0] < psnr_) psnr_ = pkt->data.psnr.psnr[0];
   }
 
-  double GetMinPsnr() const {
-      return psnr_;
-  }
+  double GetMinPsnr() const { return psnr_; }
 
  private:
   double psnr_;
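
The PSNRPktHook above tracks the minimum PSNR across every encoded packet,
starting from kMaxPsnr; libvpx reports 100 dB for a bit-exact reconstruction,
so a lossless run passes only if no frame ever lowers the minimum. A worked
sketch of that invariant with hypothetical per-frame values:

// Min-tracking as in PSNRPktHook: start at the 100 dB ceiling and lower it
// with each packet. The frame values below are hypothetical.
#include <algorithm>
#include <cassert>

int main() {
  const double kMaxPsnr = 100.0;  // PSNR libvpx reports for an exact match
  double min_psnr = kMaxPsnr;
  const double frame_psnr[] = { 100.0, 100.0, 100.0 };
  for (double p : frame_psnr) min_psnr = std::min(min_psnr, p);
  assert(min_psnr >= kMaxPsnr);  // holds only when every frame was lossless
  return 0;
}
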
diff --git a/test/lpf_8_test.cc b/test/lpf_8_test.cc
index 048dca8a9f40b83da217fe6110a65e585109bb95..5184fcc7dfb6e5b7c8aa4996b8b1e5e802932bad 100644
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -207,21 +207,18 @@ TEST_P(Loop8Test6Param, OperationCheck) {
   for (int i = 0; i < count_test_block; ++i) {
     int err_count = 0;
     uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                   tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
-    int32_t p = kNumCoeffs/32;
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+    int32_t p = kNumCoeffs / 32;
 
     uint16_t tmp_s[kNumCoeffs];
     int j = 0;
@@ -257,7 +254,7 @@ TEST_P(Loop8Test6Param, OperationCheck) {
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_, bd));
 #else
-    ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh, count_);
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -305,20 +302,17 @@ TEST_P(Loop8Test6Param, ValueCheck) {
   for (int i = 0; i < count_test_block; ++i) {
     int err_count = 0;
     uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                   tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     int32_t p = kNumCoeffs / 32;
     for (int j = 0; j < kNumCoeffs; ++j) {
       s[j] = rnd.Rand16() & mask_;
@@ -329,7 +323,7 @@ TEST_P(Loop8Test6Param, ValueCheck) {
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_, bd));
 #else
-    ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh, count_);
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -355,43 +349,37 @@ TEST_P(Loop8Test9Param, OperationCheck) {
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
-  DECLARE_ALIGNED(8,  uint8_t,  s[kNumCoeffs]);
-  DECLARE_ALIGNED(8,  uint8_t,  ref_s[kNumCoeffs]);
+  DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
+  DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
     int err_count = 0;
     uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit0[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit1[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     int32_t p = kNumCoeffs / 32;
     uint16_t tmp_s[kNumCoeffs];
     int j = 0;
@@ -424,17 +412,16 @@ TEST_P(Loop8Test9Param, OperationCheck) {
       ref_s[j] = s[j];
     }
 #if CONFIG_VPX_HIGHBITDEPTH
-    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1, bd);
-    ASM_REGISTER_STATE_CHECK(
-        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1, bd));
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+                       limit1, thresh1, bd);
+    ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+                                            thresh0, blimit1, limit1, thresh1,
+                                            bd));
 #else
-    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1);
-    ASM_REGISTER_STATE_CHECK(
-        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1));
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+                       limit1, thresh1);
+    ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+                                            thresh0, blimit1, limit1, thresh1));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
@@ -457,43 +444,37 @@ TEST_P(Loop8Test9Param, ValueCheck) {
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
-  DECLARE_ALIGNED(8,  uint8_t, s[kNumCoeffs]);
-  DECLARE_ALIGNED(8,  uint8_t, ref_s[kNumCoeffs]);
+  DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
+  DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
     int err_count = 0;
     uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit0[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit1[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     int32_t p = kNumCoeffs / 32;  // TODO(pdlf) can we have non-square here?
     for (int j = 0; j < kNumCoeffs; ++j) {
       s[j] = rnd.Rand16() & mask_;
@@ -501,17 +482,16 @@ TEST_P(Loop8Test9Param, ValueCheck) {
     }
 #if CONFIG_VPX_HIGHBITDEPTH
     const int32_t bd = bit_depth_;
-    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1, bd);
-    ASM_REGISTER_STATE_CHECK(
-        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
-                       thresh0, blimit1, limit1, thresh1, bd));
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+                       limit1, thresh1, bd);
+    ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+                                            thresh0, blimit1, limit1, thresh1,
+                                            bd));
 #else
-    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1);
-    ASM_REGISTER_STATE_CHECK(
-        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1));
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+                       limit1, thresh1);
+    ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+                                            thresh0, blimit1, limit1, thresh1));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
@@ -546,8 +526,7 @@ INSTANTIATE_TEST_CASE_P(
                    &vpx_highbd_lpf_horizontal_16_c, 8, 2),
         make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
                    &vpx_highbd_lpf_vertical_8_c, 8, 1),
-        make_tuple(&wrapper_vertical_16_sse2,
-                   &wrapper_vertical_16_c, 8, 1),
+        make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 8, 1),
         make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
                    &vpx_highbd_lpf_horizontal_4_c, 10, 1),
         make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
@@ -560,8 +539,7 @@ INSTANTIATE_TEST_CASE_P(
                    &vpx_highbd_lpf_horizontal_16_c, 10, 2),
         make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
                    &vpx_highbd_lpf_vertical_8_c, 10, 1),
-        make_tuple(&wrapper_vertical_16_sse2,
-                   &wrapper_vertical_16_c, 10, 1),
+        make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 10, 1),
         make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
                    &vpx_highbd_lpf_horizontal_4_c, 12, 1),
         make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
@@ -574,14 +552,13 @@ INSTANTIATE_TEST_CASE_P(
                    &vpx_highbd_lpf_horizontal_16_c, 12, 2),
         make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
                    &vpx_highbd_lpf_vertical_8_c, 12, 1),
-        make_tuple(&wrapper_vertical_16_sse2,
-                   &wrapper_vertical_16_c, 12, 1),
-        make_tuple(&wrapper_vertical_16_dual_sse2,
-                   &wrapper_vertical_16_dual_c, 8, 1),
-        make_tuple(&wrapper_vertical_16_dual_sse2,
-                   &wrapper_vertical_16_dual_c, 10, 1),
-        make_tuple(&wrapper_vertical_16_dual_sse2,
-                   &wrapper_vertical_16_dual_c, 12, 1)));
+        make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 12, 1),
+        make_tuple(&wrapper_vertical_16_dual_sse2, &wrapper_vertical_16_dual_c,
+                   8, 1),
+        make_tuple(&wrapper_vertical_16_dual_sse2, &wrapper_vertical_16_dual_c,
+                   10, 1),
+        make_tuple(&wrapper_vertical_16_dual_sse2, &wrapper_vertical_16_dual_c,
+                   12, 1)));
 #else
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test6Param,
@@ -591,61 +568,59 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(&vpx_lpf_horizontal_16_sse2, &vpx_lpf_horizontal_16_c, 8, 2),
         make_tuple(&vpx_lpf_vertical_8_sse2, &vpx_lpf_vertical_8_c, 8, 1),
         make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 8, 1),
-        make_tuple(&wrapper_vertical_16_dual_sse2,
-                   &wrapper_vertical_16_dual_c, 8, 1)));
+        make_tuple(&wrapper_vertical_16_dual_sse2, &wrapper_vertical_16_dual_c,
+                   8, 1)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 #endif
 
 #if HAVE_AVX2 && (!CONFIG_VPX_HIGHBITDEPTH)
 INSTANTIATE_TEST_CASE_P(
     AVX2, Loop8Test6Param,
-    ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_16_avx2, &vpx_lpf_horizontal_16_c, 8, 1),
-        make_tuple(&vpx_lpf_horizontal_16_avx2, &vpx_lpf_horizontal_16_c, 8,
-                   2)));
+    ::testing::Values(make_tuple(&vpx_lpf_horizontal_16_avx2,
+                                 &vpx_lpf_horizontal_16_c, 8, 1),
+                      make_tuple(&vpx_lpf_horizontal_16_avx2,
+                                 &vpx_lpf_horizontal_16_c, 8, 2)));
 #endif
 
 #if HAVE_SSE2
 #if CONFIG_VPX_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test9Param,
-    ::testing::Values(
-        make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                   &vpx_highbd_lpf_vertical_4_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                   &vpx_highbd_lpf_vertical_8_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_4_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_8_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                   &vpx_highbd_lpf_vertical_4_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                   &vpx_highbd_lpf_vertical_8_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_4_dual_c, 12),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_8_dual_c, 12),
-        make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                   &vpx_highbd_lpf_vertical_4_dual_c, 12),
-        make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                   &vpx_highbd_lpf_vertical_8_dual_c, 12)));
+    ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_8_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_4_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_8_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_dual_c, 12),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_dual_c, 12),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_4_dual_c, 12),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_8_dual_c, 12)));
 #else
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test9Param,
-    ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_dual_sse2,
-                   &vpx_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_dual_sse2,
-                   &vpx_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_dual_sse2,
-                   &vpx_lpf_vertical_4_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dual_sse2,
-                   &vpx_lpf_vertical_8_dual_c, 8)));
+    ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_sse2,
+                                 &vpx_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_horizontal_8_dual_sse2,
+                                 &vpx_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_4_dual_sse2,
+                                 &vpx_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_8_dual_sse2,
+                                 &vpx_lpf_vertical_8_dual_c, 8)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 #endif
 
@@ -657,38 +632,31 @@ INSTANTIATE_TEST_CASE_P(
     NEON, Loop8Test6Param,
     ::testing::Values(
 #if HAVE_NEON_ASM
-// Using #if inside the macro is unsupported on MSVS but the tests are not
-// currently built for MSVS with ARM and NEON.
-        make_tuple(&vpx_lpf_horizontal_16_neon,
-                   &vpx_lpf_horizontal_16_c, 8, 1),
-        make_tuple(&vpx_lpf_horizontal_16_neon,
-                   &vpx_lpf_horizontal_16_c, 8, 2),
-        make_tuple(&wrapper_vertical_16_neon,
-                   &wrapper_vertical_16_c, 8, 1),
-        make_tuple(&wrapper_vertical_16_dual_neon,
-                   &wrapper_vertical_16_dual_c, 8, 1),
+        // Using #if inside the macro is unsupported on MSVS
+        // but the tests are not currently built for MSVS
+        // with ARM and NEON.
+        make_tuple(&vpx_lpf_horizontal_16_neon, &vpx_lpf_horizontal_16_c, 8, 1),
+        make_tuple(&vpx_lpf_horizontal_16_neon, &vpx_lpf_horizontal_16_c, 8, 2),
+        make_tuple(&wrapper_vertical_16_neon, &wrapper_vertical_16_c, 8, 1),
+        make_tuple(&wrapper_vertical_16_dual_neon, &wrapper_vertical_16_dual_c,
+                   8, 1),
 #endif  // HAVE_NEON_ASM
-        make_tuple(&vpx_lpf_horizontal_8_neon,
-                   &vpx_lpf_horizontal_8_c, 8, 1),
-        make_tuple(&vpx_lpf_vertical_8_neon,
-                   &vpx_lpf_vertical_8_c, 8, 1),
-        make_tuple(&vpx_lpf_horizontal_4_neon,
-                   &vpx_lpf_horizontal_4_c, 8, 1),
-        make_tuple(&vpx_lpf_vertical_4_neon,
-                   &vpx_lpf_vertical_4_c, 8, 1)));
-INSTANTIATE_TEST_CASE_P(
-    NEON, Loop8Test9Param,
-    ::testing::Values(
+        make_tuple(&vpx_lpf_horizontal_8_neon, &vpx_lpf_horizontal_8_c, 8, 1),
+        make_tuple(&vpx_lpf_vertical_8_neon, &vpx_lpf_vertical_8_c, 8, 1),
+        make_tuple(&vpx_lpf_horizontal_4_neon, &vpx_lpf_horizontal_4_c, 8, 1),
+        make_tuple(&vpx_lpf_vertical_4_neon, &vpx_lpf_vertical_4_c, 8, 1)));
+INSTANTIATE_TEST_CASE_P(NEON, Loop8Test9Param,
+                        ::testing::Values(
 #if HAVE_NEON_ASM
-        make_tuple(&vpx_lpf_horizontal_8_dual_neon,
-                   &vpx_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dual_neon,
-                   &vpx_lpf_vertical_8_dual_c, 8),
+                            make_tuple(&vpx_lpf_horizontal_8_dual_neon,
+                                       &vpx_lpf_horizontal_8_dual_c, 8),
+                            make_tuple(&vpx_lpf_vertical_8_dual_neon,
+                                       &vpx_lpf_vertical_8_dual_c, 8),
 #endif  // HAVE_NEON_ASM
-        make_tuple(&vpx_lpf_horizontal_4_dual_neon,
-                   &vpx_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_dual_neon,
-                   &vpx_lpf_vertical_4_dual_c, 8)));
+                            make_tuple(&vpx_lpf_horizontal_4_dual_neon,
+                                       &vpx_lpf_horizontal_4_dual_c, 8),
+                            make_tuple(&vpx_lpf_vertical_4_dual_neon,
+                                       &vpx_lpf_vertical_4_dual_c, 8)));
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 #endif  // HAVE_NEON
 
@@ -704,15 +672,14 @@ INSTANTIATE_TEST_CASE_P(
 
 INSTANTIATE_TEST_CASE_P(
     MSA, Loop8Test9Param,
-    ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_dual_msa,
-                   &vpx_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_dual_msa,
-                   &vpx_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_dual_msa,
-                   &vpx_lpf_vertical_4_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dual_msa,
-                   &vpx_lpf_vertical_8_dual_c, 8)));
+    ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_msa,
+                                 &vpx_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_horizontal_8_dual_msa,
+                                 &vpx_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_4_dual_msa,
+                                 &vpx_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_8_dual_msa,
+                                 &vpx_lpf_vertical_8_dual_c, 8)));
 #endif  // HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
 
 }  // namespace
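
The DECLARE_ALIGNED initializers reflowed above all splat one random byte into
a 16-entry, 16-byte-aligned array because the SSE2 loop filters consume
blimit/limit/thresh as a single aligned 128-bit load. A sketch of that
pattern, assuming an SSE2-capable x86 target (FillThresh16 is a stand-in for
the repeated initializer lists, not a libvpx helper):

// Broadcast one threshold byte into all 16 lanes of an aligned buffer and
// load it the way an SSE2 loop-filter kernel would.
#include <cstdint>
#include <emmintrin.h>  // SSE2 intrinsics

void FillThresh16(uint8_t value, uint8_t out[16]) {
  for (int i = 0; i < 16; ++i) out[i] = value;  // same byte in every lane
}

int main() {
  alignas(16) uint8_t blimit[16];
  FillThresh16(47, blimit);  // 47 is a hypothetical threshold value
  const __m128i v =
      _mm_load_si128(reinterpret_cast<const __m128i *>(blimit));
  (void)v;  // a kernel would compare pixel deltas against these lanes
  return 0;
}
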
diff --git a/test/md5_helper.h b/test/md5_helper.h
index 742cf0b7b39477824e80e50fa13023b638ea959b..ef310a2d900a869e88b00e2a322fa3d319630d92 100644
--- a/test/md5_helper.h
+++ b/test/md5_helper.h
@@ -17,9 +17,7 @@
 namespace libvpx_test {
 class MD5 {
  public:
-  MD5() {
-    MD5Init(&md5_);
-  }
+  MD5() { MD5Init(&md5_); }
 
   void Add(const vpx_image_t *img) {
     for (int plane = 0; plane < 3; ++plane) {
@@ -30,10 +28,13 @@ class MD5 {
       // This works only for chroma_shift of 0 and 1.
       const int bytes_per_sample =
           (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
-      const int h = plane ? (img->d_h + img->y_chroma_shift) >>
-                    img->y_chroma_shift : img->d_h;
-      const int w = (plane ? (img->d_w + img->x_chroma_shift) >>
-                     img->x_chroma_shift : img->d_w) * bytes_per_sample;
+      const int h =
+          plane ? (img->d_h + img->y_chroma_shift) >> img->y_chroma_shift
+                : img->d_h;
+      const int w =
+          (plane ? (img->d_w + img->x_chroma_shift) >> img->x_chroma_shift
+                 : img->d_w) *
+          bytes_per_sample;
 
       for (int y = 0; y < h; ++y) {
         MD5Update(&md5_, buf, w);
@@ -56,8 +57,8 @@ class MD5 {
 
     MD5Final(tmp, &ctx_tmp);
     for (int i = 0; i < 16; i++) {
-      res_[i * 2 + 0]  = hex[tmp[i] >> 4];
-      res_[i * 2 + 1]  = hex[tmp[i] & 0xf];
+      res_[i * 2 + 0] = hex[tmp[i] >> 4];
+      res_[i * 2 + 1] = hex[tmp[i] & 0xf];
     }
     res_[32] = 0;
 
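
The reflowed width/height computation in MD5::Add rounds subsampled chroma
dimensions up with (d + shift) >> shift, which, as the comment in that file
notes, is only valid for chroma shifts of 0 and 1. A worked example of why the
rounding matters for odd luma dimensions (the values are hypothetical):

// Rounding-up of subsampled plane sizes as done in MD5::Add.
#include <cassert>

int main() {
  const int d_h = 35;   // hypothetical odd luma height
  const int shift = 1;  // 4:2:0 chroma subsampling
  const int h = (d_h + shift) >> shift;
  assert(h == 18);      // ceil(35 / 2); plain d_h >> shift would drop a row
  // shift == 0 leaves h == d_h, matching the luma (plane == 0) branch.
  return 0;
}
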
diff --git a/test/partial_idct_test.cc b/test/partial_idct_test.cc
index 9763789d0b9861ed9a6e299abb808cd6718f33dd..b5fad61f0401dc153d1a4f7b54b2af76ded8e840 100644
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -29,10 +29,8 @@ using libvpx_test::ACMRandom;
 namespace {
 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
-typedef std::tr1::tuple<FwdTxfmFunc,
-                        InvTxfmFunc,
-                        InvTxfmFunc,
-                        TX_SIZE, int> PartialInvTxfmParam;
+typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
+    PartialInvTxfmParam;
 const int kMaxNumCoeffs = 1024;
 class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
  public:
@@ -41,7 +39,7 @@ class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
     ftxfm_ = GET_PARAM(0);
     full_itxfm_ = GET_PARAM(1);
     partial_itxfm_ = GET_PARAM(2);
-    tx_size_  = GET_PARAM(3);
+    tx_size_ = GET_PARAM(3);
     last_nonzero_ = GET_PARAM(4);
   }
 
@@ -59,21 +57,11 @@ TEST_P(PartialIDctTest, RunQuantCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   int size;
   switch (tx_size_) {
-    case TX_4X4:
-      size = 4;
-      break;
-    case TX_8X8:
-      size = 8;
-      break;
-    case TX_16X16:
-      size = 16;
-      break;
-    case TX_32X32:
-      size = 32;
-      break;
-    default:
-      FAIL() << "Wrong Size!";
-      break;
+    case TX_4X4: size = 4; break;
+    case TX_8X8: size = 8; break;
+    case TX_16X16: size = 16; break;
+    case TX_32X32: size = 32; break;
+    default: FAIL() << "Wrong Size!"; break;
   }
   DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
   DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
@@ -99,11 +87,9 @@ TEST_P(PartialIDctTest, RunQuantCheck) {
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-255, 255].
       if (i == 0) {
-        for (int j = 0; j < block_size; ++j)
-          input_extreme_block[j] = 255;
+        for (int j = 0; j < block_size; ++j) input_extreme_block[j] = 255;
       } else if (i == 1) {
-        for (int j = 0; j < block_size; ++j)
-          input_extreme_block[j] = -255;
+        for (int j = 0; j < block_size; ++j) input_extreme_block[j] = -255;
       } else {
         for (int j = 0; j < block_size; ++j) {
           input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
@@ -115,8 +101,8 @@ TEST_P(PartialIDctTest, RunQuantCheck) {
       // quantization with maximum allowed step sizes
       test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
       for (int j = 1; j < last_nonzero_; ++j)
-        test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]]
-                         = (output_ref_block[j] / 1828) * 1828;
+        test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] =
+            (output_ref_block[j] / 1828) * 1828;
     }
 
     ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size));
@@ -125,8 +111,7 @@ TEST_P(PartialIDctTest, RunQuantCheck) {
     for (int j = 0; j < block_size; ++j) {
       const int diff = dst1[j] - dst2[j];
       const int error = diff * diff;
-      if (max_error < error)
-        max_error = error;
+      if (max_error < error) max_error = error;
     }
   }
 
@@ -138,21 +123,11 @@ TEST_P(PartialIDctTest, ResultsMatch) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   int size;
   switch (tx_size_) {
-    case TX_4X4:
-      size = 4;
-      break;
-    case TX_8X8:
-      size = 8;
-      break;
-    case TX_16X16:
-      size = 16;
-      break;
-    case TX_32X32:
-      size = 32;
-      break;
-    default:
-      FAIL() << "Wrong Size!";
-      break;
+    case TX_4X4: size = 4; break;
+    case TX_8X8: size = 8; break;
+    case TX_16X16: size = 16; break;
+    case TX_32X32: size = 32; break;
+    default: FAIL() << "Wrong Size!"; break;
   }
   DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
   DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
@@ -189,8 +164,7 @@ TEST_P(PartialIDctTest, ResultsMatch) {
     for (int j = 0; j < block_size; ++j) {
       const int diff = dst1[j] - dst2[j];
       const int error = diff * diff;
-      if (max_error < error)
-        max_error = error;
+      if (max_error < error) max_error = error;
     }
   }
 
@@ -201,143 +175,82 @@ using std::tr1::make_tuple;
 
 INSTANTIATE_TEST_CASE_P(
     C, PartialIDctTest,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c,
-                   &vpx_idct32x32_34_add_c,
-                   TX_32X32, 34),
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c,
-                   &vpx_idct32x32_1_add_c,
-                   TX_32X32, 1),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_c,
-                   &vpx_idct16x16_10_add_c,
-                   TX_16X16, 10),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_c,
-                   &vpx_idct16x16_1_add_c,
-                   TX_16X16, 1),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_12_add_c,
-                   TX_8X8, 12),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_1_add_c,
-                   TX_8X8, 1),
-        make_tuple(&vpx_fdct4x4_c,
-                   &vpx_idct4x4_16_add_c,
-                   &vpx_idct4x4_1_add_c,
-                   TX_4X4, 1)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+                                 &vpx_idct32x32_34_add_c, TX_32X32, 34),
+                      make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+                                 &vpx_idct32x32_1_add_c, TX_32X32, 1),
+                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+                                 &vpx_idct16x16_10_add_c, TX_16X16, 10),
+                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+                                 &vpx_idct16x16_1_add_c, TX_16X16, 1),
+                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_12_add_c, TX_8X8, 12),
+                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_1_add_c, TX_8X8, 1),
+                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
+                                 &vpx_idct4x4_1_add_c, TX_4X4, 1)));
 
 #if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, PartialIDctTest,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c,
-                   &vpx_idct32x32_1_add_neon,
-                   TX_32X32, 1),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_c,
-                   &vpx_idct16x16_10_add_neon,
-                   TX_16X16, 10),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_c,
-                   &vpx_idct16x16_1_add_neon,
-                   TX_16X16, 1),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_12_add_neon,
-                   TX_8X8, 12),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_1_add_neon,
-                   TX_8X8, 1),
-        make_tuple(&vpx_fdct4x4_c,
-                   &vpx_idct4x4_16_add_c,
-                   &vpx_idct4x4_1_add_neon,
-                   TX_4X4, 1)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+                                 &vpx_idct32x32_1_add_neon, TX_32X32, 1),
+                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+                                 &vpx_idct16x16_10_add_neon, TX_16X16, 10),
+                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+                                 &vpx_idct16x16_1_add_neon, TX_16X16, 1),
+                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_12_add_neon, TX_8X8, 12),
+                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_1_add_neon, TX_8X8, 1),
+                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
+                                 &vpx_idct4x4_1_add_neon, TX_4X4, 1)));
 #endif  // HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, PartialIDctTest,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c,
-                   &vpx_idct32x32_34_add_sse2,
-                   TX_32X32, 34),
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c,
-                   &vpx_idct32x32_1_add_sse2,
-                   TX_32X32, 1),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_c,
-                   &vpx_idct16x16_10_add_sse2,
-                   TX_16X16, 10),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_c,
-                   &vpx_idct16x16_1_add_sse2,
-                   TX_16X16, 1),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_12_add_sse2,
-                   TX_8X8, 12),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_1_add_sse2,
-                   TX_8X8, 1),
-        make_tuple(&vpx_fdct4x4_c,
-                   &vpx_idct4x4_16_add_c,
-                   &vpx_idct4x4_1_add_sse2,
-                   TX_4X4, 1)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+                                 &vpx_idct32x32_34_add_sse2, TX_32X32, 34),
+                      make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+                                 &vpx_idct32x32_1_add_sse2, TX_32X32, 1),
+                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+                                 &vpx_idct16x16_10_add_sse2, TX_16X16, 10),
+                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+                                 &vpx_idct16x16_1_add_sse2, TX_16X16, 1),
+                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_12_add_sse2, TX_8X8, 12),
+                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_1_add_sse2, TX_8X8, 1),
+                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
+                                 &vpx_idct4x4_1_add_sse2, TX_4X4, 1)));
 #endif
 
 #if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
     !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSSE3_64, PartialIDctTest,
-    ::testing::Values(
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_12_add_ssse3,
-                   TX_8X8, 12)));
+    ::testing::Values(make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_12_add_ssse3, TX_8X8, 12)));
 #endif
 
 #if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, PartialIDctTest,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c,
-                   &vpx_idct32x32_34_add_msa,
-                   TX_32X32, 34),
-        make_tuple(&vpx_fdct32x32_c,
-                   &vpx_idct32x32_1024_add_c,
-                   &vpx_idct32x32_1_add_msa,
-                   TX_32X32, 1),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_c,
-                   &vpx_idct16x16_10_add_msa,
-                   TX_16X16, 10),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vpx_idct16x16_256_add_c,
-                   &vpx_idct16x16_1_add_msa,
-                   TX_16X16, 1),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_12_add_msa,
-                   TX_8X8, 10),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vpx_idct8x8_64_add_c,
-                   &vpx_idct8x8_1_add_msa,
-                   TX_8X8, 1),
-        make_tuple(&vpx_fdct4x4_c,
-                   &vpx_idct4x4_16_add_c,
-                   &vpx_idct4x4_1_add_msa,
-                   TX_4X4, 1)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+                                 &vpx_idct32x32_34_add_msa, TX_32X32, 34),
+                      make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+                                 &vpx_idct32x32_1_add_msa, TX_32X32, 1),
+                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+                                 &vpx_idct16x16_10_add_msa, TX_16X16, 10),
+                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+                                 &vpx_idct16x16_1_add_msa, TX_16X16, 1),
+                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_12_add_msa, TX_8X8, 10),
+                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+                                 &vpx_idct8x8_1_add_msa, TX_8X8, 1),
+                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
+                                 &vpx_idct4x4_1_add_msa, TX_4X4, 1)));
 #endif  // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 }  // namespace
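
RunQuantCheck above coarsens the forward-transform output with the truncating
round trip (x / step) * step before handing the coefficients to both inverse
transforms, and each check then requires the partial IDCT to reproduce the
full one whenever only the first last_nonzero_ scan positions are populated.
A sketch of that core comparison, with the transform and scan details elided
(the functor type and buffers are illustrative, not the test's tran_low_t
plumbing):

// If only the leading scan positions carry coefficients, a partial inverse
// transform must produce the same reconstruction as the full-size one. Both
// *_add_ variants accumulate into the destination, so seed both identically.
#include <cstdint>
#include <cstring>

typedef void (*InvTxfm)(const int16_t *in, uint8_t *out, int stride);

bool PartialMatchesFull(InvTxfm full, InvTxfm partial, const int16_t *coeffs,
                        int size) {
  uint8_t dst_full[32 * 32];
  uint8_t dst_partial[32 * 32];
  memset(dst_full, 128, sizeof(dst_full));  // identical starting pixels
  memset(dst_partial, 128, sizeof(dst_partial));
  full(coeffs, dst_full, size);             // stride == size here
  partial(coeffs, dst_partial, size);
  return memcmp(dst_full, dst_partial, size * size) == 0;
}
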
diff --git a/test/quantize_test.cc b/test/quantize_test.cc
index 48c685534671f44bfec47575568f06507026bede..ddaa1253501b23dcb193d6bce44ea993ac691c26 100644
--- a/test/quantize_test.cc
+++ b/test/quantize_test.cc
@@ -34,9 +34,8 @@ const int number_of_iterations = 100;
 typedef void (*QuantizeFunc)(const tran_low_t *coeff, intptr_t count,
                              int skip_block, const int16_t *zbin,
                              const int16_t *round, const int16_t *quant,
-                             const int16_t *quant_shift,
-                             tran_low_t *qcoeff, tran_low_t *dqcoeff,
-                             const int16_t *dequant,
+                             const int16_t *quant_shift, tran_low_t *qcoeff,
+                             tran_low_t *dqcoeff, const int16_t *dequant,
                              uint16_t *eob, const int16_t *scan,
                              const int16_t *iscan);
 typedef std::tr1::tuple<QuantizeFunc, QuantizeFunc, vpx_bit_depth_t>
@@ -46,9 +45,9 @@ class VP9QuantizeTest : public ::testing::TestWithParam<QuantizeParam> {
  public:
   virtual ~VP9QuantizeTest() {}
   virtual void SetUp() {
-    quantize_op_   = GET_PARAM(0);
+    quantize_op_ = GET_PARAM(0);
     ref_quantize_op_ = GET_PARAM(1);
-    bit_depth_  = GET_PARAM(2);
+    bit_depth_ = GET_PARAM(2);
     mask_ = (1 << bit_depth_) - 1;
   }
 
@@ -65,9 +64,9 @@ class VP9Quantize32Test : public ::testing::TestWithParam<QuantizeParam> {
  public:
   virtual ~VP9Quantize32Test() {}
   virtual void SetUp() {
-    quantize_op_   = GET_PARAM(0);
+    quantize_op_ = GET_PARAM(0);
     ref_quantize_op_ = GET_PARAM(1);
-    bit_depth_  = GET_PARAM(2);
+    bit_depth_ = GET_PARAM(2);
     mask_ = (1 << bit_depth_) - 1;
   }
 
@@ -106,10 +105,10 @@ TEST_P(VP9QuantizeTest, OperationCheck) {
     *eob_ptr = rnd.Rand16();
     *ref_eob_ptr = *eob_ptr;
     for (int j = 0; j < count; j++) {
-      coeff_ptr[j] = rnd.Rand16()&mask_;
+      coeff_ptr[j] = rnd.Rand16() & mask_;
     }
     for (int j = 0; j < 2; j++) {
-      zbin_ptr[j] = rnd.Rand16()&mask_;
+      zbin_ptr[j] = rnd.Rand16() & mask_;
       round_ptr[j] = rnd.Rand16();
       quant_ptr[j] = rnd.Rand16();
       quant_shift_ptr[j] = rnd.Rand16();
@@ -117,16 +116,15 @@ TEST_P(VP9QuantizeTest, OperationCheck) {
     }
     ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr,
                      quant_ptr, quant_shift_ptr, ref_qcoeff_ptr,
-                     ref_dqcoeff_ptr, dequant_ptr,
-                     ref_eob_ptr, scan_order->scan, scan_order->iscan);
-    ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block,
-                                          zbin_ptr, round_ptr, quant_ptr,
-                                          quant_shift_ptr, qcoeff_ptr,
-                                          dqcoeff_ptr, dequant_ptr, eob_ptr,
-                                          scan_order->scan, scan_order->iscan));
+                     ref_dqcoeff_ptr, dequant_ptr, ref_eob_ptr,
+                     scan_order->scan, scan_order->iscan);
+    ASM_REGISTER_STATE_CHECK(quantize_op_(
+        coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr,
+        quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr,
+        scan_order->scan, scan_order->iscan));
     for (int j = 0; j < sz; ++j) {
-      err_count += (ref_qcoeff_ptr[j]  != qcoeff_ptr[j]) |
-          (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
+      err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) |
+                   (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
     }
     err_count += (*ref_eob_ptr != *eob_ptr);
     if (err_count && !err_count_total) {
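
The accumulator reflowed above deliberately ORs the two per-index comparisons
before adding, so an index where both the quantized and dequantized outputs
disagree is still counted once. A short demonstration of that choice (the
values are hypothetical):

// Bitwise | of two bool comparisons yields 0 or 1 per index, never 2.
#include <cassert>

int main() {
  const int ref_q = 5, q = 6;      // quantized values disagree
  const int ref_dq = 50, dq = 60;  // dequantized values disagree too
  int err_count = 0;
  err_count += (ref_q != q) | (ref_dq != dq);
  assert(err_count == 1);  // one bad index, counted once
  return 0;
}
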
@@ -165,10 +163,10 @@ TEST_P(VP9Quantize32Test, OperationCheck) {
     *eob_ptr = rnd.Rand16();
     *ref_eob_ptr = *eob_ptr;
     for (int j = 0; j < count; j++) {
-      coeff_ptr[j] = rnd.Rand16()&mask_;
+      coeff_ptr[j] = rnd.Rand16() & mask_;
     }
     for (int j = 0; j < 2; j++) {
-      zbin_ptr[j] = rnd.Rand16()&mask_;
+      zbin_ptr[j] = rnd.Rand16() & mask_;
       round_ptr[j] = rnd.Rand16();
       quant_ptr[j] = rnd.Rand16();
       quant_shift_ptr[j] = rnd.Rand16();
@@ -176,16 +174,15 @@ TEST_P(VP9Quantize32Test, OperationCheck) {
     }
     ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr,
                      quant_ptr, quant_shift_ptr, ref_qcoeff_ptr,
-                     ref_dqcoeff_ptr, dequant_ptr,
-                     ref_eob_ptr, scan_order->scan, scan_order->iscan);
-    ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block,
-                                          zbin_ptr, round_ptr, quant_ptr,
-                                          quant_shift_ptr, qcoeff_ptr,
-                                          dqcoeff_ptr, dequant_ptr, eob_ptr,
-                                          scan_order->scan, scan_order->iscan));
+                     ref_dqcoeff_ptr, dequant_ptr, ref_eob_ptr,
+                     scan_order->scan, scan_order->iscan);
+    ASM_REGISTER_STATE_CHECK(quantize_op_(
+        coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr,
+        quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr,
+        scan_order->scan, scan_order->iscan));
     for (int j = 0; j < sz; ++j) {
-      err_count += (ref_qcoeff_ptr[j]  != qcoeff_ptr[j]) |
-          (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
+      err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) |
+                   (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
     }
     err_count += (*ref_eob_ptr != *eob_ptr);
     if (err_count && !err_count_total) {
@@ -227,10 +224,10 @@ TEST_P(VP9QuantizeTest, EOBCheck) {
     for (int j = 0; j < count; j++) {
       coeff_ptr[j] = 0;
     }
-    coeff_ptr[rnd(count)] = rnd.Rand16()&mask_;
-    coeff_ptr[rnd(count)] = rnd.Rand16()&mask_;
+    coeff_ptr[rnd(count)] = rnd.Rand16() & mask_;
+    coeff_ptr[rnd(count)] = rnd.Rand16() & mask_;
     for (int j = 0; j < 2; j++) {
-      zbin_ptr[j] = rnd.Rand16()&mask_;
+      zbin_ptr[j] = rnd.Rand16() & mask_;
       round_ptr[j] = rnd.Rand16();
       quant_ptr[j] = rnd.Rand16();
       quant_shift_ptr[j] = rnd.Rand16();
@@ -239,17 +236,16 @@ TEST_P(VP9QuantizeTest, EOBCheck) {
 
     ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr,
                      quant_ptr, quant_shift_ptr, ref_qcoeff_ptr,
-                     ref_dqcoeff_ptr, dequant_ptr,
-                     ref_eob_ptr, scan_order->scan, scan_order->iscan);
-    ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block,
-                                          zbin_ptr, round_ptr, quant_ptr,
-                                          quant_shift_ptr, qcoeff_ptr,
-                                          dqcoeff_ptr, dequant_ptr, eob_ptr,
-                                          scan_order->scan, scan_order->iscan));
+                     ref_dqcoeff_ptr, dequant_ptr, ref_eob_ptr,
+                     scan_order->scan, scan_order->iscan);
+    ASM_REGISTER_STATE_CHECK(quantize_op_(
+        coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr,
+        quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr,
+        scan_order->scan, scan_order->iscan));
 
     for (int j = 0; j < sz; ++j) {
-      err_count += (ref_qcoeff_ptr[j]  != qcoeff_ptr[j]) |
-          (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
+      err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) |
+                   (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
     }
     err_count += (*ref_eob_ptr != *eob_ptr);
     if (err_count && !err_count_total) {
@@ -291,10 +287,10 @@ TEST_P(VP9Quantize32Test, EOBCheck) {
       coeff_ptr[j] = 0;
     }
     // Two random entries
-    coeff_ptr[rnd(count)] = rnd.Rand16()&mask_;
-    coeff_ptr[rnd(count)] = rnd.Rand16()&mask_;
+    coeff_ptr[rnd(count)] = rnd.Rand16() & mask_;
+    coeff_ptr[rnd(count)] = rnd.Rand16() & mask_;
     for (int j = 0; j < 2; j++) {
-      zbin_ptr[j] = rnd.Rand16()&mask_;
+      zbin_ptr[j] = rnd.Rand16() & mask_;
       round_ptr[j] = rnd.Rand16();
       quant_ptr[j] = rnd.Rand16();
       quant_shift_ptr[j] = rnd.Rand16();
@@ -303,17 +299,16 @@ TEST_P(VP9Quantize32Test, EOBCheck) {
 
     ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr,
                      quant_ptr, quant_shift_ptr, ref_qcoeff_ptr,
-                     ref_dqcoeff_ptr, dequant_ptr,
-                     ref_eob_ptr, scan_order->scan, scan_order->iscan);
-    ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block,
-                                          zbin_ptr, round_ptr, quant_ptr,
-                                          quant_shift_ptr, qcoeff_ptr,
-                                          dqcoeff_ptr, dequant_ptr, eob_ptr,
-                                          scan_order->scan, scan_order->iscan));
+                     ref_dqcoeff_ptr, dequant_ptr, ref_eob_ptr,
+                     scan_order->scan, scan_order->iscan);
+    ASM_REGISTER_STATE_CHECK(quantize_op_(
+        coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr,
+        quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr,
+        scan_order->scan, scan_order->iscan));
 
     for (int j = 0; j < sz; ++j) {
-      err_count += (ref_qcoeff_ptr[j]  != qcoeff_ptr[j]) |
-          (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
+      err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) |
+                   (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
     }
     err_count += (*ref_eob_ptr != *eob_ptr);
     if (err_count && !err_count_total) {
@@ -330,22 +325,20 @@ using std::tr1::make_tuple;
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(
     SSE2, VP9QuantizeTest,
-    ::testing::Values(
-        make_tuple(&vpx_highbd_quantize_b_sse2,
-                   &vpx_highbd_quantize_b_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_quantize_b_sse2,
-                   &vpx_highbd_quantize_b_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_quantize_b_sse2,
-                   &vpx_highbd_quantize_b_c, VPX_BITS_12)));
+    ::testing::Values(make_tuple(&vpx_highbd_quantize_b_sse2,
+                                 &vpx_highbd_quantize_b_c, VPX_BITS_8),
+                      make_tuple(&vpx_highbd_quantize_b_sse2,
+                                 &vpx_highbd_quantize_b_c, VPX_BITS_10),
+                      make_tuple(&vpx_highbd_quantize_b_sse2,
+                                 &vpx_highbd_quantize_b_c, VPX_BITS_12)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, VP9Quantize32Test,
-    ::testing::Values(
-        make_tuple(&vpx_highbd_quantize_b_32x32_sse2,
-                   &vpx_highbd_quantize_b_32x32_c, VPX_BITS_8),
-        make_tuple(&vpx_highbd_quantize_b_32x32_sse2,
-                   &vpx_highbd_quantize_b_32x32_c, VPX_BITS_10),
-        make_tuple(&vpx_highbd_quantize_b_32x32_sse2,
-                   &vpx_highbd_quantize_b_32x32_c, VPX_BITS_12)));
+    ::testing::Values(make_tuple(&vpx_highbd_quantize_b_32x32_sse2,
+                                 &vpx_highbd_quantize_b_32x32_c, VPX_BITS_8),
+                      make_tuple(&vpx_highbd_quantize_b_32x32_sse2,
+                                 &vpx_highbd_quantize_b_32x32_c, VPX_BITS_10),
+                      make_tuple(&vpx_highbd_quantize_b_32x32_sse2,
+                                 &vpx_highbd_quantize_b_32x32_c, VPX_BITS_12)));
 #endif  // HAVE_SSE2
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 }  // namespace
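
Aside on the test pattern above: each OperationCheck body runs the reference C quantizer and the optimized kernel on identical random inputs, then accumulates per-element mismatches in err_count and records the first failing iteration instead of asserting on the first difference. A minimal, self-contained sketch of that compare-against-reference loop (ref_op and fast_op are hypothetical stand-ins, not the real quantize kernels):

#include <cstdio>

static int ref_op(int x) { return x * 2; }    // hypothetical reference kernel
static int fast_op(int x) { return x << 1; }  // hypothetical optimized kernel

int main() {
  int err_count = 0;
  int first_failure = -1;
  for (int i = 0; i < 1000; ++i) {
    // Count mismatches instead of aborting on the first one, and remember
    // where the first divergence happened for the failure message.
    if (ref_op(i) != fast_op(i)) {
      if (first_failure == -1) first_failure = i;
      ++err_count;
    }
  }
  std::printf("mismatches=%d first at i=%d\n", err_count, first_failure);
  return err_count != 0;
}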
diff --git a/test/register_state_check.h b/test/register_state_check.h
index d832a6c15d122da8e764609d8b06662562e096c0..496780e848093378829bfd283e8b08471c060040 100644
--- a/test/register_state_check.h
+++ b/test/register_state_check.h
@@ -86,15 +86,16 @@ class RegisterStateCheck {
   CONTEXT pre_context_;
 };
 
-#define ASM_REGISTER_STATE_CHECK(statement) do {  \
-  libvpx_test::RegisterStateCheck reg_check;      \
-  statement;                                      \
-} while (false)
+#define ASM_REGISTER_STATE_CHECK(statement)    \
+  do {                                         \
+    libvpx_test::RegisterStateCheck reg_check; \
+    statement;                                 \
+  } while (false)
 
 }  // namespace libvpx_test
 
-#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) \
-      && !CONFIG_SHARED && HAVE_NEON_ASM && CONFIG_VP10
+#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && !CONFIG_SHARED && \
+    HAVE_NEON_ASM && CONFIG_VP10
 
 extern "C" {
 // Save the d8-d15 registers into store.
@@ -123,8 +124,8 @@ class RegisterStateCheck {
     int64_t post_store[8];
     vpx_push_neon(post_store);
     for (int i = 0; i < 8; ++i) {
-      EXPECT_EQ(pre_store_[i], post_store[i]) << "d"
-          << i + 8 << " has been modified";
+      EXPECT_EQ(pre_store_[i], post_store[i]) << "d" << i + 8
+                                              << " has been modified";
     }
     return !testing::Test::HasNonfatalFailure();
   }
@@ -133,10 +134,11 @@ class RegisterStateCheck {
   int64_t pre_store_[8];
 };
 
-#define ASM_REGISTER_STATE_CHECK(statement) do {  \
-  libvpx_test::RegisterStateCheck reg_check;      \
-  statement;                                      \
-} while (false)
+#define ASM_REGISTER_STATE_CHECK(statement)    \
+  do {                                         \
+    libvpx_test::RegisterStateCheck reg_check; \
+    statement;                                 \
+  } while (false)
 
 }  // namespace libvpx_test
 
@@ -181,10 +183,11 @@ class RegisterStateCheckMMX {
   uint16_t pre_fpu_env_[14];
 };
 
-#define API_REGISTER_STATE_CHECK(statement) do {  \
-  libvpx_test::RegisterStateCheckMMX reg_check;   \
-  ASM_REGISTER_STATE_CHECK(statement);            \
-} while (false)
+#define API_REGISTER_STATE_CHECK(statement)       \
+  do {                                            \
+    libvpx_test::RegisterStateCheckMMX reg_check; \
+    ASM_REGISTER_STATE_CHECK(statement);          \
+  } while (false)
 
 }  // namespace libvpx_test
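
The ASM_REGISTER_STATE_CHECK rewrite above keeps two idioms intact: the do { ... } while (false) wrapper, which makes the macro expand to a single statement (safe under an unbraced if/else), and an RAII guard whose constructor snapshots register state and whose destructor verifies it once `statement` has run. A compilable sketch of the same shape, with a hypothetical Guard standing in for RegisterStateCheck:

#include <cassert>

// Hypothetical guard: snapshot some global state on entry, verify on exit.
class Guard {
 public:
  Guard() : saved_(state()) {}
  ~Guard() { assert(state() == saved_); }  // fires if `statement` leaked state

 private:
  static int state() { return 0; }  // stand-in for reading CPU registers
  int saved_;
};

// The do/while(false) wrapper makes the expansion a single statement, so
// `if (cond) CHECK_STATE(f());` parses the way the caller expects.
#define CHECK_STATE(statement) \
  do {                         \
    Guard guard;               \
    statement;                 \
  } while (false)

int main() {
  int x = 0;
  if (x == 0) CHECK_STATE(x = 1);  // usage: the guard brackets the statement
  return x == 1 ? 0 : 1;
}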
 
diff --git a/test/resize_test.cc b/test/resize_test.cc
index 31d1e7859fa1d0ae2edab903d9bee921c1e503e0..9c7cc2df8e68953fc8c8527a740f4af3cbca4131 100644
--- a/test/resize_test.cc
+++ b/test/resize_test.cc
@@ -42,9 +42,9 @@ static void write_ivf_file_header(const vpx_codec_enc_cfg_t *const cfg,
   header[1] = 'K';
   header[2] = 'I';
   header[3] = 'F';
-  mem_put_le16(header + 4,  0);                   /* version */
-  mem_put_le16(header + 6,  32);                  /* headersize */
-  mem_put_le32(header + 8,  0x30395056);          /* fourcc (vp9) */
+  mem_put_le16(header + 4, 0);                    /* version */
+  mem_put_le16(header + 6, 32);                   /* headersize */
+  mem_put_le32(header + 8, 0x30395056);           /* fourcc (vp9) */
   mem_put_le16(header + 12, cfg->g_w);            /* width */
   mem_put_le16(header + 14, cfg->g_h);            /* height */
   mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */
@@ -66,8 +66,7 @@ static void write_ivf_frame_header(const vpx_codec_cx_pkt_t *const pkt,
   char header[12];
   vpx_codec_pts_t pts;
 
-  if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
-    return;
+  if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return;
 
   pts = pkt->data.frame.pts;
   mem_put_le32(header, static_cast<unsigned int>(pkt->data.frame.sz));
@@ -91,16 +90,11 @@ struct FrameInfo {
 };
 
 unsigned int ScaleForFrameNumber(unsigned int frame, unsigned int val) {
-  if (frame < 10)
-    return val;
-  if (frame < 20)
-    return val / 2;
-  if (frame < 30)
-    return val * 2 / 3;
-  if (frame < 40)
-    return val / 4;
-  if (frame < 50)
-    return val * 7 / 8;
+  if (frame < 10) return val;
+  if (frame < 20) return val / 2;
+  if (frame < 30) return val * 2 / 3;
+  if (frame < 40) return val / 4;
+  if (frame < 50) return val * 7 / 8;
   return val;
 }
 
@@ -122,8 +116,9 @@ class ResizingVideoSource : public ::libvpx_test::DummyVideoSource {
   }
 };
 
-class ResizeTest : public ::libvpx_test::EncoderTest,
-  public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class ResizeTest
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
  protected:
   ResizeTest() : EncoderTest(GET_PARAM(0)) {}
 
@@ -139,7 +134,7 @@ class ResizeTest : public ::libvpx_test::EncoderTest,
     frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h));
   }
 
-  std::vector< FrameInfo > frame_info_list_;
+  std::vector<FrameInfo> frame_info_list_;
 };
 
 TEST_P(ResizeTest, TestExternalResizeWorks) {
@@ -153,10 +148,10 @@ TEST_P(ResizeTest, TestExternalResizeWorks) {
     const unsigned int expected_w = ScaleForFrameNumber(frame, kInitialWidth);
     const unsigned int expected_h = ScaleForFrameNumber(frame, kInitialHeight);
 
-    EXPECT_EQ(expected_w, info->w)
-        << "Frame " << frame << " had unexpected width";
-    EXPECT_EQ(expected_h, info->h)
-        << "Frame " << frame << " had unexpected height";
+    EXPECT_EQ(expected_w, info->w) << "Frame " << frame
+                                   << " had unexpected width";
+    EXPECT_EQ(expected_h, info->h) << "Frame " << frame
+                                   << " had unexpected height";
   }
 }
 
@@ -167,10 +162,7 @@ class ResizeInternalTest : public ResizeTest {
  protected:
 #if WRITE_COMPRESSED_STREAM
   ResizeInternalTest()
-      : ResizeTest(),
-        frame0_psnr_(0.0),
-        outfile_(NULL),
-        out_frames_(0) {}
+      : ResizeTest(), frame0_psnr_(0.0), outfile_(NULL), out_frames_(0) {}
 #else
   ResizeInternalTest() : ResizeTest(), frame0_psnr_(0.0) {}
 #endif
@@ -199,30 +191,29 @@ class ResizeInternalTest : public ResizeTest {
     if (change_config_) {
       int new_q = 60;
       if (video->frame() == 0) {
-        struct vpx_scaling_mode mode = {VP8E_ONETWO, VP8E_ONETWO};
+        struct vpx_scaling_mode mode = { VP8E_ONETWO, VP8E_ONETWO };
         encoder->Control(VP8E_SET_SCALEMODE, &mode);
       }
       if (video->frame() == 1) {
-        struct vpx_scaling_mode mode = {VP8E_NORMAL, VP8E_NORMAL};
+        struct vpx_scaling_mode mode = { VP8E_NORMAL, VP8E_NORMAL };
         encoder->Control(VP8E_SET_SCALEMODE, &mode);
         cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = new_q;
         encoder->Config(&cfg_);
       }
     } else {
       if (video->frame() == kStepDownFrame) {
-        struct vpx_scaling_mode mode = {VP8E_FOURFIVE, VP8E_THREEFIVE};
+        struct vpx_scaling_mode mode = { VP8E_FOURFIVE, VP8E_THREEFIVE };
         encoder->Control(VP8E_SET_SCALEMODE, &mode);
       }
       if (video->frame() == kStepUpFrame) {
-        struct vpx_scaling_mode mode = {VP8E_NORMAL, VP8E_NORMAL};
+        struct vpx_scaling_mode mode = { VP8E_NORMAL, VP8E_NORMAL };
         encoder->Control(VP8E_SET_SCALEMODE, &mode);
       }
     }
   }
 
   virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
-    if (!frame0_psnr_)
-      frame0_psnr_ = pkt->data.psnr.psnr[0];
+    if (!frame0_psnr_) frame0_psnr_ = pkt->data.psnr.psnr[0];
     EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
   }
 
@@ -231,8 +222,7 @@ class ResizeInternalTest : public ResizeTest {
     ++out_frames_;
 
     // Write initial file header if first frame.
-    if (pkt->data.frame.pts == 0)
-      write_ivf_file_header(&cfg_, 0, outfile_);
+    if (pkt->data.frame.pts == 0) write_ivf_file_header(&cfg_, 0, outfile_);
 
     // Write frame header and data.
     write_ivf_frame_header(pkt, outfile_);
@@ -286,8 +276,9 @@ TEST_P(ResizeInternalTest, TestInternalResizeChangeConfig) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-class ResizeRealtimeTest : public ::libvpx_test::EncoderTest,
-  public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+class ResizeRealtimeTest
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
  protected:
   ResizeRealtimeTest() : EncoderTest(GET_PARAM(0)) {}
   virtual ~ResizeRealtimeTest() {}
@@ -332,14 +323,14 @@ class ResizeRealtimeTest : public ::libvpx_test::EncoderTest,
     // Enable dropped frames.
     cfg_.rc_dropframe_thresh = 1;
     // Enable error_resilience mode.
-    cfg_.g_error_resilient  = 1;
+    cfg_.g_error_resilient = 1;
     // Enable dynamic resizing.
     cfg_.rc_resize_allowed = 1;
     // Run at low bitrate.
     cfg_.rc_target_bitrate = 200;
   }
 
-  std::vector< FrameInfo > frame_info_list_;
+  std::vector<FrameInfo> frame_info_list_;
   int set_cpu_used_;
   bool change_bitrate_;
 };
@@ -356,10 +347,10 @@ TEST_P(ResizeRealtimeTest, TestExternalResizeWorks) {
     const unsigned int expected_w = ScaleForFrameNumber(frame, kInitialWidth);
     const unsigned int expected_h = ScaleForFrameNumber(frame, kInitialHeight);
 
-    EXPECT_EQ(expected_w, info->w)
-        << "Frame " << frame << " had unexpected width";
-    EXPECT_EQ(expected_h, info->h)
-        << "Frame " << frame << " had unexpected height";
+    EXPECT_EQ(expected_w, info->w) << "Frame " << frame
+                                   << " had unexpected width";
+    EXPECT_EQ(expected_h, info->h) << "Frame " << frame
+                                   << " had unexpected height";
   }
 }
 
@@ -436,10 +427,8 @@ TEST_P(ResizeRealtimeTest, TestInternalResizeDownUpChangeBitRate) {
 }
 
 vpx_img_fmt_t CspForFrameNumber(int frame) {
-  if (frame < 10)
-    return VPX_IMG_FMT_I420;
-  if (frame < 20)
-    return VPX_IMG_FMT_I444;
+  if (frame < 10) return VPX_IMG_FMT_I420;
+  if (frame < 20) return VPX_IMG_FMT_I444;
   return VPX_IMG_FMT_I420;
 }
 
@@ -447,10 +436,7 @@ class ResizeCspTest : public ResizeTest {
  protected:
 #if WRITE_COMPRESSED_STREAM
   ResizeCspTest()
-      : ResizeTest(),
-        frame0_psnr_(0.0),
-        outfile_(NULL),
-        out_frames_(0) {}
+      : ResizeTest(), frame0_psnr_(0.0), outfile_(NULL), out_frames_(0) {}
 #else
   ResizeCspTest() : ResizeTest(), frame0_psnr_(0.0) {}
 #endif
@@ -489,8 +475,7 @@ class ResizeCspTest : public ResizeTest {
   }
 
   virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
-    if (!frame0_psnr_)
-      frame0_psnr_ = pkt->data.psnr.psnr[0];
+    if (!frame0_psnr_) frame0_psnr_ = pkt->data.psnr.psnr[0];
     EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
   }
 
@@ -499,8 +484,7 @@ class ResizeCspTest : public ResizeTest {
     ++out_frames_;
 
     // Write initial file header if first frame.
-    if (pkt->data.frame.pts == 0)
-      write_ivf_file_header(&cfg_, 0, outfile_);
+    if (pkt->data.frame.pts == 0) write_ivf_file_header(&cfg_, 0, outfile_);
 
     // Write frame header and data.
     write_ivf_frame_header(pkt, outfile_);
@@ -541,12 +525,12 @@ TEST_P(ResizeCspTest, TestResizeCspWorks) {
 }
 
 VP10_INSTANTIATE_TEST_CASE(ResizeTest,
-                          ::testing::Values(::libvpx_test::kRealTime));
+                           ::testing::Values(::libvpx_test::kRealTime));
 VP10_INSTANTIATE_TEST_CASE(ResizeInternalTest,
-                          ::testing::Values(::libvpx_test::kOnePassBest));
+                           ::testing::Values(::libvpx_test::kOnePassBest));
 VP10_INSTANTIATE_TEST_CASE(ResizeRealtimeTest,
-                          ::testing::Values(::libvpx_test::kRealTime),
-                          ::testing::Range(5, 9));
+                           ::testing::Values(::libvpx_test::kRealTime),
+                           ::testing::Range(5, 9));
 VP10_INSTANTIATE_TEST_CASE(ResizeCspTest,
-                          ::testing::Values(::libvpx_test::kRealTime));
+                           ::testing::Values(::libvpx_test::kRealTime));
 }  // namespace
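
write_ivf_file_header in the hunk above fills the 32-byte IVF header with explicit little-endian stores at fixed offsets. A self-contained sketch of those helpers and the layout; put_le16/put_le32 are local re-implementations for illustration (not libvpx's mem_put_* API), and the trailing fields follow the standard IVF layout:

#include <stdint.h>
#include <string.h>

static void put_le16(uint8_t *p, uint32_t v) {
  p[0] = (uint8_t)(v & 0xff);
  p[1] = (uint8_t)((v >> 8) & 0xff);
}

static void put_le32(uint8_t *p, uint32_t v) {
  put_le16(p + 0, v & 0xffff);
  put_le16(p + 2, v >> 16);
}

// Field offsets as written by write_ivf_file_header above.
static void make_ivf_header(uint8_t header[32], int w, int h, int den, int num,
                            int frame_cnt) {
  memcpy(header, "DKIF", 4);        // signature
  put_le16(header + 4, 0);          // version
  put_le16(header + 6, 32);         // header size
  put_le32(header + 8, 0x30395056); // fourcc, "VP90" in little-endian bytes
  put_le16(header + 12, w);         // width
  put_le16(header + 14, h);         // height
  put_le32(header + 16, den);       // timebase denominator (rate)
  put_le32(header + 20, num);       // timebase numerator (scale)
  put_le32(header + 24, frame_cnt); // frame count
  put_le32(header + 28, 0);         // unused
}

int main() {
  uint8_t header[32];
  make_ivf_header(header, 352, 288, 30, 1, 300);
  return header[6] == 32 ? 0 : 1;  // low byte of the header-size field
}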
diff --git a/test/sad_test.cc b/test/sad_test.cc
index 5429a96e1bba768e9f983e5ea42d26ba32932950..b4d8148729c5207b6cc8858f388c881175604aad 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #include <string.h>
 #include <limits.h>
 #include <stdio.h>
@@ -25,23 +24,17 @@
 #include "vpx_mem/vpx_mem.h"
 #include "vpx_ports/mem.h"
 
-typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr,
-                                   int src_stride,
-                                   const uint8_t *ref_ptr,
-                                   int ref_stride);
+typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr, int src_stride,
+                                   const uint8_t *ref_ptr, int ref_stride);
 typedef std::tr1::tuple<int, int, SadMxNFunc, int> SadMxNParam;
 
-typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr,
-                                  int src_stride,
-                                  const uint8_t *ref_ptr,
-                                  int ref_stride,
+typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr, int src_stride,
+                                  const uint8_t *ref_ptr, int ref_stride,
                                   const uint8_t *second_pred);
 typedef std::tr1::tuple<int, int, SadMxNAvgFunc, int> SadMxNAvgParam;
 
-typedef void (*SadMxNx4Func)(const uint8_t *src_ptr,
-                             int src_stride,
-                             const uint8_t *const ref_ptr[],
-                             int ref_stride,
+typedef void (*SadMxNx4Func)(const uint8_t *src_ptr, int src_stride,
+                             const uint8_t *const ref_ptr[], int ref_stride,
                              uint32_t *sad_array);
 typedef std::tr1::tuple<int, int, SadMxNx4Func, int> SadMxNx4Param;
 
@@ -50,22 +43,22 @@ using libvpx_test::ACMRandom;
 namespace {
 class SADTestBase : public ::testing::Test {
  public:
-  SADTestBase(int width, int height, int bit_depth) :
-      width_(width), height_(height), bd_(bit_depth) {}
+  SADTestBase(int width, int height, int bit_depth)
+      : width_(width), height_(height), bd_(bit_depth) {}
 
   static void SetUpTestCase() {
-    source_data8_ = reinterpret_cast<uint8_t*>(
+    source_data8_ = reinterpret_cast<uint8_t *>(
         vpx_memalign(kDataAlignment, kDataBlockSize));
-    reference_data8_ = reinterpret_cast<uint8_t*>(
+    reference_data8_ = reinterpret_cast<uint8_t *>(
         vpx_memalign(kDataAlignment, kDataBufferSize));
-    second_pred8_ = reinterpret_cast<uint8_t*>(
-        vpx_memalign(kDataAlignment, 64*64));
-    source_data16_ = reinterpret_cast<uint16_t*>(
-        vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
-    reference_data16_ = reinterpret_cast<uint16_t*>(
-        vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
-    second_pred16_ = reinterpret_cast<uint16_t*>(
-        vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
+    second_pred8_ =
+        reinterpret_cast<uint8_t *>(vpx_memalign(kDataAlignment, 64 * 64));
+    source_data16_ = reinterpret_cast<uint16_t *>(
+        vpx_memalign(kDataAlignment, kDataBlockSize * sizeof(uint16_t)));
+    reference_data16_ = reinterpret_cast<uint16_t *>(
+        vpx_memalign(kDataAlignment, kDataBufferSize * sizeof(uint16_t)));
+    second_pred16_ = reinterpret_cast<uint16_t *>(
+        vpx_memalign(kDataAlignment, 64 * 64 * sizeof(uint16_t)));
   }
 
   static void TearDownTestCase() {
@@ -83,9 +76,7 @@ class SADTestBase : public ::testing::Test {
     second_pred16_ = NULL;
   }
 
-  virtual void TearDown() {
-    libvpx_test::ClearSystemState();
-  }
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
 
  protected:
   // Handle up to four 64x64 blocks, with strides up to 128.
@@ -128,12 +119,12 @@ class SADTestBase : public ::testing::Test {
   // difference between two pixels in the same relative location; accumulate.
   unsigned int ReferenceSAD(int block_idx) {
     unsigned int sad = 0;
-      const uint8_t *const reference8 = GetReference(block_idx);
-      const uint8_t *const source8 = source_data_;
+    const uint8_t *const reference8 = GetReference(block_idx);
+    const uint8_t *const source8 = source_data_;
 #if CONFIG_VPX_HIGHBITDEPTH
-      const uint16_t *const reference16 =
-          CONVERT_TO_SHORTPTR(GetReference(block_idx));
-      const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+    const uint16_t *const reference16 =
+        CONVERT_TO_SHORTPTR(GetReference(block_idx));
+    const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
@@ -169,13 +160,13 @@ class SADTestBase : public ::testing::Test {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
           const int tmp = second_pred8[h * width_ + w] +
-              reference8[h * reference_stride_ + w];
+                          reference8[h * reference_stride_ + w];
           const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
           sad += abs(source8[h * source_stride_ + w] - comp_pred);
 #if CONFIG_VPX_HIGHBITDEPTH
         } else {
           const int tmp = second_pred16[h * width_ + w] +
-              reference16[h * reference_stride_ + w];
+                          reference16[h * reference_stride_ + w];
           const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
           sad += abs(source16[h * source_stride_ + w] - comp_pred);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -239,20 +230,18 @@ class SADTestBase : public ::testing::Test {
   ACMRandom rnd_;
 };
 
-class SADx4Test
-    : public SADTestBase,
-      public ::testing::WithParamInterface<SadMxNx4Param> {
+class SADx4Test : public SADTestBase,
+                  public ::testing::WithParamInterface<SadMxNx4Param> {
  public:
   SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
 
  protected:
   void SADs(unsigned int *results) {
-    const uint8_t *references[] = {GetReference(0), GetReference(1),
-                                   GetReference(2), GetReference(3)};
+    const uint8_t *references[] = { GetReference(0), GetReference(1),
+                                    GetReference(2), GetReference(3) };
 
-    ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
-                                          references, reference_stride_,
-                                          results));
+    ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(
+        source_data_, source_stride_, references, reference_stride_, results));
   }
 
   void CheckSADs() {
@@ -267,9 +256,8 @@ class SADx4Test
   }
 };
 
-class SADTest
-    : public SADTestBase,
-      public ::testing::WithParamInterface<SadMxNParam> {
+class SADTest : public SADTestBase,
+                public ::testing::WithParamInterface<SadMxNParam> {
  public:
   SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
 
@@ -291,9 +279,8 @@ class SADTest
   }
 };
 
-class SADavgTest
-    : public SADTestBase,
-      public ::testing::WithParamInterface<SadMxNAvgParam> {
+class SADavgTest : public SADTestBase,
+                   public ::testing::WithParamInterface<SadMxNAvgParam> {
  public:
   SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
 
@@ -469,7 +456,7 @@ TEST_P(SADx4Test, ShortSrc) {
 }
 
 TEST_P(SADx4Test, SrcAlignedByWidth) {
-  uint8_t * tmp_source_data = source_data_;
+  uint8_t *tmp_source_data = source_data_;
   source_data_ += width_;
   FillRandom(source_data_, source_stride_);
   FillRandom(GetReference(0), reference_stride_);
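
ReferenceSAD in the hunks above is the plain-C oracle the SIMD paths are checked against: absolute differences of co-located pixels, accumulated over the block, with independent source and reference strides. An 8-bit-only sketch (the CONFIG_VPX_HIGHBITDEPTH branch is elided):

#include <stdint.h>
#include <stdlib.h>

static unsigned int reference_sad(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  int width, int height) {
  unsigned int sad = 0;
  for (int h = 0; h < height; ++h) {
    for (int w = 0; w < width; ++w) {
      // Absolute difference of pixels in the same relative location.
      sad += abs(src[h * src_stride + w] - ref[h * ref_stride + w]);
    }
  }
  return sad;
}

int main() {
  const uint8_t a[4] = { 1, 2, 3, 4 }, b[4] = { 4, 3, 2, 1 };
  // 2x2 block, stride 2: |1-4| + |2-3| + |3-2| + |4-1| = 8.
  return reference_sad(a, 2, b, 2, 2, 2) == 8 ? 0 : 1;
}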
diff --git a/test/subtract_test.cc b/test/subtract_test.cc
index 03d18949dfbbf927612225a64a23cded582b1c67..56bbf474f961c843789f45c1db694a14cd18586c 100644
--- a/test/subtract_test.cc
+++ b/test/subtract_test.cc
@@ -19,18 +19,16 @@
 #include "vp10/common/blockd.h"
 #include "vpx_mem/vpx_mem.h"
 
-typedef void (*SubtractFunc)(int rows, int cols,
-                             int16_t *diff_ptr, ptrdiff_t diff_stride,
-                             const uint8_t *src_ptr, ptrdiff_t src_stride,
-                             const uint8_t *pred_ptr, ptrdiff_t pred_stride);
+typedef void (*SubtractFunc)(int rows, int cols, int16_t *diff_ptr,
+                             ptrdiff_t diff_stride, const uint8_t *src_ptr,
+                             ptrdiff_t src_stride, const uint8_t *pred_ptr,
+                             ptrdiff_t pred_stride);
 
 namespace vp9 {
 
 class VP9SubtractBlockTest : public ::testing::TestWithParam<SubtractFunc> {
  public:
-  virtual void TearDown() {
-    libvpx_test::ClearSystemState();
-  }
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
 };
 
 using libvpx_test::ACMRandom;
@@ -47,7 +45,7 @@ TEST_P(VP9SubtractBlockTest, SimpleSubtract) {
         vpx_memalign(16, sizeof(*diff) * block_width * block_height * 2));
     uint8_t *pred = reinterpret_cast<uint8_t *>(
         vpx_memalign(16, block_width * block_height * 2));
-    uint8_t *src  = reinterpret_cast<uint8_t *>(
+    uint8_t *src = reinterpret_cast<uint8_t *>(
         vpx_memalign(16, block_width * block_height * 2));
 
     for (int n = 0; n < 100; n++) {
@@ -58,29 +56,26 @@ TEST_P(VP9SubtractBlockTest, SimpleSubtract) {
         }
       }
 
-      GetParam()(block_height, block_width, diff, block_width,
-                 src, block_width, pred, block_width);
+      GetParam()(block_height, block_width, diff, block_width, src, block_width,
+                 pred, block_width);
 
       for (int r = 0; r < block_height; ++r) {
         for (int c = 0; c < block_width; ++c) {
           EXPECT_EQ(diff[r * block_width + c],
-                    (src[r * block_width + c] -
-                     pred[r * block_width + c])) << "r = " << r
-                                                 << ", c = " << c
-                                                 << ", bs = " << bsize;
+                    (src[r * block_width + c] - pred[r * block_width + c]))
+              << "r = " << r << ", c = " << c << ", bs = " << bsize;
         }
       }
 
-      GetParam()(block_height, block_width, diff, block_width * 2,
-                 src, block_width * 2, pred, block_width * 2);
+      GetParam()(block_height, block_width, diff, block_width * 2, src,
+                 block_width * 2, pred, block_width * 2);
 
       for (int r = 0; r < block_height; ++r) {
         for (int c = 0; c < block_width; ++c) {
-          EXPECT_EQ(diff[r * block_width * 2 + c],
-                    (src[r * block_width * 2 + c] -
-                     pred[r * block_width * 2 + c])) << "r = " << r
-                                                     << ", c = " << c
-                                                     << ", bs = " << bsize;
+          EXPECT_EQ(
+              diff[r * block_width * 2 + c],
+              (src[r * block_width * 2 + c] - pred[r * block_width * 2 + c]))
+              << "r = " << r << ", c = " << c << ", bs = " << bsize;
         }
       }
     }
diff --git a/test/superframe_test.cc b/test/superframe_test.cc
index 6e5461746602ed2ed0d3c9a57330aee27f97a337..0e3fcbf4d53596ccfe6233d3c63996b859bfb2b6 100644
--- a/test/superframe_test.cc
+++ b/test/superframe_test.cc
@@ -19,13 +19,14 @@ namespace {
 const int kTestMode = 0;
 const int kSuperframeSyntax = 1;
 
-typedef std::tr1::tuple<libvpx_test::TestMode,int> SuperframeTestParam;
+typedef std::tr1::tuple<libvpx_test::TestMode, int> SuperframeTestParam;
 
-class SuperframeTest : public ::libvpx_test::EncoderTest,
-    public ::libvpx_test::CodecTestWithParam<SuperframeTestParam> {
+class SuperframeTest
+    : public ::libvpx_test::EncoderTest,
+      public ::libvpx_test::CodecTestWithParam<SuperframeTestParam> {
  protected:
-  SuperframeTest() : EncoderTest(GET_PARAM(0)), modified_buf_(NULL),
-      last_sf_pts_(0) {}
+  SuperframeTest()
+      : EncoderTest(GET_PARAM(0)), modified_buf_(NULL), last_sf_pts_(0) {}
   virtual ~SuperframeTest() {}
 
   virtual void SetUp() {
@@ -39,9 +40,7 @@ class SuperframeTest : public ::libvpx_test::EncoderTest,
     is_vp10_style_superframe_ = syntax;
   }
 
-  virtual void TearDown() {
-    delete[] modified_buf_;
-  }
+  virtual void TearDown() { delete[] modified_buf_; }
 
   virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
                                   libvpx_test::Encoder *encoder) {
@@ -50,26 +49,22 @@ class SuperframeTest : public ::libvpx_test::EncoderTest,
     }
   }
 
-  virtual const vpx_codec_cx_pkt_t * MutateEncoderOutputHook(
+  virtual const vpx_codec_cx_pkt_t *MutateEncoderOutputHook(
       const vpx_codec_cx_pkt_t *pkt) {
-    if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
-      return pkt;
+    if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return pkt;
 
-    const uint8_t *buffer = reinterpret_cast<uint8_t*>(pkt->data.frame.buf);
+    const uint8_t *buffer = reinterpret_cast<uint8_t *>(pkt->data.frame.buf);
     const uint8_t marker = buffer[pkt->data.frame.sz - 1];
     const int frames = (marker & 0x7) + 1;
     const int mag = ((marker >> 3) & 3) + 1;
     const unsigned int index_sz =
         2 + mag * (frames - is_vp10_style_superframe_);
-    if ((marker & 0xe0) == 0xc0 &&
-        pkt->data.frame.sz >= index_sz &&
+    if ((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz &&
         buffer[pkt->data.frame.sz - index_sz] == marker) {
       // Frame is a superframe; strip off the index.
-      if (modified_buf_)
-        delete[] modified_buf_;
+      if (modified_buf_) delete[] modified_buf_;
       modified_buf_ = new uint8_t[pkt->data.frame.sz - index_sz];
-      memcpy(modified_buf_, pkt->data.frame.buf,
-             pkt->data.frame.sz - index_sz);
+      memcpy(modified_buf_, pkt->data.frame.buf, pkt->data.frame.sz - index_sz);
       modified_pkt_ = *pkt;
       modified_pkt_.data.frame.buf = modified_buf_;
       modified_pkt_.data.frame.sz -= index_sz;
@@ -80,8 +75,8 @@ class SuperframeTest : public ::libvpx_test::EncoderTest,
     }
 
     // Make sure we encode a few more frames after the last superframe.
-    abort_ |= sf_count_ > sf_count_max_ &&
-              pkt->data.frame.pts - last_sf_pts_ >= 5;
+    abort_ |=
+        sf_count_ > sf_count_max_ && pkt->data.frame.pts - last_sf_pts_ >= 5;
     return pkt;
   }
 
@@ -103,7 +98,8 @@ TEST_P(SuperframeTest, TestSuperframeIndexIsOptional) {
   EXPECT_EQ(sf_count_, 1);
 }
 
-VP10_INSTANTIATE_TEST_CASE(SuperframeTest, ::testing::Combine(
-    ::testing::Values(::libvpx_test::kTwoPassGood),
-    ::testing::Values(CONFIG_MISC_FIXES)));
+VP10_INSTANTIATE_TEST_CASE(
+    SuperframeTest,
+    ::testing::Combine(::testing::Values(::libvpx_test::kTwoPassGood),
+                       ::testing::Values(CONFIG_MISC_FIXES)));
 }  // namespace
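
MutateEncoderOutputHook above recognizes a VP9 superframe purely from its trailing marker byte plus the duplicate marker at the start of the index. A worked decoding of that byte, following the field extraction in the code above (the vp10-style `frames - is_vp10_style_superframe_` adjustment is left out):

#include <stdint.h>
#include <stdio.h>

int main() {
  const uint8_t marker = 0xc9;    // example marker: 0b11001001
  if ((marker & 0xe0) == 0xc0) {  // top three bits 110 => superframe index
    const int frames = (marker & 0x7) + 1;      // low 3 bits: frames - 1 => 2
    const int mag = ((marker >> 3) & 0x3) + 1;  // size-field width, bytes => 2
    const unsigned int index_sz = 2 + mag * frames;  // two markers + sizes
    printf("frames=%d mag=%d index_sz=%u\n", frames, mag, index_sz);  // 2 2 6
  }
  return 0;
}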
diff --git a/test/svc_test.cc b/test/svc_test.cc
index eb27975673d7bb10952d2edaed82f54edc2fa21f..2055915bb2b702d921818f4da411d23b32c86835 100644
--- a/test/svc_test.cc
+++ b/test/svc_test.cc
@@ -33,10 +33,8 @@ class SvcTest : public ::testing::Test {
   static const uint32_t kHeight = 288;
 
   SvcTest()
-      : codec_iface_(0),
-        test_file_name_("hantro_collage_w352h288.yuv"),
-        codec_initialized_(false),
-        decoder_(0) {
+      : codec_iface_(0), test_file_name_("hantro_collage_w352h288.yuv"),
+        codec_initialized_(false), decoder_(0) {
     memset(&svc_, 0, sizeof(svc_));
     memset(&codec_, 0, sizeof(codec_));
     memset(&codec_enc_, 0, sizeof(codec_enc_));
@@ -70,7 +68,7 @@ class SvcTest : public ::testing::Test {
 
   virtual void TearDown() {
     ReleaseEncoder();
-    delete(decoder_);
+    delete (decoder_);
   }
 
   void InitializeEncoder() {
@@ -97,7 +95,7 @@ class SvcTest : public ::testing::Test {
       if (cx_pkt->kind == VPX_CODEC_STATS_PKT) {
         EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U);
         ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL);
-        stats_buf->append(static_cast<char*>(cx_pkt->data.twopass_stats.buf),
+        stats_buf->append(static_cast<char *>(cx_pkt->data.twopass_stats.buf),
                           cx_pkt->data.twopass_stats.sz);
       }
     }
@@ -113,10 +111,9 @@ class SvcTest : public ::testing::Test {
     codec_enc_.g_pass = VPX_RC_FIRST_PASS;
     InitializeEncoder();
 
-    libvpx_test::I420VideoSource video(test_file_name_,
-                                       codec_enc_.g_w, codec_enc_.g_h,
-                                       codec_enc_.g_timebase.den,
-                                       codec_enc_.g_timebase.num, 0, 30);
+    libvpx_test::I420VideoSource video(
+        test_file_name_, codec_enc_.g_w, codec_enc_.g_h,
+        codec_enc_.g_timebase.den, codec_enc_.g_timebase.num, 0, 30);
     video.Begin();
 
     for (int i = 0; i < n; ++i) {
@@ -128,8 +125,8 @@ class SvcTest : public ::testing::Test {
     }
 
     // Flush encoder and test EOS packet.
-    res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
-                         video.duration(), VPX_DL_GOOD_QUALITY);
+    res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(), video.duration(),
+                         VPX_DL_GOOD_QUALITY);
     ASSERT_EQ(VPX_CODEC_OK, res);
     GetStatsData(stats_buf);
 
@@ -163,8 +160,8 @@ class SvcTest : public ::testing::Test {
     }
   }
 
-  void Pass2EncodeNFrames(std::string *const stats_buf,
-                          const int n, const int layers,
+  void Pass2EncodeNFrames(std::string *const stats_buf, const int n,
+                          const int layers,
                           struct vpx_fixed_buf *const outputs) {
     vpx_codec_err_t res;
     size_t frame_received = 0;
@@ -182,10 +179,9 @@ class SvcTest : public ::testing::Test {
     }
     InitializeEncoder();
 
-    libvpx_test::I420VideoSource video(test_file_name_,
-                                       codec_enc_.g_w, codec_enc_.g_h,
-                                       codec_enc_.g_timebase.den,
-                                       codec_enc_.g_timebase.num, 0, 30);
+    libvpx_test::I420VideoSource video(
+        test_file_name_, codec_enc_.g_w, codec_enc_.g_h,
+        codec_enc_.g_timebase.den, codec_enc_.g_timebase.num, 0, 30);
     video.Begin();
 
     for (int i = 0; i < n; ++i) {
@@ -197,8 +193,8 @@ class SvcTest : public ::testing::Test {
     }
 
     // Flush encoder.
-    res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
-                         video.duration(), VPX_DL_GOOD_QUALITY);
+    res = vpx_svc_encode(&svc_, &codec_, NULL, 0, video.duration(),
+                         VPX_DL_GOOD_QUALITY);
     EXPECT_EQ(VPX_CODEC_OK, res);
     StoreFrames(n, outputs, &frame_received);
 
@@ -217,9 +213,8 @@ class SvcTest : public ::testing::Test {
     for (int i = 0; i < n; ++i) {
       ASSERT_TRUE(inputs[i].buf != NULL);
       ASSERT_GT(inputs[i].sz, 0U);
-      const vpx_codec_err_t res_dec =
-          decoder_->DecodeFrame(static_cast<const uint8_t *>(inputs[i].buf),
-                                inputs[i].sz);
+      const vpx_codec_err_t res_dec = decoder_->DecodeFrame(
+          static_cast<const uint8_t *>(inputs[i].buf), inputs[i].sz);
       ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
       ++decoded_frames;
 
@@ -240,17 +235,16 @@ class SvcTest : public ::testing::Test {
     ASSERT_GT(remained_spatial_layers, 0);
 
     for (int i = 0; i < num_super_frames; ++i) {
-      uint32_t frame_sizes[8] = {0};
+      uint32_t frame_sizes[8] = { 0 };
       int frame_count = 0;
       int frames_found = 0;
       int frame;
       ASSERT_TRUE(inputs[i].buf != NULL);
       ASSERT_GT(inputs[i].sz, 0U);
 
-      vpx_codec_err_t res =
-          vp9_parse_superframe_index(static_cast<const uint8_t*>(inputs[i].buf),
-                                     inputs[i].sz, frame_sizes, &frame_count,
-                                     NULL, NULL);
+      vpx_codec_err_t res = vp9_parse_superframe_index(
+          static_cast<const uint8_t *>(inputs[i].buf), inputs[i].sz,
+          frame_sizes, &frame_count, NULL, NULL);
       ASSERT_EQ(VPX_CODEC_OK, res);
 
       if (frame_count == 0) {
@@ -258,28 +252,27 @@ class SvcTest : public ::testing::Test {
         ASSERT_EQ(1, remained_spatial_layers);
       } else {
         // Found a super frame.
-        uint8_t *frame_data = static_cast<uint8_t*>(inputs[i].buf);
+        uint8_t *frame_data = static_cast<uint8_t *>(inputs[i].buf);
         uint8_t *frame_start = frame_data;
         for (frame = 0; frame < frame_count; ++frame) {
           // Looking for a visible frame.
           if (frame_data[0] & 0x02) {
             ++frames_found;
-            if (frames_found == remained_spatial_layers)
-              break;
+            if (frames_found == remained_spatial_layers) break;
           }
           frame_data += frame_sizes[frame];
         }
-        ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. "
+        ASSERT_LT(frame, frame_count)
+            << "Couldn't find a visible frame. "
             << "remained_spatial_layers: " << remained_spatial_layers
             << "    super_frame: " << i;
-        if (frame == frame_count - 1)
-          continue;
+        if (frame == frame_count - 1) continue;
 
         frame_data += frame_sizes[frame];
 
         // We need to add one more frame for multiple frame contexts.
         uint8_t marker =
-            static_cast<const uint8_t*>(inputs[i].buf)[inputs[i].sz - 1];
+            static_cast<const uint8_t *>(inputs[i].buf)[inputs[i].sz - 1];
         const uint32_t mag = ((marker >> 3) & 0x3) + 1;
         const size_t index_sz = 2 + mag * frame_count;
         const size_t new_index_sz = 2 + mag * (frame + 1);
@@ -445,7 +438,7 @@ TEST_F(SvcTest, SetAutoAltRefOption) {
 // Test that the decoder handles an SVC frame as the first frame in a sequence.
 TEST_F(SvcTest, OnePassEncodeOneFrame) {
   codec_enc_.g_pass = VPX_RC_ONE_PASS;
-  vpx_fixed_buf output = {0};
+  vpx_fixed_buf output = { 0 };
   Pass2EncodeNFrames(NULL, 1, 2, &output);
   DecodeNFrames(&output, 1);
   FreeBitstreamBuffers(&output, 1);
@@ -539,8 +532,7 @@ TEST_F(SvcTest, TwoPassEncode2SNRLayers) {
 
   // Second pass encode
   codec_enc_.g_pass = VPX_RC_LAST_PASS;
-  vpx_svc_set_options(&svc_,
-                      "auto-alt-refs=1,1 scale-factors=1/1,1/1");
+  vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 scale-factors=1/1,1/1");
   vpx_fixed_buf outputs[20];
   memset(&outputs[0], 0, sizeof(outputs));
   Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
@@ -556,8 +548,7 @@ TEST_F(SvcTest, TwoPassEncode3SNRLayersDecode321Layers) {
 
   // Second pass encode
   codec_enc_.g_pass = VPX_RC_LAST_PASS;
-  vpx_svc_set_options(&svc_,
-                      "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1");
+  vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1");
   vpx_fixed_buf outputs[20];
   memset(&outputs[0], 0, sizeof(outputs));
   Pass2EncodeNFrames(&stats_buf, 20, 3, &outputs[0]);
@@ -572,8 +563,7 @@ TEST_F(SvcTest, TwoPassEncode3SNRLayersDecode321Layers) {
 
 TEST_F(SvcTest, SetMultipleFrameContextsOption) {
   svc_.spatial_layers = 5;
-  vpx_codec_err_t res =
-      vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
+  vpx_codec_err_t res = vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
   EXPECT_EQ(VPX_CODEC_OK, res);
   res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
   EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
@@ -626,7 +616,8 @@ TEST_F(SvcTest, TwoPassEncode2SNRLayersWithMultipleFrameContexts) {
   // Second pass encode
   codec_enc_.g_pass = VPX_RC_LAST_PASS;
   codec_enc_.g_error_resilient = 0;
-  vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 scale-factors=1/1,1/1 "
+  vpx_svc_set_options(&svc_,
+                      "auto-alt-refs=1,1 scale-factors=1/1,1/1 "
                       "multi-frame-contexts=1");
   vpx_fixed_buf outputs[10];
   memset(&outputs[0], 0, sizeof(outputs));
@@ -645,7 +636,8 @@ TEST_F(SvcTest,
   // Second pass encode
   codec_enc_.g_pass = VPX_RC_LAST_PASS;
   codec_enc_.g_error_resilient = 0;
-  vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1 "
+  vpx_svc_set_options(&svc_,
+                      "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1 "
                       "multi-frame-contexts=1");
   vpx_fixed_buf outputs[10];
   memset(&outputs[0], 0, sizeof(outputs));
@@ -689,7 +681,8 @@ TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithMultipleFrameContexts) {
   codec_enc_.g_pass = VPX_RC_LAST_PASS;
   svc_.temporal_layers = 2;
   codec_enc_.g_error_resilient = 0;
-  vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+  vpx_svc_set_options(&svc_,
+                      "auto-alt-refs=1 scale-factors=1/1 "
                       "multi-frame-contexts=1");
   vpx_fixed_buf outputs[10];
   memset(&outputs[0], 0, sizeof(outputs));
@@ -714,8 +707,7 @@ TEST_F(SvcTest, TwoPassEncode2TemporalLayersDecodeBaseLayer) {
   Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
 
   vpx_fixed_buf base_layer[5];
-  for (int i = 0; i < 5; ++i)
-    base_layer[i] = outputs[i * 2];
+  for (int i = 0; i < 5; ++i) base_layer[i] = outputs[i * 2];
 
   DecodeNFrames(&base_layer[0], 5);
   FreeBitstreamBuffers(&outputs[0], 10);
@@ -733,15 +725,15 @@ TEST_F(SvcTest,
   codec_enc_.g_pass = VPX_RC_LAST_PASS;
   svc_.temporal_layers = 2;
   codec_enc_.g_error_resilient = 0;
-  vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+  vpx_svc_set_options(&svc_,
+                      "auto-alt-refs=1 scale-factors=1/1 "
                       "multi-frame-contexts=1");
   vpx_fixed_buf outputs[10];
   memset(&outputs[0], 0, sizeof(outputs));
   Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
 
   vpx_fixed_buf base_layer[5];
-  for (int i = 0; i < 5; ++i)
-    base_layer[i] = outputs[i * 2];
+  for (int i = 0; i < 5; ++i) base_layer[i] = outputs[i * 2];
 
   DecodeNFrames(&base_layer[0], 5);
   FreeBitstreamBuffers(&outputs[0], 10);
@@ -769,8 +761,7 @@ TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithTiles) {
   FreeBitstreamBuffers(&outputs[0], 10);
 }
 
-TEST_F(SvcTest,
-       TwoPassEncode2TemporalLayersWithMultipleFrameContextsAndTiles) {
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithMultipleFrameContextsAndTiles) {
   // First pass encode
   std::string stats_buf;
   vpx_svc_set_options(&svc_, "scale-factors=1/1");
@@ -785,7 +776,8 @@ TEST_F(SvcTest,
   codec_enc_.g_h = 144;
   tile_columns_ = 1;
   tile_rows_ = 1;
-  vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+  vpx_svc_set_options(&svc_,
+                      "auto-alt-refs=1 scale-factors=1/1 "
                       "multi-frame-contexts=1");
   vpx_fixed_buf outputs[10];
   memset(&outputs[0], 0, sizeof(outputs));
diff --git a/test/test_intra_pred_speed.cc b/test/test_intra_pred_speed.cc
index 3e65fecfb6324ced7bcf5656e0d0480f0b38d86b..9062f23af74bd1effb4a2137606b05fa901a9bf9 100644
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -31,9 +31,9 @@ typedef void (*VpxPredFunc)(uint8_t *dst, ptrdiff_t y_stride,
 
 const int kNumVp9IntraPredFuncs = 13;
 const char *kVp9IntraPredNames[kNumVp9IntraPredFuncs] = {
-  "DC_PRED", "DC_LEFT_PRED", "DC_TOP_PRED", "DC_128_PRED", "V_PRED", "H_PRED",
-  "D45_PRED", "D135_PRED", "D117_PRED", "D153_PRED", "D207_PRED", "D63_PRED",
-  "TM_PRED"
+  "DC_PRED",   "DC_LEFT_PRED", "DC_TOP_PRED", "DC_128_PRED", "V_PRED",
+  "H_PRED",    "D45_PRED",     "D135_PRED",   "D117_PRED",   "D153_PRED",
+  "D207_PRED", "D63_PRED",     "TM_PRED"
 };
 
 void TestIntraPred(const char name[], VpxPredFunc const *pred_funcs,
@@ -82,18 +82,12 @@ void TestIntraPred(const char name[], VpxPredFunc const *pred_funcs,
 void TestIntraPred4(VpxPredFunc const *pred_funcs) {
   static const int kNumVp9IntraFuncs = 13;
   static const char *const kSignatures[kNumVp9IntraFuncs] = {
-    "4334156168b34ab599d9b5b30f522fe9",
-    "bc4649d5ba47c7ff178d92e475960fb0",
-    "8d316e5933326dcac24e1064794b5d12",
-    "a27270fed024eafd762c95de85f4da51",
-    "c33dff000d4256c2b8f3bf9e9bab14d2",
-    "44d8cddc2ad8f79b8ed3306051722b4f",
-    "eb54839b2bad6699d8946f01ec041cd0",
-    "ecb0d56ae5f677ea45127ce9d5c058e4",
-    "0b7936841f6813da818275944895b574",
-    "9117972ef64f91a58ff73e1731c81db2",
-    "c56d5e8c729e46825f46dd5d3b5d508a",
-    "c0889e2039bcf7bcb5d2f33cdca69adc",
+    "4334156168b34ab599d9b5b30f522fe9", "bc4649d5ba47c7ff178d92e475960fb0",
+    "8d316e5933326dcac24e1064794b5d12", "a27270fed024eafd762c95de85f4da51",
+    "c33dff000d4256c2b8f3bf9e9bab14d2", "44d8cddc2ad8f79b8ed3306051722b4f",
+    "eb54839b2bad6699d8946f01ec041cd0", "ecb0d56ae5f677ea45127ce9d5c058e4",
+    "0b7936841f6813da818275944895b574", "9117972ef64f91a58ff73e1731c81db2",
+    "c56d5e8c729e46825f46dd5d3b5d508a", "c0889e2039bcf7bcb5d2f33cdca69adc",
     "309a618577b27c648f9c5ee45252bc8f",
   };
   TestIntraPred("Intra4", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
@@ -103,18 +97,12 @@ void TestIntraPred4(VpxPredFunc const *pred_funcs) {
 void TestIntraPred8(VpxPredFunc const *pred_funcs) {
   static const int kNumVp9IntraFuncs = 13;
   static const char *const kSignatures[kNumVp9IntraFuncs] = {
-    "7694ddeeefed887faf9d339d18850928",
-    "7d726b1213591b99f736be6dec65065b",
-    "19c5711281357a485591aaf9c96c0a67",
-    "ba6b66877a089e71cd938e3b8c40caac",
-    "802440c93317e0f8ba93fab02ef74265",
-    "9e09a47a15deb0b9d8372824f9805080",
-    "b7c2d8c662268c0c427da412d7b0311d",
-    "78339c1c60bb1d67d248ab8c4da08b7f",
-    "5c97d70f7d47de1882a6cd86c165c8a9",
-    "8182bf60688b42205acd95e59e967157",
-    "08323400005a297f16d7e57e7fe1eaac",
-    "95f7bfc262329a5849eda66d8f7c68ce",
+    "7694ddeeefed887faf9d339d18850928", "7d726b1213591b99f736be6dec65065b",
+    "19c5711281357a485591aaf9c96c0a67", "ba6b66877a089e71cd938e3b8c40caac",
+    "802440c93317e0f8ba93fab02ef74265", "9e09a47a15deb0b9d8372824f9805080",
+    "b7c2d8c662268c0c427da412d7b0311d", "78339c1c60bb1d67d248ab8c4da08b7f",
+    "5c97d70f7d47de1882a6cd86c165c8a9", "8182bf60688b42205acd95e59e967157",
+    "08323400005a297f16d7e57e7fe1eaac", "95f7bfc262329a5849eda66d8f7c68ce",
     "815b75c8e0d91cc1ae766dc5d3e445a3",
   };
   TestIntraPred("Intra8", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
@@ -124,18 +112,12 @@ void TestIntraPred8(VpxPredFunc const *pred_funcs) {
 void TestIntraPred16(VpxPredFunc const *pred_funcs) {
   static const int kNumVp9IntraFuncs = 13;
   static const char *const kSignatures[kNumVp9IntraFuncs] = {
-    "b40dbb555d5d16a043dc361e6694fe53",
-    "fb08118cee3b6405d64c1fd68be878c6",
-    "6c190f341475c837cc38c2e566b64875",
-    "db5c34ccbe2c7f595d9b08b0dc2c698c",
-    "a62cbfd153a1f0b9fed13e62b8408a7a",
-    "143df5b4c89335e281103f610f5052e4",
-    "d87feb124107cdf2cfb147655aa0bb3c",
-    "7841fae7d4d47b519322e6a03eeed9dc",
-    "f6ebed3f71cbcf8d6d0516ce87e11093",
-    "3cc480297dbfeed01a1c2d78dd03d0c5",
-    "b9f69fa6532b372c545397dcb78ef311",
-    "a8fe1c70432f09d0c20c67bdb6432c4d",
+    "b40dbb555d5d16a043dc361e6694fe53", "fb08118cee3b6405d64c1fd68be878c6",
+    "6c190f341475c837cc38c2e566b64875", "db5c34ccbe2c7f595d9b08b0dc2c698c",
+    "a62cbfd153a1f0b9fed13e62b8408a7a", "143df5b4c89335e281103f610f5052e4",
+    "d87feb124107cdf2cfb147655aa0bb3c", "7841fae7d4d47b519322e6a03eeed9dc",
+    "f6ebed3f71cbcf8d6d0516ce87e11093", "3cc480297dbfeed01a1c2d78dd03d0c5",
+    "b9f69fa6532b372c545397dcb78ef311", "a8fe1c70432f09d0c20c67bdb6432c4d",
     "b8a41aa968ec108af447af4217cba91b",
   };
   TestIntraPred("Intra16", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
@@ -145,18 +127,12 @@ void TestIntraPred16(VpxPredFunc const *pred_funcs) {
 void TestIntraPred32(VpxPredFunc const *pred_funcs) {
   static const int kNumVp9IntraFuncs = 13;
   static const char *const kSignatures[kNumVp9IntraFuncs] = {
-    "558541656d84f9ae7896db655826febe",
-    "b3587a1f9a01495fa38c8cd3c8e2a1bf",
-    "4c6501e64f25aacc55a2a16c7e8f0255",
-    "b3b01379ba08916ef6b1b35f7d9ad51c",
-    "0f1eb38b6cbddb3d496199ef9f329071",
-    "911c06efb9ed1c3b4c104b232b55812f",
-    "9225beb0ddfa7a1d24eaa1be430a6654",
-    "0a6d584a44f8db9aa7ade2e2fdb9fc9e",
-    "b01c9076525216925f3456f034fb6eee",
-    "d267e20ad9e5cd2915d1a47254d3d149",
-    "ed012a4a5da71f36c2393023184a0e59",
-    "f162b51ed618d28b936974cff4391da5",
+    "558541656d84f9ae7896db655826febe", "b3587a1f9a01495fa38c8cd3c8e2a1bf",
+    "4c6501e64f25aacc55a2a16c7e8f0255", "b3b01379ba08916ef6b1b35f7d9ad51c",
+    "0f1eb38b6cbddb3d496199ef9f329071", "911c06efb9ed1c3b4c104b232b55812f",
+    "9225beb0ddfa7a1d24eaa1be430a6654", "0a6d584a44f8db9aa7ade2e2fdb9fc9e",
+    "b01c9076525216925f3456f034fb6eee", "d267e20ad9e5cd2915d1a47254d3d149",
+    "ed012a4a5da71f36c2393023184a0e59", "f162b51ed618d28b936974cff4391da5",
     "9e1370c6d42e08d357d9612c93a71cfc",
   };
   TestIntraPred("Intra32", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
@@ -167,13 +143,13 @@ void TestIntraPred32(VpxPredFunc const *pred_funcs) {
 
 // Defines a test case for |arch| (e.g., C, SSE2, ...) passing the predictors
 // to |test_func|. The test name is 'arch.test_func', e.g., C.TestIntraPred4.
-#define INTRA_PRED_TEST(arch, test_func, dc, dc_left, dc_top, dc_128, v, h, \
-                        d45, d135, d117, d153, d207, d63, tm)               \
-  TEST(arch, test_func) {                                                   \
-    static const VpxPredFunc vpx_intra_pred[] = {                           \
-        dc,   dc_left, dc_top, dc_128, v,   h, d45,                         \
-        d135, d117,    d153,   d207,   d63, tm};                            \
-    test_func(vpx_intra_pred);                                              \
+#define INTRA_PRED_TEST(arch, test_func, dc, dc_left, dc_top, dc_128, v, h,   \
+                        d45, d135, d117, d153, d207, d63, tm)                 \
+  TEST(arch, test_func) {                                                     \
+    static const VpxPredFunc vpx_intra_pred[] = {                             \
+      dc, dc_left, dc_top, dc_128, v, h, d45, d135, d117, d153, d207, d63, tm \
+    };                                                                        \
+    test_func(vpx_intra_pred);                                                \
   }
 
 // -----------------------------------------------------------------------------
@@ -196,8 +172,8 @@ INTRA_PRED_TEST(SSE2, TestIntraPred4, vpx_dc_predictor_4x4_sse2,
 #endif  // HAVE_SSE2 && CONFIG_USE_X86INC
 
 #if HAVE_SSSE3 && CONFIG_USE_X86INC
-INTRA_PRED_TEST(SSSE3, TestIntraPred4, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_d45_predictor_4x4_ssse3, NULL, NULL,
+INTRA_PRED_TEST(SSSE3, TestIntraPred4, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_d45_predictor_4x4_ssse3, NULL, NULL,
                 vpx_d153_predictor_4x4_ssse3, vpx_d207_predictor_4x4_ssse3,
                 vpx_d63_predictor_4x4_ssse3, NULL)
 #endif  // HAVE_SSSE3 && CONFIG_USE_X86INC
@@ -221,8 +197,8 @@ INTRA_PRED_TEST(NEON, TestIntraPred4, vpx_dc_predictor_4x4_neon,
 INTRA_PRED_TEST(MSA, TestIntraPred4, vpx_dc_predictor_4x4_msa,
                 vpx_dc_left_predictor_4x4_msa, vpx_dc_top_predictor_4x4_msa,
                 vpx_dc_128_predictor_4x4_msa, vpx_v_predictor_4x4_msa,
-                vpx_h_predictor_4x4_msa, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_tm_predictor_4x4_msa)
+                vpx_h_predictor_4x4_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_tm_predictor_4x4_msa)
 #endif  // HAVE_MSA
 
 // -----------------------------------------------------------------------------
@@ -240,13 +216,13 @@ INTRA_PRED_TEST(C, TestIntraPred8, vpx_dc_predictor_8x8_c,
 INTRA_PRED_TEST(SSE2, TestIntraPred8, vpx_dc_predictor_8x8_sse2,
                 vpx_dc_left_predictor_8x8_sse2, vpx_dc_top_predictor_8x8_sse2,
                 vpx_dc_128_predictor_8x8_sse2, vpx_v_predictor_8x8_sse2,
-                vpx_h_predictor_8x8_sse2, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_tm_predictor_8x8_sse2)
+                vpx_h_predictor_8x8_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_tm_predictor_8x8_sse2)
 #endif  // HAVE_SSE2 && CONFIG_USE_X86INC
 
 #if HAVE_SSSE3 && CONFIG_USE_X86INC
-INTRA_PRED_TEST(SSSE3, TestIntraPred8, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_d45_predictor_8x8_ssse3, NULL, NULL,
+INTRA_PRED_TEST(SSSE3, TestIntraPred8, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_d45_predictor_8x8_ssse3, NULL, NULL,
                 vpx_d153_predictor_8x8_ssse3, vpx_d207_predictor_8x8_ssse3,
                 vpx_d63_predictor_8x8_ssse3, NULL)
 #endif  // HAVE_SSSE3 && CONFIG_USE_X86INC
@@ -270,8 +246,8 @@ INTRA_PRED_TEST(NEON, TestIntraPred8, vpx_dc_predictor_8x8_neon,
 INTRA_PRED_TEST(MSA, TestIntraPred8, vpx_dc_predictor_8x8_msa,
                 vpx_dc_left_predictor_8x8_msa, vpx_dc_top_predictor_8x8_msa,
                 vpx_dc_128_predictor_8x8_msa, vpx_v_predictor_8x8_msa,
-                vpx_h_predictor_8x8_msa, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_tm_predictor_8x8_msa)
+                vpx_h_predictor_8x8_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_tm_predictor_8x8_msa)
 #endif  // HAVE_MSA
 
 // -----------------------------------------------------------------------------
@@ -295,11 +271,10 @@ INTRA_PRED_TEST(SSE2, TestIntraPred16, vpx_dc_predictor_16x16_sse2,
 #endif  // HAVE_SSE2 && CONFIG_USE_X86INC
 
 #if HAVE_SSSE3 && CONFIG_USE_X86INC
-INTRA_PRED_TEST(SSSE3, TestIntraPred16, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_d45_predictor_16x16_ssse3,
-                NULL, NULL, vpx_d153_predictor_16x16_ssse3,
-                vpx_d207_predictor_16x16_ssse3, vpx_d63_predictor_16x16_ssse3,
-                NULL)
+INTRA_PRED_TEST(SSSE3, TestIntraPred16, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_d45_predictor_16x16_ssse3, NULL, NULL,
+                vpx_d153_predictor_16x16_ssse3, vpx_d207_predictor_16x16_ssse3,
+                vpx_d63_predictor_16x16_ssse3, NULL)
 #endif  // HAVE_SSSE3 && CONFIG_USE_X86INC
 
 #if HAVE_DSPR2
@@ -321,8 +296,8 @@ INTRA_PRED_TEST(NEON, TestIntraPred16, vpx_dc_predictor_16x16_neon,
 INTRA_PRED_TEST(MSA, TestIntraPred16, vpx_dc_predictor_16x16_msa,
                 vpx_dc_left_predictor_16x16_msa, vpx_dc_top_predictor_16x16_msa,
                 vpx_dc_128_predictor_16x16_msa, vpx_v_predictor_16x16_msa,
-                vpx_h_predictor_16x16_msa, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_tm_predictor_16x16_msa)
+                vpx_h_predictor_16x16_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_tm_predictor_16x16_msa)
 #endif  // HAVE_MSA
 
 // -----------------------------------------------------------------------------
@@ -341,13 +316,13 @@ INTRA_PRED_TEST(SSE2, TestIntraPred32, vpx_dc_predictor_32x32_sse2,
                 vpx_dc_left_predictor_32x32_sse2,
                 vpx_dc_top_predictor_32x32_sse2,
                 vpx_dc_128_predictor_32x32_sse2, vpx_v_predictor_32x32_sse2,
-                vpx_h_predictor_32x32_sse2, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_tm_predictor_32x32_sse2)
+                vpx_h_predictor_32x32_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_tm_predictor_32x32_sse2)
 #endif  // HAVE_SSE2 && CONFIG_USE_X86INC
 
 #if HAVE_SSSE3 && CONFIG_USE_X86INC
-INTRA_PRED_TEST(SSSE3, TestIntraPred32, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_d45_predictor_32x32_ssse3, NULL, NULL,
+INTRA_PRED_TEST(SSSE3, TestIntraPred32, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_d45_predictor_32x32_ssse3, NULL, NULL,
                 vpx_d153_predictor_32x32_ssse3, vpx_d207_predictor_32x32_ssse3,
                 vpx_d63_predictor_32x32_ssse3, NULL)
 #endif  // HAVE_SSSE3 && CONFIG_USE_X86INC
@@ -365,8 +340,8 @@ INTRA_PRED_TEST(NEON, TestIntraPred32, vpx_dc_predictor_32x32_neon,
 INTRA_PRED_TEST(MSA, TestIntraPred32, vpx_dc_predictor_32x32_msa,
                 vpx_dc_left_predictor_32x32_msa, vpx_dc_top_predictor_32x32_msa,
                 vpx_dc_128_predictor_32x32_msa, vpx_v_predictor_32x32_msa,
-                vpx_h_predictor_32x32_msa, NULL, NULL, NULL, NULL, NULL,
-                NULL, vpx_tm_predictor_32x32_msa)
+                vpx_h_predictor_32x32_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+                vpx_tm_predictor_32x32_msa)
 #endif  // HAVE_MSA
 
 #include "test/test_libvpx.cc"
diff --git a/test/test_libvpx.cc b/test/test_libvpx.cc
index b59d876092e2ed15b6eaffa99ad01faa3d666946..9867f9d4f3c84d9cbbd298b354a8a845b1b173a6 100644
--- a/test/test_libvpx.cc
+++ b/test/test_libvpx.cc
@@ -38,22 +38,16 @@ int main(int argc, char **argv) {
 
 #if ARCH_X86 || ARCH_X86_64
   const int simd_caps = x86_simd_caps();
-  if (!(simd_caps & HAS_MMX))
-    append_negative_gtest_filter(":MMX.*:MMX/*");
-  if (!(simd_caps & HAS_SSE))
-    append_negative_gtest_filter(":SSE.*:SSE/*");
-  if (!(simd_caps & HAS_SSE2))
-    append_negative_gtest_filter(":SSE2.*:SSE2/*");
-  if (!(simd_caps & HAS_SSE3))
-    append_negative_gtest_filter(":SSE3.*:SSE3/*");
+  if (!(simd_caps & HAS_MMX)) append_negative_gtest_filter(":MMX.*:MMX/*");
+  if (!(simd_caps & HAS_SSE)) append_negative_gtest_filter(":SSE.*:SSE/*");
+  if (!(simd_caps & HAS_SSE2)) append_negative_gtest_filter(":SSE2.*:SSE2/*");
+  if (!(simd_caps & HAS_SSE3)) append_negative_gtest_filter(":SSE3.*:SSE3/*");
   if (!(simd_caps & HAS_SSSE3))
     append_negative_gtest_filter(":SSSE3.*:SSSE3/*");
   if (!(simd_caps & HAS_SSE4_1))
     append_negative_gtest_filter(":SSE4_1.*:SSE4_1/*");
-  if (!(simd_caps & HAS_AVX))
-    append_negative_gtest_filter(":AVX.*:AVX/*");
-  if (!(simd_caps & HAS_AVX2))
-    append_negative_gtest_filter(":AVX2.*:AVX2/*");
+  if (!(simd_caps & HAS_AVX)) append_negative_gtest_filter(":AVX.*:AVX/*");
+  if (!(simd_caps & HAS_AVX2)) append_negative_gtest_filter(":AVX2.*:AVX2/*");
 #endif  // ARCH_X86 || ARCH_X86_64
 
 #if !CONFIG_SHARED
diff --git a/test/tile_independence_test.cc b/test/tile_independence_test.cc
index 41a968cef893d2744e2f4cb2831881cf43ca3eb3..c28ae88003dda3321078a3dbfa4258d49f851d5a 100644
--- a/test/tile_independence_test.cc
+++ b/test/tile_independence_test.cc
@@ -24,9 +24,7 @@ class TileIndependenceTest : public ::libvpx_test::EncoderTest,
                              public ::libvpx_test::CodecTestWithParam<int> {
  protected:
   TileIndependenceTest()
-      : EncoderTest(GET_PARAM(0)),
-        md5_fw_order_(),
-        md5_inv_order_(),
+      : EncoderTest(GET_PARAM(0)), md5_fw_order_(), md5_inv_order_(),
         n_tiles_(GET_PARAM(1)) {
     init_flags_ = VPX_CODEC_USE_PSNR;
     vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
@@ -58,7 +56,7 @@ class TileIndependenceTest : public ::libvpx_test::EncoderTest,
   void UpdateMD5(::libvpx_test::Decoder *dec, const vpx_codec_cx_pkt_t *pkt,
                  ::libvpx_test::MD5 *md5) {
     const vpx_codec_err_t res = dec->DecodeFrame(
-        reinterpret_cast<uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);
+        reinterpret_cast<uint8_t *>(pkt->data.frame.buf), pkt->data.frame.sz);
     if (res != VPX_CODEC_OK) {
       abort_ = true;
       ASSERT_EQ(VPX_CODEC_OK, res);
diff --git a/test/util.h b/test/util.h
index b27bffa94be61b5b3f8c2ae43508ee314121d796..0ef2ad8a50ffc05f2ec1f01dd2b3191b2526351f 100644
--- a/test/util.h
+++ b/test/util.h
@@ -17,14 +17,13 @@
 #include "vpx/vpx_image.h"
 
 // Macros
-#define GET_PARAM(k) std::tr1::get< k >(GetParam())
+#define GET_PARAM(k) std::tr1::get<k>(GetParam())
 
 inline double compute_psnr(const vpx_image_t *img1, const vpx_image_t *img2) {
-  assert((img1->fmt == img2->fmt) &&
-         (img1->d_w == img2->d_w) &&
+  assert((img1->fmt == img2->fmt) && (img1->d_w == img2->d_w) &&
          (img1->d_h == img2->d_h));
 
-  const unsigned int width_y  = img1->d_w;
+  const unsigned int width_y = img1->d_w;
   const unsigned int height_y = img1->d_h;
   unsigned int i, j;
 
diff --git a/test/variance_test.cc b/test/variance_test.cc
index 7e2ce0a092aac3c32404dcf05e73718d96acf254..2a33ed81523865dafa8dbff1079f874d8bb4ebd9 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -41,7 +41,6 @@ typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
                                       const uint8_t *b, int b_stride);
 typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
 
-
 using ::std::tr1::get;
 using ::std::tr1::make_tuple;
 using ::std::tr1::tuple;
@@ -61,8 +60,7 @@ static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
       *se = (*se + 2) >> 2;
       break;
     case VPX_BITS_8:
-    default:
-      break;
+    default: break;
   }
 }
 
@@ -74,8 +72,8 @@ static unsigned int mb_ss_ref(const int16_t *src) {
   return res;
 }
 
-static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
-                             int l2w, int l2h, int src_stride_coeff,
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref, int l2w,
+                             int l2h, int src_stride_coeff,
                              int ref_stride_coeff, uint32_t *sse_ptr,
                              bool use_high_bit_depth_,
                              vpx_bit_depth_t bit_depth) {
@@ -103,9 +101,8 @@ static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
   }
   RoundHighBitDepth(bit_depth, &se, &sse);
   *sse_ptr = static_cast<uint32_t>(sse);
-  return static_cast<uint32_t>(sse -
-                               ((static_cast<int64_t>(se) * se) >>
-                                (l2w + l2h)));
+  return static_cast<uint32_t>(
+      sse - ((static_cast<int64_t>(se) * se) >> (l2w + l2h)));
 }
 
 /* The subpel reference functions differ from the codec version in one aspect:
@@ -116,8 +113,7 @@ static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
  */
 static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
                                     int l2w, int l2h, int xoff, int yoff,
-                                    uint32_t *sse_ptr,
-                                    bool use_high_bit_depth_,
+                                    uint32_t *sse_ptr, bool use_high_bit_depth_,
                                     vpx_bit_depth_t bit_depth) {
   int64_t se = 0;
   uint64_t sse = 0;
@@ -161,18 +157,15 @@ static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
   }
   RoundHighBitDepth(bit_depth, &se, &sse);
   *sse_ptr = static_cast<uint32_t>(sse);
-  return static_cast<uint32_t>(sse -
-                               ((static_cast<int64_t>(se) * se) >>
-                                (l2w + l2h)));
+  return static_cast<uint32_t>(
+      sse - ((static_cast<int64_t>(se) * se) >> (l2w + l2h)));
 }
 
 class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
  public:
   SumOfSquaresTest() : func_(GetParam()) {}
 
-  virtual ~SumOfSquaresTest() {
-    libvpx_test::ClearSystemState();
-  }
+  virtual ~SumOfSquaresTest() { libvpx_test::ClearSystemState(); }
 
  protected:
   void ConstTest();
@@ -208,14 +201,13 @@ void SumOfSquaresTest::RefTest() {
   }
 }
 
-template<typename VarianceFunctionType>
-class VarianceTest
-    : public ::testing::TestWithParam<tuple<int, int,
-                                            VarianceFunctionType, int> > {
+template <typename VarianceFunctionType>
+class VarianceTest : public ::testing::TestWithParam<
+                         tuple<int, int, VarianceFunctionType, int> > {
  public:
   virtual void SetUp() {
-    const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
-    log2width_  = get<0>(params);
+    const tuple<int, int, VarianceFunctionType, int> &params = this->GetParam();
+    log2width_ = get<0>(params);
     width_ = 1 << log2width_;
     log2height_ = get<1>(params);
     height_ = 1 << log2height_;
@@ -276,7 +268,7 @@ class VarianceTest
   VarianceFunctionType variance_;
 };
 
-template<typename VarianceFunctionType>
+template <typename VarianceFunctionType>
 void VarianceTest<VarianceFunctionType>::ZeroTest() {
   for (int i = 0; i <= 255; ++i) {
     if (!use_high_bit_depth_) {
@@ -292,48 +284,47 @@ void VarianceTest<VarianceFunctionType>::ZeroTest() {
         memset(ref_, j, block_size_);
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
-        vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j  << (bit_depth_ - 8),
+        vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
                      block_size_);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
       }
       unsigned int sse;
       unsigned int var;
-      ASM_REGISTER_STATE_CHECK(
-          var = variance_(src_, width_, ref_, width_, &sse));
+      ASM_REGISTER_STATE_CHECK(var =
+                                   variance_(src_, width_, ref_, width_, &sse));
       EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
     }
   }
 }
 
-template<typename VarianceFunctionType>
+template <typename VarianceFunctionType>
 void VarianceTest<VarianceFunctionType>::RefTest() {
   for (int i = 0; i < 10; ++i) {
     for (int j = 0; j < block_size_; j++) {
-    if (!use_high_bit_depth_) {
-      src_[j] = rnd_.Rand8();
-      ref_[j] = rnd_.Rand8();
+      if (!use_high_bit_depth_) {
+        src_[j] = rnd_.Rand8();
+        ref_[j] = rnd_.Rand8();
 #if CONFIG_VPX_HIGHBITDEPTH
-    } else {
-      CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() && mask_;
-      CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() && mask_;
+      } else {
+        CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
 #endif  // CONFIG_VPX_HIGHBITDEPTH
-    }
+      }
     }
     unsigned int sse1, sse2;
     unsigned int var1;
     const int stride_coeff = 1;
-    ASM_REGISTER_STATE_CHECK(
-        var1 = variance_(src_, width_, ref_, width_, &sse1));
-    const unsigned int var2 = variance_ref(src_, ref_, log2width_,
-                                           log2height_, stride_coeff,
-                                           stride_coeff, &sse2,
-                                           use_high_bit_depth_, bit_depth_);
+    ASM_REGISTER_STATE_CHECK(var1 =
+                                 variance_(src_, width_, ref_, width_, &sse1));
+    const unsigned int var2 =
+        variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+                     stride_coeff, &sse2, use_high_bit_depth_, bit_depth_);
     EXPECT_EQ(sse1, sse2);
     EXPECT_EQ(var1, var2);
   }
 }
 
-template<typename VarianceFunctionType>
+template <typename VarianceFunctionType>
 void VarianceTest<VarianceFunctionType>::RefStrideTest() {
   for (int i = 0; i < 10; ++i) {
     int ref_stride_coeff = i % 2;
@@ -354,19 +345,18 @@ void VarianceTest<VarianceFunctionType>::RefStrideTest() {
     unsigned int sse1, sse2;
     unsigned int var1;
 
-    ASM_REGISTER_STATE_CHECK(
-        var1 = variance_(src_, width_ * src_stride_coeff,
-                         ref_, width_ * ref_stride_coeff, &sse1));
-    const unsigned int var2 = variance_ref(src_, ref_, log2width_,
-                                           log2height_, src_stride_coeff,
-                                           ref_stride_coeff, &sse2,
-                                           use_high_bit_depth_, bit_depth_);
+    ASM_REGISTER_STATE_CHECK(var1 = variance_(src_, width_ * src_stride_coeff,
+                                              ref_, width_ * ref_stride_coeff,
+                                              &sse1));
+    const unsigned int var2 =
+        variance_ref(src_, ref_, log2width_, log2height_, src_stride_coeff,
+                     ref_stride_coeff, &sse2, use_high_bit_depth_, bit_depth_);
     EXPECT_EQ(sse1, sse2);
     EXPECT_EQ(var1, var2);
   }
 }
 
-template<typename VarianceFunctionType>
+template <typename VarianceFunctionType>
 void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
   const int half = block_size_ / 2;
   if (!use_high_bit_depth_) {
@@ -388,13 +378,13 @@ void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
   EXPECT_EQ(expected, var);
 }
 
-template<typename MseFunctionType>
+template <typename MseFunctionType>
 class MseTest
     : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
  public:
   virtual void SetUp() {
-    const tuple<int, int, MseFunctionType>& params = this->GetParam();
-    log2width_  = get<0>(params);
+    const tuple<int, int, MseFunctionType> &params = this->GetParam();
+    log2width_ = get<0>(params);
     width_ = 1 << log2width_;
     log2height_ = get<1>(params);
     height_ = 1 << log2height_;
@@ -421,15 +411,15 @@ class MseTest
   void MaxTest_sse();
 
   ACMRandom rnd;
-  uint8_t* src_;
-  uint8_t* ref_;
+  uint8_t *src_;
+  uint8_t *ref_;
   int width_, log2width_;
   int height_, log2height_;
   int block_size_;
   MseFunctionType mse_;
 };
 
-template<typename MseFunctionType>
+template <typename MseFunctionType>
 void MseTest<MseFunctionType>::RefTest_mse() {
   for (int i = 0; i < 10; ++i) {
     for (int j = 0; j < block_size_; j++) {
@@ -445,7 +435,7 @@ void MseTest<MseFunctionType>::RefTest_mse() {
   }
 }
 
-template<typename MseFunctionType>
+template <typename MseFunctionType>
 void MseTest<MseFunctionType>::RefTest_sse() {
   for (int i = 0; i < 10; ++i) {
     for (int j = 0; j < block_size_; j++) {
@@ -462,7 +452,7 @@ void MseTest<MseFunctionType>::RefTest_sse() {
   }
 }
 
-template<typename MseFunctionType>
+template <typename MseFunctionType>
 void MseTest<MseFunctionType>::MaxTest_mse() {
   memset(src_, 255, block_size_);
   memset(ref_, 0, block_size_);
@@ -472,7 +462,7 @@ void MseTest<MseFunctionType>::MaxTest_mse() {
   EXPECT_EQ(expected, sse);
 }
 
-template<typename MseFunctionType>
+template <typename MseFunctionType>
 void MseTest<MseFunctionType>::MaxTest_sse() {
   memset(src_, 255, block_size_);
   memset(ref_, 0, block_size_);
@@ -482,11 +472,9 @@ void MseTest<MseFunctionType>::MaxTest_sse() {
   EXPECT_EQ(expected, var);
 }
 
-static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
-                                        const uint8_t *src,
-                                        const uint8_t *second_pred,
-                                        int l2w, int l2h,
-                                        int xoff, int yoff,
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref, const uint8_t *src,
+                                        const uint8_t *second_pred, int l2w,
+                                        int l2h, int xoff, int yoff,
                                         uint32_t *sse_ptr,
                                         bool use_high_bit_depth,
                                         vpx_bit_depth_t bit_depth) {
@@ -509,14 +497,15 @@ static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
         const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
         const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
         const int r = a + (((b - a) * yoff + 8) >> 4);
-        const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+        const int diff =
+            ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
         se += diff;
         sse += diff * diff;
 #if CONFIG_VPX_HIGHBITDEPTH
       } else {
         uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
         uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
-        uint16_t *sec16   = CONVERT_TO_SHORTPTR(second_pred);
+        uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
         const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
         const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
         const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
@@ -533,32 +522,31 @@ static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
   }
   RoundHighBitDepth(bit_depth, &se, &sse);
   *sse_ptr = static_cast<uint32_t>(sse);
-  return static_cast<uint32_t>(sse -
-                               ((static_cast<int64_t>(se) * se) >>
-                                (l2w + l2h)));
+  return static_cast<uint32_t>(
+      sse - ((static_cast<int64_t>(se) * se) >> (l2w + l2h)));
 }
 
-template<typename SubpelVarianceFunctionType>
+template <typename SubpelVarianceFunctionType>
 class SubpelVarianceTest
-    : public ::testing::TestWithParam<tuple<int, int,
-                                            SubpelVarianceFunctionType, int> > {
+    : public ::testing::TestWithParam<
+          tuple<int, int, SubpelVarianceFunctionType, int> > {
  public:
   virtual void SetUp() {
-    const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+    const tuple<int, int, SubpelVarianceFunctionType, int> &params =
         this->GetParam();
-    log2width_  = get<0>(params);
+    log2width_ = get<0>(params);
     width_ = 1 << log2width_;
     log2height_ = get<1>(params);
     height_ = 1 << log2height_;
     subpel_variance_ = get<2>(params);
     if (get<3>(params)) {
-      bit_depth_ = (vpx_bit_depth_t) get<3>(params);
+      bit_depth_ = (vpx_bit_depth_t)get<3>(params);
       use_high_bit_depth_ = true;
     } else {
       bit_depth_ = VPX_BITS_8;
       use_high_bit_depth_ = false;
     }
-    mask_ = (1 << bit_depth_)-1;
+    mask_ = (1 << bit_depth_) - 1;
 
     rnd_.Reset(ACMRandom::DeterministicSeed());
     block_size_ = width_ * height_;
@@ -568,14 +556,12 @@ class SubpelVarianceTest
       ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
 #if CONFIG_VPX_HIGHBITDEPTH
     } else {
-      src_ = CONVERT_TO_BYTEPTR(
-          reinterpret_cast<uint16_t *>(
-              vpx_memalign(16, block_size_*sizeof(uint16_t))));
-      sec_ = CONVERT_TO_BYTEPTR(
-          reinterpret_cast<uint16_t *>(
-              vpx_memalign(16, block_size_*sizeof(uint16_t))));
-      ref_ = CONVERT_TO_BYTEPTR(
-          new uint16_t[block_size_ + width_ + height_ + 1]);
+      src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+          vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+      sec_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+          vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+      ref_ =
+          CONVERT_TO_BYTEPTR(new uint16_t[block_size_ + width_ + height_ + 1]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
     }
     ASSERT_TRUE(src_ != NULL);
@@ -610,11 +596,11 @@ class SubpelVarianceTest
   vpx_bit_depth_t bit_depth_;
   int width_, log2width_;
   int height_, log2height_;
-  int block_size_,  mask_;
+  int block_size_, mask_;
   SubpelVarianceFunctionType subpel_variance_;
 };
 
-template<typename SubpelVarianceFunctionType>
+template <typename SubpelVarianceFunctionType>
 void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
   for (int x = 0; x < 8; ++x) {
     for (int y = 0; y < 8; ++y) {
@@ -637,20 +623,18 @@ void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
       }
       unsigned int sse1, sse2;
       unsigned int var1;
-      ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
-                                                       src_, width_, &sse1));
-      const unsigned int var2 = subpel_variance_ref(ref_, src_,
-                                                    log2width_, log2height_,
-                                                    x, y, &sse2,
-                                                    use_high_bit_depth_,
-                                                    bit_depth_);
+      ASM_REGISTER_STATE_CHECK(
+          var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+      const unsigned int var2 =
+          subpel_variance_ref(ref_, src_, log2width_, log2height_, x, y, &sse2,
+                              use_high_bit_depth_, bit_depth_);
       EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
       EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
     }
   }
 }
 
-template<typename SubpelVarianceFunctionType>
+template <typename SubpelVarianceFunctionType>
 void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
   // Compare against reference.
   // Src: Set the first half of values to 0, the second half to the maximum.
@@ -677,15 +661,15 @@ void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
       ASM_REGISTER_STATE_CHECK(
           var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
       const unsigned int var2 =
-          subpel_variance_ref(ref_, src_, log2width_, log2height_,
-                              x, y, &sse2, use_high_bit_depth_, bit_depth_);
+          subpel_variance_ref(ref_, src_, log2width_, log2height_, x, y, &sse2,
+                              use_high_bit_depth_, bit_depth_);
       EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
       EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
     }
   }
 }
 
-template<>
+template <>
 void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
   for (int x = 0; x < 8; ++x) {
     for (int y = 0; y < 8; ++y) {
@@ -710,14 +694,12 @@ void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
       }
       unsigned int sse1, sse2;
       unsigned int var1;
-      ASM_REGISTER_STATE_CHECK(
-          var1 = subpel_variance_(ref_, width_ + 1, x, y,
-                                  src_, width_, &sse1, sec_));
-      const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
-                                                        log2width_, log2height_,
-                                                        x, y, &sse2,
-                                                        use_high_bit_depth_,
-                                                        bit_depth_);
+      ASM_REGISTER_STATE_CHECK(var1 =
+                                   subpel_variance_(ref_, width_ + 1, x, y,
+                                                    src_, width_, &sse1, sec_));
+      const unsigned int var2 =
+          subpel_avg_variance_ref(ref_, src_, sec_, log2width_, log2height_, x,
+                                  y, &sse2, use_high_bit_depth_, bit_depth_);
       EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
       EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
     }
@@ -809,8 +791,7 @@ INSTANTIATE_TEST_CASE_P(
 typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
 typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
 typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
-typedef SubpelVarianceTest<SubpixAvgVarMxNFunc>
-    VpxHBDSubpelAvgVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxHBDSubpelAvgVarianceTest;
 
 TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
 TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
diff --git a/test/video_source.h b/test/video_source.h
index ade323e7c3eabbfb4a56e3da41117015c921ac09..94a95ce8d36229220968837e442956c6e414420b 100644
--- a/test/video_source.h
+++ b/test/video_source.h
@@ -51,7 +51,7 @@ static std::string GetDataPath() {
 #undef TO_STRING
 #undef STRINGIFY
 
-inline FILE *OpenTestDataFile(const std::string& file_name) {
+inline FILE *OpenTestDataFile(const std::string &file_name) {
   const std::string path_to_source = GetDataPath() + "/" + file_name;
   return fopen(path_to_source.c_str(), "rb");
 }
@@ -76,21 +76,15 @@ static FILE *GetTempOutFile(std::string *file_name) {
 
 class TempOutFile {
  public:
-  TempOutFile() {
-    file_ = GetTempOutFile(&file_name_);
-  }
+  TempOutFile() { file_ = GetTempOutFile(&file_name_); }
   ~TempOutFile() {
     CloseFile();
     if (!file_name_.empty()) {
       EXPECT_EQ(0, remove(file_name_.c_str()));
     }
   }
-  FILE *file() {
-    return file_;
-  }
-  const std::string& file_name() {
-    return file_name_;
-  }
+  FILE *file() { return file_; }
+  const std::string &file_name() { return file_name_; }
 
  protected:
   void CloseFile() {
@@ -134,14 +128,10 @@ class VideoSource {
   virtual unsigned int limit() const = 0;
 };
 
-
 class DummyVideoSource : public VideoSource {
  public:
   DummyVideoSource()
-      : img_(NULL),
-        limit_(100),
-        width_(80),
-        height_(64),
+      : img_(NULL), limit_(100), width_(80), height_(64),
         format_(VPX_IMG_FMT_I420) {
     ReallocImage();
   }
@@ -158,9 +148,7 @@ class DummyVideoSource : public VideoSource {
     FillFrame();
   }
 
-  virtual vpx_image_t *img() const {
-    return (frame_ < limit_) ? img_ : NULL;
-  }
+  virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
 
   // Models a stream where Timebase = 1/FPS, so pts == frame.
   virtual vpx_codec_pts_t pts() const { return frame_; }
@@ -168,7 +156,7 @@ class DummyVideoSource : public VideoSource {
   virtual unsigned long duration() const { return 1; }
 
   virtual vpx_rational_t timebase() const {
-    const vpx_rational_t t = {1, 30};
+    const vpx_rational_t t = { 1, 30 };
     return t;
   }
 
@@ -176,9 +164,7 @@ class DummyVideoSource : public VideoSource {
 
   virtual unsigned int limit() const { return limit_; }
 
-  void set_limit(unsigned int limit) {
-    limit_ = limit;
-  }
+  void set_limit(unsigned int limit) { limit_ = limit; }
 
   void SetSize(unsigned int width, unsigned int height) {
     if (width != width_ || height != height_) {
@@ -196,7 +182,9 @@ class DummyVideoSource : public VideoSource {
   }
 
  protected:
-  virtual void FillFrame() { if (img_) memset(img_->img_data, 0, raw_sz_); }
+  virtual void FillFrame() {
+    if (img_) memset(img_->img_data, 0, raw_sz_);
+  }
 
   void ReallocImage() {
     vpx_img_free(img_);
@@ -205,7 +193,7 @@ class DummyVideoSource : public VideoSource {
   }
 
   vpx_image_t *img_;
-  size_t       raw_sz_;
+  size_t raw_sz_;
   unsigned int limit_;
   unsigned int frame_;
   unsigned int width_;
@@ -213,12 +201,10 @@ class DummyVideoSource : public VideoSource {
   vpx_img_fmt_t format_;
 };
 
-
 class RandomVideoSource : public DummyVideoSource {
  public:
   RandomVideoSource(int seed = ACMRandom::DeterministicSeed())
-      : rnd_(seed),
-        seed_(seed) { }
+      : rnd_(seed), seed_(seed) {}
 
  protected:
   // Reset the RNG to get a matching stream for the second pass
@@ -233,8 +219,7 @@ class RandomVideoSource : public DummyVideoSource {
   virtual void FillFrame() {
     if (img_) {
       if (frame_ % 30 < 15)
-        for (size_t i = 0; i < raw_sz_; ++i)
-          img_->img_data[i] = rnd_.Rand8();
+        for (size_t i = 0; i < raw_sz_; ++i) img_->img_data[i] = rnd_.Rand8();
       else
         memset(img_->img_data, 0, raw_sz_);
     }
diff --git a/test/vp10_dct_test.cc b/test/vp10_dct_test.cc
index b2c301ae39dd8a5ba8c8b51233f73767c4119b1d..a287e0cecaf4acb6a663719e86a2713bb16b3978 100644
--- a/test/vp10_dct_test.cc
+++ b/test/vp10_dct_test.cc
@@ -33,8 +33,7 @@ void reference_dct_1d(const double *in, double *out, int size) {
     for (int n = 0; n < size; ++n) {
       out[k] += in[n] * cos(PI * (2 * n + 1) * k / (2 * size));
     }
-    if (k == 0)
-      out[k] = out[k] * kInvSqrt2;
+    if (k == 0) out[k] = out[k] * kInvSqrt2;
   }
 }
 
@@ -49,14 +48,14 @@ class TransTestBase {
 
  protected:
   void RunFwdAccuracyCheck() {
-    tran_low_t *input  = new tran_low_t[txfm_size_];
+    tran_low_t *input = new tran_low_t[txfm_size_];
     tran_low_t *output = new tran_low_t[txfm_size_];
-    double *ref_input  = new double[txfm_size_];
+    double *ref_input = new double[txfm_size_];
     double *ref_output = new double[txfm_size_];
 
     ACMRandom rnd(ACMRandom::DeterministicSeed());
     const int count_test_block = 5000;
-    for (int ti =  0; ti < count_test_block; ++ti) {
+    for (int ti = 0; ti < count_test_block; ++ti) {
       for (int ni = 0; ni < txfm_size_; ++ni) {
         input[ni] = rnd.Rand8() - rnd.Rand8();
         ref_input[ni] = static_cast<double>(input[ni]);
@@ -85,9 +84,8 @@ class TransTestBase {
 };
 
 typedef std::tr1::tuple<FdctFunc, FdctFuncRef, int, int> FdctParam;
-class Vp10FwdTxfm
-    : public TransTestBase,
-      public ::testing::TestWithParam<FdctParam> {
+class Vp10FwdTxfm : public TransTestBase,
+                    public ::testing::TestWithParam<FdctParam> {
  public:
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
@@ -98,14 +96,11 @@ class Vp10FwdTxfm
   virtual void TearDown() {}
 };
 
-TEST_P(Vp10FwdTxfm, RunFwdAccuracyCheck) {
-  RunFwdAccuracyCheck();
-}
+TEST_P(Vp10FwdTxfm, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
 
 INSTANTIATE_TEST_CASE_P(
     C, Vp10FwdTxfm,
-    ::testing::Values(
-        FdctParam(&fdct4, &reference_dct_1d, 4, 1),
-        FdctParam(&fdct8, &reference_dct_1d, 8, 1),
-        FdctParam(&fdct16, &reference_dct_1d, 16, 2)));
+    ::testing::Values(FdctParam(&fdct4, &reference_dct_1d, 4, 1),
+                      FdctParam(&fdct8, &reference_dct_1d, 8, 1),
+                      FdctParam(&fdct16, &reference_dct_1d, 16, 2)));
 }  // namespace
diff --git a/test/vp10_inv_txfm_test.cc b/test/vp10_inv_txfm_test.cc
index c49081ef851af46b80163acbe80555621e28e55c..aa91f1998e48064aa18d68fc523038e3f0d0c6eb 100644
--- a/test/vp10_inv_txfm_test.cc
+++ b/test/vp10_inv_txfm_test.cc
@@ -52,14 +52,14 @@ class TransTestBase {
 
  protected:
   void RunInvAccuracyCheck() {
-    tran_low_t *input  = new tran_low_t[txfm_size_];
+    tran_low_t *input = new tran_low_t[txfm_size_];
     tran_low_t *output = new tran_low_t[txfm_size_];
-    double *ref_input  = new double[txfm_size_];
+    double *ref_input = new double[txfm_size_];
     double *ref_output = new double[txfm_size_];
 
     ACMRandom rnd(ACMRandom::DeterministicSeed());
     const int count_test_block = 5000;
-    for (int ti =  0; ti < count_test_block; ++ti) {
+    for (int ti = 0; ti < count_test_block; ++ti) {
       for (int ni = 0; ni < txfm_size_; ++ni) {
         input[ni] = rnd.Rand8() - rnd.Rand8();
         ref_input[ni] = static_cast<double>(input[ni]);
@@ -88,9 +88,8 @@ class TransTestBase {
 };
 
 typedef std::tr1::tuple<IdctFunc, IdctFuncRef, int, int> IdctParam;
-class Vp10InvTxfm
-    : public TransTestBase,
-      public ::testing::TestWithParam<IdctParam> {
+class Vp10InvTxfm : public TransTestBase,
+                    public ::testing::TestWithParam<IdctParam> {
  public:
   virtual void SetUp() {
     fwd_txfm_ = GET_PARAM(0);
@@ -101,25 +100,19 @@ class Vp10InvTxfm
   virtual void TearDown() {}
 };
 
-TEST_P(Vp10InvTxfm, RunInvAccuracyCheck) {
-  RunInvAccuracyCheck();
-}
+TEST_P(Vp10InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }
 
 INSTANTIATE_TEST_CASE_P(
     C, Vp10InvTxfm,
-    ::testing::Values(
-        IdctParam(&vp10_idct4_c, &reference_idct_1d, 4, 1),
-        IdctParam(&vp10_idct8_c, &reference_idct_1d, 8, 2),
-        IdctParam(&vp10_idct16_c, &reference_idct_1d, 16, 4),
-        IdctParam(&vp10_idct32_c, &reference_idct_1d, 32, 6))
-);
+    ::testing::Values(IdctParam(&vp10_idct4_c, &reference_idct_1d, 4, 1),
+                      IdctParam(&vp10_idct8_c, &reference_idct_1d, 8, 2),
+                      IdctParam(&vp10_idct16_c, &reference_idct_1d, 16, 4),
+                      IdctParam(&vp10_idct32_c, &reference_idct_1d, 32, 6)));
 
 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
-typedef std::tr1::tuple<FwdTxfmFunc,
-                        InvTxfmFunc,
-                        InvTxfmFunc,
-                        TX_SIZE, int> PartialInvTxfmParam;
+typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
+    PartialInvTxfmParam;
 const int kMaxNumCoeffs = 1024;
 class Vp10PartialIDctTest
     : public ::testing::TestWithParam<PartialInvTxfmParam> {
@@ -129,7 +122,7 @@ class Vp10PartialIDctTest
     ftxfm_ = GET_PARAM(0);
     full_itxfm_ = GET_PARAM(1);
     partial_itxfm_ = GET_PARAM(2);
-    tx_size_  = GET_PARAM(3);
+    tx_size_ = GET_PARAM(3);
     last_nonzero_ = GET_PARAM(4);
   }
 
@@ -147,21 +140,11 @@ TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   int size;
   switch (tx_size_) {
-    case TX_4X4:
-      size = 4;
-      break;
-    case TX_8X8:
-      size = 8;
-      break;
-    case TX_16X16:
-      size = 16;
-      break;
-    case TX_32X32:
-      size = 32;
-      break;
-    default:
-      FAIL() << "Wrong Size!";
-      break;
+    case TX_4X4: size = 4; break;
+    case TX_8X8: size = 8; break;
+    case TX_16X16: size = 16; break;
+    case TX_32X32: size = 32; break;
+    default: FAIL() << "Wrong Size!"; break;
   }
   DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
   DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
@@ -187,11 +170,9 @@ TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
     for (int i = 0; i < count_test_block; ++i) {
       // Initialize a test block with input range [-255, 255].
       if (i == 0) {
-        for (int j = 0; j < block_size; ++j)
-          input_extreme_block[j] = 255;
+        for (int j = 0; j < block_size; ++j) input_extreme_block[j] = 255;
       } else if (i == 1) {
-        for (int j = 0; j < block_size; ++j)
-          input_extreme_block[j] = -255;
+        for (int j = 0; j < block_size; ++j) input_extreme_block[j] = -255;
       } else {
         for (int j = 0; j < block_size; ++j) {
           input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
@@ -203,8 +184,8 @@ TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
       // quantization with maximum allowed step sizes
       test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
       for (int j = 1; j < last_nonzero_; ++j)
-        test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]]
-                         = (output_ref_block[j] / 1828) * 1828;
+        test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] =
+            (output_ref_block[j] / 1828) * 1828;
     }
 
     ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size));
@@ -213,8 +194,7 @@ TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
     for (int j = 0; j < block_size; ++j) {
       const int diff = dst1[j] - dst2[j];
       const int error = diff * diff;
-      if (max_error < error)
-        max_error = error;
+      if (max_error < error) max_error = error;
     }
   }
 
@@ -226,21 +206,11 @@ TEST_P(Vp10PartialIDctTest, ResultsMatch) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   int size;
   switch (tx_size_) {
-    case TX_4X4:
-      size = 4;
-      break;
-    case TX_8X8:
-      size = 8;
-      break;
-    case TX_16X16:
-      size = 16;
-      break;
-    case TX_32X32:
-      size = 32;
-      break;
-    default:
-      FAIL() << "Wrong Size!";
-      break;
+    case TX_4X4: size = 4; break;
+    case TX_8X8: size = 8; break;
+    case TX_16X16: size = 16; break;
+    case TX_32X32: size = 32; break;
+    default: FAIL() << "Wrong Size!"; break;
   }
   DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
   DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
@@ -277,8 +247,7 @@ TEST_P(Vp10PartialIDctTest, ResultsMatch) {
     for (int j = 0; j < block_size; ++j) {
       const int diff = dst1[j] - dst2[j];
       const int error = diff * diff;
-      if (max_error < error)
-        max_error = error;
+      if (max_error < error) max_error = error;
     }
   }
 
@@ -289,33 +258,18 @@ using std::tr1::make_tuple;
 
 INSTANTIATE_TEST_CASE_P(
     C, Vp10PartialIDctTest,
-    ::testing::Values(
-        make_tuple(&vpx_fdct32x32_c,
-                   &vp10_idct32x32_1024_add_c,
-                   &vp10_idct32x32_34_add_c,
-                   TX_32X32, 34),
-        make_tuple(&vpx_fdct32x32_c,
-                   &vp10_idct32x32_1024_add_c,
-                   &vp10_idct32x32_1_add_c,
-                   TX_32X32, 1),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vp10_idct16x16_256_add_c,
-                   &vp10_idct16x16_10_add_c,
-                   TX_16X16, 10),
-        make_tuple(&vpx_fdct16x16_c,
-                   &vp10_idct16x16_256_add_c,
-                   &vp10_idct16x16_1_add_c,
-                   TX_16X16, 1),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vp10_idct8x8_64_add_c,
-                   &vp10_idct8x8_12_add_c,
-                   TX_8X8, 12),
-        make_tuple(&vpx_fdct8x8_c,
-                   &vp10_idct8x8_64_add_c,
-                   &vp10_idct8x8_1_add_c,
-                   TX_8X8, 1),
-        make_tuple(&vpx_fdct4x4_c,
-                   &vp10_idct4x4_16_add_c,
-                   &vp10_idct4x4_1_add_c,
-                   TX_4X4, 1)));
+    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vp10_idct32x32_1024_add_c,
+                                 &vp10_idct32x32_34_add_c, TX_32X32, 34),
+                      make_tuple(&vpx_fdct32x32_c, &vp10_idct32x32_1024_add_c,
+                                 &vp10_idct32x32_1_add_c, TX_32X32, 1),
+                      make_tuple(&vpx_fdct16x16_c, &vp10_idct16x16_256_add_c,
+                                 &vp10_idct16x16_10_add_c, TX_16X16, 10),
+                      make_tuple(&vpx_fdct16x16_c, &vp10_idct16x16_256_add_c,
+                                 &vp10_idct16x16_1_add_c, TX_16X16, 1),
+                      make_tuple(&vpx_fdct8x8_c, &vp10_idct8x8_64_add_c,
+                                 &vp10_idct8x8_12_add_c, TX_8X8, 12),
+                      make_tuple(&vpx_fdct8x8_c, &vp10_idct8x8_64_add_c,
+                                 &vp10_idct8x8_1_add_c, TX_8X8, 1),
+                      make_tuple(&vpx_fdct4x4_c, &vp10_idct4x4_16_add_c,
+                                 &vp10_idct4x4_1_add_c, TX_4X4, 1)));
 }  // namespace
diff --git a/test/webm_video_source.h b/test/webm_video_source.h
index 650bc52dce0915e7f8273b3074a542ad4caced75..20faa3a0eb2bc36f0a40e5d87b7d222b5f5f8efb 100644
--- a/test/webm_video_source.h
+++ b/test/webm_video_source.h
@@ -25,30 +25,23 @@ namespace libvpx_test {
 class WebMVideoSource : public CompressedVideoSource {
  public:
   explicit WebMVideoSource(const std::string &file_name)
-      : file_name_(file_name),
-        vpx_ctx_(new VpxInputContext()),
-        webm_ctx_(new WebmInputContext()),
-        buf_(NULL),
-        buf_sz_(0),
-        frame_(0),
-        end_of_file_(false) {
-  }
+      : file_name_(file_name), vpx_ctx_(new VpxInputContext()),
+        webm_ctx_(new WebmInputContext()), buf_(NULL), buf_sz_(0), frame_(0),
+        end_of_file_(false) {}
 
   virtual ~WebMVideoSource() {
-    if (vpx_ctx_->file != NULL)
-      fclose(vpx_ctx_->file);
+    if (vpx_ctx_->file != NULL) fclose(vpx_ctx_->file);
     webm_free(webm_ctx_);
     delete vpx_ctx_;
     delete webm_ctx_;
   }
 
-  virtual void Init() {
-  }
+  virtual void Init() {}
 
   virtual void Begin() {
     vpx_ctx_->file = OpenTestDataFile(file_name_);
     ASSERT_TRUE(vpx_ctx_->file != NULL) << "Input file open failed. Filename: "
-        << file_name_;
+                                        << file_name_;
 
     ASSERT_EQ(file_is_webm(webm_ctx_, vpx_ctx_), 1) << "file is not WebM";
 
@@ -81,9 +74,7 @@ class WebMVideoSource : public CompressedVideoSource {
     } while (!webm_ctx_->is_key_frame && !end_of_file_);
   }
 
-  virtual const uint8_t *cxdata() const {
-    return end_of_file_ ? NULL : buf_;
-  }
+  virtual const uint8_t *cxdata() const { return end_of_file_ ? NULL : buf_; }
   virtual size_t frame_size() const { return buf_sz_; }
   virtual unsigned int frame_number() const { return frame_; }
 
diff --git a/test/y4m_test.cc b/test/y4m_test.cc
index 91b30b12c2636870e1750beb23e509cc55d10c49..ced717a7c192e643cae4a2e9f3891b16fa142cf0 100644
--- a/test/y4m_test.cc
+++ b/test/y4m_test.cc
@@ -22,7 +22,7 @@ namespace {
 
 using std::string;
 
-static const unsigned int kWidth  = 160;
+static const unsigned int kWidth = 160;
 static const unsigned int kHeight = 90;
 static const unsigned int kFrames = 10;
 
@@ -34,24 +34,24 @@ struct Y4mTestParam {
 };
 
 const Y4mTestParam kY4mTestVectors[] = {
-  {"park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420,
-    "e5406275b9fc6bb3436c31d4a05c1cab"},
-  {"park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422,
-    "284a47a47133b12884ec3a14e959a0b6"},
-  {"park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444,
-    "90517ff33843d85de712fd4fe60dbed0"},
-  {"park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016,
-    "63f21f9f717d8b8631bd2288ee87137b"},
-  {"park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216,
-    "48ab51fb540aed07f7ff5af130c9b605"},
-  {"park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416,
-    "067bfd75aa85ff9bae91fa3e0edd1e3e"},
-  {"park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016,
-    "9e6d8f6508c6e55625f6b697bc461cef"},
-  {"park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216,
-    "b239c6b301c0b835485be349ca83a7e3"},
-  {"park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416,
-    "5a6481a550821dab6d0192f5c63845e9"},
+  { "park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420,
+    "e5406275b9fc6bb3436c31d4a05c1cab" },
+  { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422,
+    "284a47a47133b12884ec3a14e959a0b6" },
+  { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444,
+    "90517ff33843d85de712fd4fe60dbed0" },
+  { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016,
+    "63f21f9f717d8b8631bd2288ee87137b" },
+  { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216,
+    "48ab51fb540aed07f7ff5af130c9b605" },
+  { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416,
+    "067bfd75aa85ff9bae91fa3e0edd1e3e" },
+  { "park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016,
+    "9e6d8f6508c6e55625f6b697bc461cef" },
+  { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216,
+    "b239c6b301c0b835485be349ca83a7e3" },
+  { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416,
+    "5a6481a550821dab6d0192f5c63845e9" },
 };
 
 static void write_image_file(const vpx_image_t *img, FILE *file) {
@@ -60,10 +60,12 @@ static void write_image_file(const vpx_image_t *img, FILE *file) {
     const unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
     const int bytes_per_sample = (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
-    const int h = (plane ? (img->d_h + img->y_chroma_shift) >>
-                   img->y_chroma_shift : img->d_h);
-    const int w = (plane ? (img->d_w + img->x_chroma_shift) >>
-                   img->x_chroma_shift : img->d_w);
+    const int h =
+        (plane ? (img->d_h + img->y_chroma_shift) >> img->y_chroma_shift
+               : img->d_h);
+    const int w =
+        (plane ? (img->d_w + img->x_chroma_shift) >> img->x_chroma_shift
+               : img->d_w);
     for (y = 0; y < h; ++y) {
       fwrite(buf, bytes_per_sample, w, file);
       buf += stride;
@@ -71,15 +73,12 @@ static void write_image_file(const vpx_image_t *img, FILE *file) {
   }
 }
 
-class Y4mVideoSourceTest
-    : public ::testing::TestWithParam<Y4mTestParam>,
-      public ::libvpx_test::Y4mVideoSource {
+class Y4mVideoSourceTest : public ::testing::TestWithParam<Y4mTestParam>,
+                           public ::libvpx_test::Y4mVideoSource {
  protected:
   Y4mVideoSourceTest() : Y4mVideoSource("", 0, 0) {}
 
-  virtual ~Y4mVideoSourceTest() {
-    CloseSource();
-  }
+  virtual ~Y4mVideoSourceTest() { CloseSource(); }
 
   virtual void Init(const std::string &file_name, int limit) {
     file_name_ = file_name;
@@ -137,8 +136,7 @@ TEST_P(Y4mVideoSourceTest, SourceTest) {
 INSTANTIATE_TEST_CASE_P(C, Y4mVideoSourceTest,
                         ::testing::ValuesIn(kY4mTestVectors));
 
-class Y4mVideoWriteTest
-    : public Y4mVideoSourceTest {
+class Y4mVideoWriteTest : public Y4mVideoSourceTest {
  protected:
   Y4mVideoWriteTest() : tmpfile_(NULL) {}
 
@@ -158,14 +156,12 @@ class Y4mVideoWriteTest
   // Writes out a y4m file and then reads it back
   void WriteY4mAndReadBack() {
     ASSERT_TRUE(input_file_ != NULL);
-    char buf[Y4M_BUFFER_SIZE] = {0};
-    const struct VpxRational framerate = {y4m_.fps_n, y4m_.fps_d};
+    char buf[Y4M_BUFFER_SIZE] = { 0 };
+    const struct VpxRational framerate = { y4m_.fps_n, y4m_.fps_d };
     tmpfile_ = new libvpx_test::TempOutFile;
     ASSERT_TRUE(tmpfile_->file() != NULL);
-    y4m_write_file_header(buf, sizeof(buf),
-                          kWidth, kHeight,
-                          &framerate, y4m_.vpx_fmt,
-                          y4m_.bit_depth);
+    y4m_write_file_header(buf, sizeof(buf), kWidth, kHeight, &framerate,
+                          y4m_.vpx_fmt, y4m_.bit_depth);
     fputs(buf, tmpfile_->file());
     for (unsigned int i = start_; i < limit_; i++) {
       y4m_write_frame_header(buf, sizeof(buf));
diff --git a/test/y4m_video_source.h b/test/y4m_video_source.h
index 03d9388db84758683e4735892bbc02d615646a33..2682ddde3d4f3617a1cff0a9b2e0bcb9ad737eee 100644
--- a/test/y4m_video_source.h
+++ b/test/y4m_video_source.h
@@ -21,18 +21,10 @@ namespace libvpx_test {
 // so that we can do actual file encodes.
 class Y4mVideoSource : public VideoSource {
  public:
-  Y4mVideoSource(const std::string &file_name,
-                  unsigned int start, int limit)
-      : file_name_(file_name),
-        input_file_(NULL),
-        img_(new vpx_image_t()),
-        start_(start),
-        limit_(limit),
-        frame_(0),
-        framerate_numerator_(0),
-        framerate_denominator_(0),
-        y4m_() {
-  }
+  Y4mVideoSource(const std::string &file_name, unsigned int start, int limit)
+      : file_name_(file_name), input_file_(NULL), img_(new vpx_image_t()),
+        start_(start), limit_(limit), frame_(0), framerate_numerator_(0),
+        framerate_denominator_(0), y4m_() {}
 
   virtual ~Y4mVideoSource() {
     vpx_img_free(img_.get());
diff --git a/test/yuv_video_source.h b/test/yuv_video_source.h
index 3c852b24260b704def33d95027fb659a62a41102..2cc81a0f3d23f49ef31c8ee0211d6e97e4cbe09a 100644
--- a/test/yuv_video_source.h
+++ b/test/yuv_video_source.h
@@ -25,19 +25,11 @@ namespace libvpx_test {
 class YUVVideoSource : public VideoSource {
  public:
   YUVVideoSource(const std::string &file_name, vpx_img_fmt format,
-                 unsigned int width, unsigned int height,
-                 int rate_numerator, int rate_denominator,
-                 unsigned int start, int limit)
-      : file_name_(file_name),
-        input_file_(NULL),
-        img_(NULL),
-        start_(start),
-        limit_(limit),
-        frame_(0),
-        width_(0),
-        height_(0),
-        format_(VPX_IMG_FMT_NONE),
-        framerate_numerator_(rate_numerator),
+                 unsigned int width, unsigned int height, int rate_numerator,
+                 int rate_denominator, unsigned int start, int limit)
+      : file_name_(file_name), input_file_(NULL), img_(NULL), start_(start),
+        limit_(limit), frame_(0), width_(0), height_(0),
+        format_(VPX_IMG_FMT_NONE), framerate_numerator_(rate_numerator),
         framerate_denominator_(rate_denominator) {
     // This initializes format_, raw_size_, width_, height_ and allocates img.
     SetSize(width, height, format);
@@ -45,13 +37,11 @@ class YUVVideoSource : public VideoSource {
 
   virtual ~YUVVideoSource() {
     vpx_img_free(img_);
-    if (input_file_)
-      fclose(input_file_);
+    if (input_file_) fclose(input_file_);
   }
 
   virtual void Begin() {
-    if (input_file_)
-      fclose(input_file_);
+    if (input_file_) fclose(input_file_);
     input_file_ = OpenTestDataFile(file_name_);
     ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. Filename: "
                                      << file_name_;
@@ -67,7 +57,7 @@ class YUVVideoSource : public VideoSource {
     FillFrame();
   }
 
-  virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL;  }
+  virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
 
   // Models a stream where Timebase = 1/FPS, so pts == frame.
   virtual vpx_codec_pts_t pts() const { return frame_; }
@@ -93,32 +83,15 @@ class YUVVideoSource : public VideoSource {
       height_ = height;
       format_ = format;
       switch (format) {
-        case VPX_IMG_FMT_I420:
-          raw_size_ = width * height * 3 / 2;
-          break;
-        case VPX_IMG_FMT_I422:
-          raw_size_ = width * height * 2;
-          break;
-        case VPX_IMG_FMT_I440:
-          raw_size_ = width * height * 2;
-          break;
-        case VPX_IMG_FMT_I444:
-          raw_size_ = width * height * 3;
-          break;
-        case VPX_IMG_FMT_I42016:
-          raw_size_ = width * height * 3;
-          break;
-        case VPX_IMG_FMT_I42216:
-          raw_size_ = width * height * 4;
-          break;
-        case VPX_IMG_FMT_I44016:
-          raw_size_ = width * height * 4;
-          break;
-        case VPX_IMG_FMT_I44416:
-          raw_size_ = width * height * 6;
-          break;
-        default:
-          ASSERT_TRUE(0);
+        case VPX_IMG_FMT_I420: raw_size_ = width * height * 3 / 2; break;
+        case VPX_IMG_FMT_I422: raw_size_ = width * height * 2; break;
+        case VPX_IMG_FMT_I440: raw_size_ = width * height * 2; break;
+        case VPX_IMG_FMT_I444: raw_size_ = width * height * 3; break;
+        case VPX_IMG_FMT_I42016: raw_size_ = width * height * 3; break;
+        case VPX_IMG_FMT_I42216: raw_size_ = width * height * 4; break;
+        case VPX_IMG_FMT_I44016: raw_size_ = width * height * 4; break;
+        case VPX_IMG_FMT_I44416: raw_size_ = width * height * 6; break;
+        default: ASSERT_TRUE(0);
       }
     }
   }
diff --git a/tools_common.c b/tools_common.c
index d12b854afece2205659656edfd30294130b65356..4f0417e386d5d7cacd7127e3aa499b4166f5d9f2 100644
--- a/tools_common.c
+++ b/tools_common.c
@@ -29,23 +29,22 @@
 #include <fcntl.h>
 
 #ifdef __OS2__
-#define _setmode    setmode
-#define _fileno     fileno
-#define _O_BINARY   O_BINARY
+#define _setmode setmode
+#define _fileno fileno
+#define _O_BINARY O_BINARY
 #endif
 #endif
 
-#define LOG_ERROR(label) do {\
-  const char *l = label;\
-  va_list ap;\
-  va_start(ap, fmt);\
-  if (l)\
-    fprintf(stderr, "%s: ", l);\
-  vfprintf(stderr, fmt, ap);\
-  fprintf(stderr, "\n");\
-  va_end(ap);\
-} while (0)
-
+#define LOG_ERROR(label)               \
+  do {                                 \
+    const char *l = label;             \
+    va_list ap;                        \
+    va_start(ap, fmt);                 \
+    if (l) fprintf(stderr, "%s: ", l); \
+    vfprintf(stderr, fmt, ap);         \
+    fprintf(stderr, "\n");             \
+    va_end(ap);                        \
+  } while (0)
 
 FILE *set_binary_mode(FILE *stream) {
   (void)stream;
@@ -65,16 +64,13 @@ void fatal(const char *fmt, ...) {
   exit(EXIT_FAILURE);
 }
 
-void warn(const char *fmt, ...) {
-  LOG_ERROR("Warning");
-}
+void warn(const char *fmt, ...) { LOG_ERROR("Warning"); }
 
 void die_codec(vpx_codec_ctx_t *ctx, const char *s) {
   const char *detail = vpx_codec_error_detail(ctx);
 
   printf("%s: %s\n", s, vpx_codec_error(ctx));
-  if (detail)
-    printf("    %s\n", detail);
+  if (detail) printf("    %s\n", detail);
   exit(EXIT_FAILURE);
 }
 
@@ -97,15 +93,16 @@ int read_yuv_frame(struct VpxInputContext *input_ctx, vpx_image_t *yuv_frame) {
      */
     switch (plane) {
       case 1:
-        ptr = yuv_frame->planes[
-            yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_V : VPX_PLANE_U];
+        ptr =
+            yuv_frame->planes[yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_V
+                                                                 : VPX_PLANE_U];
         break;
       case 2:
-        ptr = yuv_frame->planes[
-            yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_U : VPX_PLANE_V];
+        ptr =
+            yuv_frame->planes[yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_U
+                                                                 : VPX_PLANE_V];
         break;
-      default:
-        ptr = yuv_frame->planes[plane];
+      default: ptr = yuv_frame->planes[plane];
     }
 
     for (r = 0; r < h; ++r) {
@@ -134,7 +131,7 @@ int read_yuv_frame(struct VpxInputContext *input_ctx, vpx_image_t *yuv_frame) {
 
 static const VpxInterface vpx_encoders[] = {
 #if CONFIG_VP10_ENCODER
-  {"vp10", VP10_FOURCC, &vpx_codec_vp10_cx},
+  { "vp10", VP10_FOURCC, &vpx_codec_vp10_cx },
 #endif
 };
 
@@ -142,17 +139,14 @@ int get_vpx_encoder_count(void) {
   return sizeof(vpx_encoders) / sizeof(vpx_encoders[0]);
 }
 
-const VpxInterface *get_vpx_encoder_by_index(int i) {
-  return &vpx_encoders[i];
-}
+const VpxInterface *get_vpx_encoder_by_index(int i) { return &vpx_encoders[i]; }
 
 const VpxInterface *get_vpx_encoder_by_name(const char *name) {
   int i;
 
   for (i = 0; i < get_vpx_encoder_count(); ++i) {
     const VpxInterface *encoder = get_vpx_encoder_by_index(i);
-    if (strcmp(encoder->name, name) == 0)
-      return encoder;
+    if (strcmp(encoder->name, name) == 0) return encoder;
   }
 
   return NULL;
@@ -164,7 +158,7 @@ const VpxInterface *get_vpx_encoder_by_name(const char *name) {
 
 static const VpxInterface vpx_decoders[] = {
 #if CONFIG_VP10_DECODER
-  {"vp10", VP10_FOURCC, &vpx_codec_vp10_dx},
+  { "vp10", VP10_FOURCC, &vpx_codec_vp10_dx },
 #endif
 };
 
@@ -172,17 +166,14 @@ int get_vpx_decoder_count(void) {
   return sizeof(vpx_decoders) / sizeof(vpx_decoders[0]);
 }
 
-const VpxInterface *get_vpx_decoder_by_index(int i) {
-  return &vpx_decoders[i];
-}
+const VpxInterface *get_vpx_decoder_by_index(int i) { return &vpx_decoders[i]; }
 
 const VpxInterface *get_vpx_decoder_by_name(const char *name) {
   int i;
 
   for (i = 0; i < get_vpx_decoder_count(); ++i) {
-     const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
-     if (strcmp(decoder->name, name) == 0)
-       return decoder;
+    const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
+    if (strcmp(decoder->name, name) == 0) return decoder;
   }
 
   return NULL;
@@ -193,8 +184,7 @@ const VpxInterface *get_vpx_decoder_by_fourcc(uint32_t fourcc) {
 
   for (i = 0; i < get_vpx_decoder_count(); ++i) {
     const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
-    if (decoder->fourcc == fourcc)
-      return decoder;
+    if (decoder->fourcc == fourcc) return decoder;
   }
 
   return NULL;
@@ -212,7 +202,7 @@ int vpx_img_plane_width(const vpx_image_t *img, int plane) {
 }
 
 int vpx_img_plane_height(const vpx_image_t *img, int plane) {
-  if (plane > 0 &&  img->y_chroma_shift > 0)
+  if (plane > 0 && img->y_chroma_shift > 0)
     return (img->d_h + 1) >> img->y_chroma_shift;
   else
     return img->d_h;
@@ -225,7 +215,7 @@ void vpx_img_write(const vpx_image_t *img, FILE *file) {
     const unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
     const int w = vpx_img_plane_width(img, plane) *
-        ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
+                  ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
     const int h = vpx_img_plane_height(img, plane);
     int y;
 
@@ -243,13 +233,12 @@ int vpx_img_read(vpx_image_t *img, FILE *file) {
     unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
     const int w = vpx_img_plane_width(img, plane) *
-        ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
+                  ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
     const int h = vpx_img_plane_height(img, plane);
     int y;
 
     for (y = 0; y < h; ++y) {
-      if (fread(buf, 1, w, file) != (size_t)w)
-        return 0;
+      if (fread(buf, 1, w, file) != (size_t)w) return 0;
       buf += stride;
     }
   }
@@ -278,19 +267,16 @@ static void highbd_img_upshift(vpx_image_t *dst, vpx_image_t *src,
   int plane;
   if (dst->d_w != src->d_w || dst->d_h != src->d_h ||
       dst->x_chroma_shift != src->x_chroma_shift ||
-      dst->y_chroma_shift != src->y_chroma_shift ||
-      dst->fmt != src->fmt || input_shift < 0) {
+      dst->y_chroma_shift != src->y_chroma_shift || dst->fmt != src->fmt ||
+      input_shift < 0) {
     fatal("Unsupported image conversion");
   }
   switch (src->fmt) {
     case VPX_IMG_FMT_I42016:
     case VPX_IMG_FMT_I42216:
     case VPX_IMG_FMT_I44416:
-    case VPX_IMG_FMT_I44016:
-      break;
-    default:
-      fatal("Unsupported image conversion");
-      break;
+    case VPX_IMG_FMT_I44016: break;
+    default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
     int w = src->d_w;
@@ -305,8 +291,7 @@ static void highbd_img_upshift(vpx_image_t *dst, vpx_image_t *src,
           (uint16_t *)(src->planes[plane] + y * src->stride[plane]);
       uint16_t *p_dst =
           (uint16_t *)(dst->planes[plane] + y * dst->stride[plane]);
-      for (x = 0; x < w; x++)
-        *p_dst++ = (*p_src++ << input_shift) + offset;
+      for (x = 0; x < w; x++) *p_dst++ = (*p_src++ << input_shift) + offset;
     }
   }
 }
@@ -319,19 +304,15 @@ static void lowbd_img_upshift(vpx_image_t *dst, vpx_image_t *src,
   if (dst->d_w != src->d_w || dst->d_h != src->d_h ||
       dst->x_chroma_shift != src->x_chroma_shift ||
       dst->y_chroma_shift != src->y_chroma_shift ||
-      dst->fmt != src->fmt + VPX_IMG_FMT_HIGHBITDEPTH ||
-      input_shift < 0) {
+      dst->fmt != src->fmt + VPX_IMG_FMT_HIGHBITDEPTH || input_shift < 0) {
     fatal("Unsupported image conversion");
   }
   switch (src->fmt) {
     case VPX_IMG_FMT_I420:
     case VPX_IMG_FMT_I422:
     case VPX_IMG_FMT_I444:
-    case VPX_IMG_FMT_I440:
-      break;
-    default:
-      fatal("Unsupported image conversion");
-      break;
+    case VPX_IMG_FMT_I440: break;
+    default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
     int w = src->d_w;
@@ -352,8 +333,7 @@ static void lowbd_img_upshift(vpx_image_t *dst, vpx_image_t *src,
   }
 }
 
-void vpx_img_upshift(vpx_image_t *dst, vpx_image_t *src,
-                     int input_shift) {
+void vpx_img_upshift(vpx_image_t *dst, vpx_image_t *src, int input_shift) {
   if (src->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
     highbd_img_upshift(dst, src, input_shift);
   } else {
@@ -363,9 +343,8 @@ void vpx_img_upshift(vpx_image_t *dst, vpx_image_t *src,
 
 void vpx_img_truncate_16_to_8(vpx_image_t *dst, vpx_image_t *src) {
   int plane;
-  if (dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH != src->fmt ||
-      dst->d_w != src->d_w || dst->d_h != src->d_h ||
-      dst->x_chroma_shift != src->x_chroma_shift ||
+  if (dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH != src->fmt || dst->d_w != src->d_w ||
+      dst->d_h != src->d_h || dst->x_chroma_shift != src->x_chroma_shift ||
       dst->y_chroma_shift != src->y_chroma_shift) {
     fatal("Unsupported image conversion");
   }
@@ -373,11 +352,8 @@ void vpx_img_truncate_16_to_8(vpx_image_t *dst, vpx_image_t *src) {
     case VPX_IMG_FMT_I420:
     case VPX_IMG_FMT_I422:
     case VPX_IMG_FMT_I444:
-    case VPX_IMG_FMT_I440:
-      break;
-    default:
-      fatal("Unsupported image conversion");
-      break;
+    case VPX_IMG_FMT_I440: break;
+    default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
     int w = src->d_w;
@@ -403,19 +379,16 @@ static void highbd_img_downshift(vpx_image_t *dst, vpx_image_t *src,
   int plane;
   if (dst->d_w != src->d_w || dst->d_h != src->d_h ||
       dst->x_chroma_shift != src->x_chroma_shift ||
-      dst->y_chroma_shift != src->y_chroma_shift ||
-      dst->fmt != src->fmt || down_shift < 0) {
+      dst->y_chroma_shift != src->y_chroma_shift || dst->fmt != src->fmt ||
+      down_shift < 0) {
     fatal("Unsupported image conversion");
   }
   switch (src->fmt) {
     case VPX_IMG_FMT_I42016:
     case VPX_IMG_FMT_I42216:
     case VPX_IMG_FMT_I44416:
-    case VPX_IMG_FMT_I44016:
-      break;
-    default:
-      fatal("Unsupported image conversion");
-      break;
+    case VPX_IMG_FMT_I44016: break;
+    default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
     int w = src->d_w;
@@ -430,8 +403,7 @@ static void highbd_img_downshift(vpx_image_t *dst, vpx_image_t *src,
           (uint16_t *)(src->planes[plane] + y * src->stride[plane]);
       uint16_t *p_dst =
           (uint16_t *)(dst->planes[plane] + y * dst->stride[plane]);
-      for (x = 0; x < w; x++)
-        *p_dst++ = *p_src++ >> down_shift;
+      for (x = 0; x < w; x++) *p_dst++ = *p_src++ >> down_shift;
     }
   }
 }
@@ -442,19 +414,15 @@ static void lowbd_img_downshift(vpx_image_t *dst, vpx_image_t *src,
   if (dst->d_w != src->d_w || dst->d_h != src->d_h ||
       dst->x_chroma_shift != src->x_chroma_shift ||
       dst->y_chroma_shift != src->y_chroma_shift ||
-      src->fmt != dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH ||
-      down_shift < 0) {
+      src->fmt != dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH || down_shift < 0) {
     fatal("Unsupported image conversion");
   }
   switch (dst->fmt) {
     case VPX_IMG_FMT_I420:
     case VPX_IMG_FMT_I422:
     case VPX_IMG_FMT_I444:
-    case VPX_IMG_FMT_I440:
-      break;
-    default:
-      fatal("Unsupported image conversion");
-      break;
+    case VPX_IMG_FMT_I440: break;
+    default: fatal("Unsupported image conversion"); break;
   }
   for (plane = 0; plane < 3; plane++) {
     int w = src->d_w;
@@ -475,8 +443,7 @@ static void lowbd_img_downshift(vpx_image_t *dst, vpx_image_t *src,
   }
 }
 
-void vpx_img_downshift(vpx_image_t *dst, vpx_image_t *src,
-                       int down_shift) {
+void vpx_img_downshift(vpx_image_t *dst, vpx_image_t *src, int down_shift) {
   if (dst->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
     highbd_img_downshift(dst, src, down_shift);
   } else {
diff --git a/tools_common.h b/tools_common.h
index 613f26bdbb9c68ef58ae9260a6fa7f88e5b8f978..420af90d2af68f0b5fce041c966d6fc98c2aeda5 100644
--- a/tools_common.h
+++ b/tools_common.h
@@ -30,24 +30,24 @@
 /* MinGW uses f{seek,tell}o64 for large files. */
 #define fseeko fseeko64
 #define ftello ftello64
-#endif  /* _WIN32 */
+#endif /* _WIN32 */
 
 #if CONFIG_OS_SUPPORT
 #if defined(_MSC_VER)
-#include <io.h>  /* NOLINT */
-#define isatty   _isatty
-#define fileno   _fileno
+#include <io.h> /* NOLINT */
+#define isatty _isatty
+#define fileno _fileno
 #else
-#include <unistd.h>  /* NOLINT */
-#endif  /* _MSC_VER */
-#endif  /* CONFIG_OS_SUPPORT */
+#include <unistd.h> /* NOLINT */
+#endif              /* _MSC_VER */
+#endif              /* CONFIG_OS_SUPPORT */
 
 /* Use 32-bit file operations in WebM file format when building ARM
  * executables (.axf) with RVCT. */
 #if !CONFIG_OS_SUPPORT
 #define fseeko fseek
 #define ftello ftell
-#endif  /* CONFIG_OS_SUPPORT */
+#endif /* CONFIG_OS_SUPPORT */
 
 #define LITERALU64(hi, lo) ((((uint64_t)hi) << 32) | lo)
 
@@ -55,7 +55,7 @@
 #define PATH_MAX 512
 #endif
 
-#define IVF_FRAME_HDR_SZ (4 + 8)  /* 4 byte size + 8 byte timestamp */
+#define IVF_FRAME_HDR_SZ (4 + 8) /* 4 byte size + 8 byte timestamp */
 #define IVF_FILE_HDR_SZ 32
 
 #define RAW_FRAME_HDR_SZ sizeof(uint32_t)
@@ -158,7 +158,7 @@ void vpx_img_truncate_16_to_8(vpx_image_t *dst, vpx_image_t *src);
 #endif
 
 #ifdef __cplusplus
-}  /* extern "C" */
+} /* extern "C" */
 #endif
 
 #endif  // TOOLS_COMMON_H_
diff --git a/video_reader.c b/video_reader.c
index 39c7edba1e3deb5f8d3362deeb26ba581f8a5136..a0ba2521c6135f06bff917b920d321eccbcf1f5f 100644
--- a/video_reader.c
+++ b/video_reader.c
@@ -30,21 +30,17 @@ VpxVideoReader *vpx_video_reader_open(const char *filename) {
   char header[32];
   VpxVideoReader *reader = NULL;
   FILE *const file = fopen(filename, "rb");
-  if (!file)
-    return NULL;  // Can't open file
+  if (!file) return NULL;  // Can't open file
 
-  if (fread(header, 1, 32, file) != 32)
-    return NULL;  // Can't read file header
+  if (fread(header, 1, 32, file) != 32) return NULL;  // Can't read file header
 
   if (memcmp(kIVFSignature, header, 4) != 0)
     return NULL;  // Wrong IVF signature
 
-  if (mem_get_le16(header + 4) != 0)
-    return NULL;  // Wrong IVF version
+  if (mem_get_le16(header + 4) != 0) return NULL;  // Wrong IVF version
 
   reader = calloc(1, sizeof(*reader));
-  if (!reader)
-    return NULL;  // Can't allocate VpxVideoReader
+  if (!reader) return NULL;  // Can't allocate VpxVideoReader
 
   reader->file = file;
   reader->info.codec_fourcc = mem_get_le32(header + 8);
@@ -71,8 +67,7 @@ int vpx_video_reader_read_frame(VpxVideoReader *reader) {
 
 const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader,
                                           size_t *size) {
-  if (size)
-    *size = reader->frame_size;
+  if (size) *size = reader->frame_size;
 
   return reader->buffer;
 }
@@ -80,4 +75,3 @@ const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader,
 const VpxVideoInfo *vpx_video_reader_get_info(VpxVideoReader *reader) {
   return &reader->info;
 }
-
diff --git a/video_reader.h b/video_reader.h
index a62c6d7109ad7d6ddeabc23686194033dd5ef298..73c25b00a7d94740dda8e80ece3148d6f5a018a2 100644
--- a/video_reader.h
+++ b/video_reader.h
@@ -39,8 +39,7 @@ int vpx_video_reader_read_frame(VpxVideoReader *reader);
 
 // Returns the pointer to memory buffer with frame data read by last call to
 // vpx_video_reader_read_frame().
-const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader,
-                                          size_t *size);
+const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader, size_t *size);
 
 // Fills VpxVideoInfo with information from opened video file.
 const VpxVideoInfo *vpx_video_reader_get_info(VpxVideoReader *reader);
diff --git a/video_writer.c b/video_writer.c
index 3695236bfa5232801598953b99f511dac538a7ad..56d428b0720f7101451ff3b130fa9d47550d9dea 100644
--- a/video_writer.c
+++ b/video_writer.c
@@ -37,12 +37,10 @@ VpxVideoWriter *vpx_video_writer_open(const char *filename,
   if (container == kContainerIVF) {
     VpxVideoWriter *writer = NULL;
     FILE *const file = fopen(filename, "wb");
-    if (!file)
-      return NULL;
+    if (!file) return NULL;
 
     writer = malloc(sizeof(*writer));
-    if (!writer)
-      return NULL;
+    if (!writer) return NULL;
 
     writer->frame_count = 0;
     writer->info = *info;
@@ -67,12 +65,10 @@ void vpx_video_writer_close(VpxVideoWriter *writer) {
   }
 }
 
-int vpx_video_writer_write_frame(VpxVideoWriter *writer,
-                                 const uint8_t *buffer, size_t size,
-                                 int64_t pts) {
+int vpx_video_writer_write_frame(VpxVideoWriter *writer, const uint8_t *buffer,
+                                 size_t size, int64_t pts) {
   ivf_write_frame_header(writer->file, pts, size);
-  if (fwrite(buffer, 1, size, writer->file) != size)
-    return 0;
+  if (fwrite(buffer, 1, size, writer->file) != size) return 0;
 
   ++writer->frame_count;
 
diff --git a/video_writer.h b/video_writer.h
index 5dbfe52ea00f9c7e8dac94bbe189dd1aa611e69b..a769811c44042c3b96f6e9eea5c8802149facc95 100644
--- a/video_writer.h
+++ b/video_writer.h
@@ -13,9 +13,7 @@
 
 #include "./video_common.h"
 
-typedef enum {
-  kContainerIVF
-} VpxContainer;
+typedef enum { kContainerIVF } VpxContainer;
 
 struct VpxVideoWriterStruct;
 typedef struct VpxVideoWriterStruct VpxVideoWriter;
@@ -36,9 +34,8 @@ VpxVideoWriter *vpx_video_writer_open(const char *filename,
 void vpx_video_writer_close(VpxVideoWriter *writer);
 
 // Writes frame bytes to the file.
-int vpx_video_writer_write_frame(VpxVideoWriter *writer,
-                                 const uint8_t *buffer, size_t size,
-                                 int64_t pts);
+int vpx_video_writer_write_frame(VpxVideoWriter *writer, const uint8_t *buffer,
+                                 size_t size, int64_t pts);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/common/alloccommon.c b/vp10/common/alloccommon.c
index 5469a7aaeae09516d9b0bb3112a2a57a5c392c0d..fb6d81df8bcaf84a29cab0f48379199e16aba72e 100644
--- a/vp10/common/alloccommon.c
+++ b/vp10/common/alloccommon.c
@@ -35,8 +35,7 @@ static int alloc_seg_map(VP10_COMMON *cm, int seg_map_size) {
 
   for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
     cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1);
-    if (cm->seg_map_array[i] == NULL)
-      return 1;
+    if (cm->seg_map_array[i] == NULL) return 1;
   }
   cm->seg_map_alloc_size = seg_map_size;
 
@@ -97,15 +96,13 @@ int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) {
   new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
   if (cm->mi_alloc_size < new_mi_size) {
     cm->free_mi(cm);
-    if (cm->alloc_mi(cm, new_mi_size))
-      goto fail;
+    if (cm->alloc_mi(cm, new_mi_size)) goto fail;
   }
 
   if (cm->seg_map_alloc_size < cm->mi_rows * cm->mi_cols) {
     // Create the segmentation map structure and set to 0.
     free_seg_map(cm);
-    if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols))
-      goto fail;
+    if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols)) goto fail;
   }
 
   if (cm->above_context_alloc_cols < cm->mi_cols) {
@@ -124,7 +121,7 @@ int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) {
 
   return 0;
 
- fail:
+fail:
   vp10_free_context_buffers(cm);
   return 1;
 }
diff --git a/vp10/common/alloccommon.h b/vp10/common/alloccommon.h
index 9d35e5e559c6d86d4b5a8cb81e3426a06a047913..bbe2ff6a04a1f6f6a6a2f4111e593ae78708debe 100644
--- a/vp10/common/alloccommon.h
+++ b/vp10/common/alloccommon.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_COMMON_ALLOCCOMMON_H_
 #define VP10_COMMON_ALLOCCOMMON_H_
 
diff --git a/vp10/common/arm/neon/iht4x4_add_neon.c b/vp10/common/arm/neon/iht4x4_add_neon.c
index bd3e8b30f4098cea0485f1f16bd8b80c90aadc7e..d074bc91f9fc187fa9f0d3444dad1f9d740802dc 100644
--- a/vp10/common/arm/neon/iht4x4_add_neon.c
+++ b/vp10/common/arm/neon/iht4x4_add_neon.c
@@ -23,226 +23,211 @@ static int16_t cospi_8_64 = 0x3b21;
 static int16_t cospi_16_64 = 0x2d41;
 static int16_t cospi_24_64 = 0x187e;
 
-static INLINE void TRANSPOSE4X4(
-        int16x8_t *q8s16,
-        int16x8_t *q9s16) {
-    int32x4_t q8s32, q9s32;
-    int16x4x2_t d0x2s16, d1x2s16;
-    int32x4x2_t q0x2s32;
-
-    d0x2s16 = vtrn_s16(vget_low_s16(*q8s16), vget_high_s16(*q8s16));
-    d1x2s16 = vtrn_s16(vget_low_s16(*q9s16), vget_high_s16(*q9s16));
-
-    q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]));
-    q9s32 = vreinterpretq_s32_s16(vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]));
-    q0x2s32 = vtrnq_s32(q8s32, q9s32);
-
-    *q8s16 = vreinterpretq_s16_s32(q0x2s32.val[0]);
-    *q9s16 = vreinterpretq_s16_s32(q0x2s32.val[1]);
-    return;
+static INLINE void TRANSPOSE4X4(int16x8_t *q8s16, int16x8_t *q9s16) {
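+  // q8 holds rows 0-1 and q9 rows 2-3 of the 4x4 block; a 16-bit vtrn on
+  // each register followed by a 32-bit vtrnq across the pair completes the
+  // in-register transpose.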
+  int32x4_t q8s32, q9s32;
+  int16x4x2_t d0x2s16, d1x2s16;
+  int32x4x2_t q0x2s32;
+
+  d0x2s16 = vtrn_s16(vget_low_s16(*q8s16), vget_high_s16(*q8s16));
+  d1x2s16 = vtrn_s16(vget_low_s16(*q9s16), vget_high_s16(*q9s16));
+
+  q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]));
+  q9s32 = vreinterpretq_s32_s16(vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]));
+  q0x2s32 = vtrnq_s32(q8s32, q9s32);
+
+  *q8s16 = vreinterpretq_s16_s32(q0x2s32.val[0]);
+  *q9s16 = vreinterpretq_s16_s32(q0x2s32.val[1]);
+  return;
 }
 
-static INLINE void GENERATE_COSINE_CONSTANTS(
-        int16x4_t *d0s16,
-        int16x4_t *d1s16,
-        int16x4_t *d2s16) {
-    *d0s16 = vdup_n_s16(cospi_8_64);
-    *d1s16 = vdup_n_s16(cospi_16_64);
-    *d2s16 = vdup_n_s16(cospi_24_64);
-    return;
+static INLINE void GENERATE_COSINE_CONSTANTS(int16x4_t *d0s16, int16x4_t *d1s16,
+                                             int16x4_t *d2s16) {
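+  // Broadcast the Q14 cosine constants used by the 4-point IDCT butterflies.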
+  *d0s16 = vdup_n_s16(cospi_8_64);
+  *d1s16 = vdup_n_s16(cospi_16_64);
+  *d2s16 = vdup_n_s16(cospi_24_64);
+  return;
 }
 
-static INLINE void GENERATE_SINE_CONSTANTS(
-        int16x4_t *d3s16,
-        int16x4_t *d4s16,
-        int16x4_t *d5s16,
-        int16x8_t *q3s16) {
-    *d3s16 = vdup_n_s16(sinpi_1_9);
-    *d4s16 = vdup_n_s16(sinpi_2_9);
-    *q3s16 = vdupq_n_s16(sinpi_3_9);
-    *d5s16 = vdup_n_s16(sinpi_4_9);
-    return;
+static INLINE void GENERATE_SINE_CONSTANTS(int16x4_t *d3s16, int16x4_t *d4s16,
+                                           int16x4_t *d5s16, int16x8_t *q3s16) {
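+  // Broadcast the Q14 sine constants (sinpi_k_9) used by the 4-point IADST.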
+  *d3s16 = vdup_n_s16(sinpi_1_9);
+  *d4s16 = vdup_n_s16(sinpi_2_9);
+  *q3s16 = vdupq_n_s16(sinpi_3_9);
+  *d5s16 = vdup_n_s16(sinpi_4_9);
+  return;
 }
 
-static INLINE void IDCT4x4_1D(
-        int16x4_t *d0s16,
-        int16x4_t *d1s16,
-        int16x4_t *d2s16,
-        int16x8_t *q8s16,
-        int16x8_t *q9s16) {
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d23s16, d24s16;
-    int16x4_t d26s16, d27s16, d28s16, d29s16;
-    int32x4_t q10s32, q13s32, q14s32, q15s32;
-    int16x8_t q13s16, q14s16;
-
-    d16s16 = vget_low_s16(*q8s16);
-    d17s16 = vget_high_s16(*q8s16);
-    d18s16 = vget_low_s16(*q9s16);
-    d19s16 = vget_high_s16(*q9s16);
-
-    d23s16 = vadd_s16(d16s16, d18s16);
-    d24s16 = vsub_s16(d16s16, d18s16);
-
-    q15s32 = vmull_s16(d17s16, *d2s16);
-    q10s32 = vmull_s16(d17s16, *d0s16);
-    q13s32 = vmull_s16(d23s16, *d1s16);
-    q14s32 = vmull_s16(d24s16, *d1s16);
-    q15s32 = vmlsl_s16(q15s32, d19s16, *d0s16);
-    q10s32 = vmlal_s16(q10s32, d19s16, *d2s16);
-
-    d26s16 = vqrshrn_n_s32(q13s32, 14);
-    d27s16 = vqrshrn_n_s32(q14s32, 14);
-    d29s16 = vqrshrn_n_s32(q15s32, 14);
-    d28s16 = vqrshrn_n_s32(q10s32, 14);
-
-    q13s16 = vcombine_s16(d26s16, d27s16);
-    q14s16 = vcombine_s16(d28s16, d29s16);
-    *q8s16 = vaddq_s16(q13s16, q14s16);
-    *q9s16 = vsubq_s16(q13s16, q14s16);
-    *q9s16 = vcombine_s16(vget_high_s16(*q9s16),
-                          vget_low_s16(*q9s16));  // vswp
-    return;
+static INLINE void IDCT4x4_1D(int16x4_t *d0s16, int16x4_t *d1s16,
+                              int16x4_t *d2s16, int16x8_t *q8s16,
+                              int16x8_t *q9s16) {
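+  // One 1-D pass of the 4-point IDCT in Q14 fixed point; vqrshrn_n_s32(x, 14)
+  // narrows each product back to 16 bits with a rounding shift, and the final
+  // vcombine swaps the halves of q9 (the vswp noted below).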
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d23s16, d24s16;
+  int16x4_t d26s16, d27s16, d28s16, d29s16;
+  int32x4_t q10s32, q13s32, q14s32, q15s32;
+  int16x8_t q13s16, q14s16;
+
+  d16s16 = vget_low_s16(*q8s16);
+  d17s16 = vget_high_s16(*q8s16);
+  d18s16 = vget_low_s16(*q9s16);
+  d19s16 = vget_high_s16(*q9s16);
+
+  d23s16 = vadd_s16(d16s16, d18s16);
+  d24s16 = vsub_s16(d16s16, d18s16);
+
+  q15s32 = vmull_s16(d17s16, *d2s16);
+  q10s32 = vmull_s16(d17s16, *d0s16);
+  q13s32 = vmull_s16(d23s16, *d1s16);
+  q14s32 = vmull_s16(d24s16, *d1s16);
+  q15s32 = vmlsl_s16(q15s32, d19s16, *d0s16);
+  q10s32 = vmlal_s16(q10s32, d19s16, *d2s16);
+
+  d26s16 = vqrshrn_n_s32(q13s32, 14);
+  d27s16 = vqrshrn_n_s32(q14s32, 14);
+  d29s16 = vqrshrn_n_s32(q15s32, 14);
+  d28s16 = vqrshrn_n_s32(q10s32, 14);
+
+  q13s16 = vcombine_s16(d26s16, d27s16);
+  q14s16 = vcombine_s16(d28s16, d29s16);
+  *q8s16 = vaddq_s16(q13s16, q14s16);
+  *q9s16 = vsubq_s16(q13s16, q14s16);
+  *q9s16 = vcombine_s16(vget_high_s16(*q9s16), vget_low_s16(*q9s16));  // vswp
+  return;
 }
 
-static INLINE void IADST4x4_1D(
-        int16x4_t *d3s16,
-        int16x4_t *d4s16,
-        int16x4_t *d5s16,
-        int16x8_t *q3s16,
-        int16x8_t *q8s16,
-        int16x8_t *q9s16) {
-    int16x4_t d6s16, d16s16, d17s16, d18s16, d19s16;
-    int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32;
-
-    d6s16 = vget_low_s16(*q3s16);
-
-    d16s16 = vget_low_s16(*q8s16);
-    d17s16 = vget_high_s16(*q8s16);
-    d18s16 = vget_low_s16(*q9s16);
-    d19s16 = vget_high_s16(*q9s16);
-
-    q10s32 = vmull_s16(*d3s16, d16s16);
-    q11s32 = vmull_s16(*d4s16, d16s16);
-    q12s32 = vmull_s16(d6s16, d17s16);
-    q13s32 = vmull_s16(*d5s16, d18s16);
-    q14s32 = vmull_s16(*d3s16, d18s16);
-    q15s32 = vmovl_s16(d16s16);
-    q15s32 = vaddw_s16(q15s32, d19s16);
-    q8s32  = vmull_s16(*d4s16, d19s16);
-    q15s32 = vsubw_s16(q15s32, d18s16);
-    q9s32  = vmull_s16(*d5s16, d19s16);
-
-    q10s32 = vaddq_s32(q10s32, q13s32);
-    q10s32 = vaddq_s32(q10s32, q8s32);
-    q11s32 = vsubq_s32(q11s32, q14s32);
-    q8s32  = vdupq_n_s32(sinpi_3_9);
-    q11s32 = vsubq_s32(q11s32, q9s32);
-    q15s32 = vmulq_s32(q15s32, q8s32);
-
-    q13s32 = vaddq_s32(q10s32, q12s32);
-    q10s32 = vaddq_s32(q10s32, q11s32);
-    q14s32 = vaddq_s32(q11s32, q12s32);
-    q10s32 = vsubq_s32(q10s32, q12s32);
-
-    d16s16 = vqrshrn_n_s32(q13s32, 14);
-    d17s16 = vqrshrn_n_s32(q14s32, 14);
-    d18s16 = vqrshrn_n_s32(q15s32, 14);
-    d19s16 = vqrshrn_n_s32(q10s32, 14);
-
-    *q8s16 = vcombine_s16(d16s16, d17s16);
-    *q9s16 = vcombine_s16(d18s16, d19s16);
-    return;
+static INLINE void IADST4x4_1D(int16x4_t *d3s16, int16x4_t *d4s16,
+                               int16x4_t *d5s16, int16x8_t *q3s16,
+                               int16x8_t *q8s16, int16x8_t *q9s16) {
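+  // One 1-D pass of the 4-point IADST, again narrowing each 32-bit
+  // accumulator back to 16 bits with a Q14 rounding shift.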
+  int16x4_t d6s16, d16s16, d17s16, d18s16, d19s16;
+  int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32;
+
+  d6s16 = vget_low_s16(*q3s16);
+
+  d16s16 = vget_low_s16(*q8s16);
+  d17s16 = vget_high_s16(*q8s16);
+  d18s16 = vget_low_s16(*q9s16);
+  d19s16 = vget_high_s16(*q9s16);
+
+  q10s32 = vmull_s16(*d3s16, d16s16);
+  q11s32 = vmull_s16(*d4s16, d16s16);
+  q12s32 = vmull_s16(d6s16, d17s16);
+  q13s32 = vmull_s16(*d5s16, d18s16);
+  q14s32 = vmull_s16(*d3s16, d18s16);
+  q15s32 = vmovl_s16(d16s16);
+  q15s32 = vaddw_s16(q15s32, d19s16);
+  q8s32 = vmull_s16(*d4s16, d19s16);
+  q15s32 = vsubw_s16(q15s32, d18s16);
+  q9s32 = vmull_s16(*d5s16, d19s16);
+
+  q10s32 = vaddq_s32(q10s32, q13s32);
+  q10s32 = vaddq_s32(q10s32, q8s32);
+  q11s32 = vsubq_s32(q11s32, q14s32);
+  q8s32 = vdupq_n_s32(sinpi_3_9);
+  q11s32 = vsubq_s32(q11s32, q9s32);
+  q15s32 = vmulq_s32(q15s32, q8s32);
+
+  q13s32 = vaddq_s32(q10s32, q12s32);
+  q10s32 = vaddq_s32(q10s32, q11s32);
+  q14s32 = vaddq_s32(q11s32, q12s32);
+  q10s32 = vsubq_s32(q10s32, q12s32);
+
+  d16s16 = vqrshrn_n_s32(q13s32, 14);
+  d17s16 = vqrshrn_n_s32(q14s32, 14);
+  d18s16 = vqrshrn_n_s32(q15s32, 14);
+  d19s16 = vqrshrn_n_s32(q10s32, 14);
+
+  *q8s16 = vcombine_s16(d16s16, d17s16);
+  *q9s16 = vcombine_s16(d18s16, d19s16);
+  return;
 }
 
 void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
-                            int dest_stride, int tx_type) {
-    uint8x8_t d26u8, d27u8;
-    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
-    uint32x2_t d26u32, d27u32;
-    int16x8_t q3s16, q8s16, q9s16;
-    uint16x8_t q8u16, q9u16;
-
-    d26u32 = d27u32 = vdup_n_u32(0);
-
-    q8s16 = vld1q_s16(input);
-    q9s16 = vld1q_s16(input + 8);
-
-    TRANSPOSE4X4(&q8s16, &q9s16);
-
-    switch (tx_type) {
-      case 0:  // idct_idct is not supported. Fall back to C
-        vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
-        return;
-        break;
-      case 1:  // iadst_idct
-        // generate constants
-        GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
-        GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
-
-        // first transform rows
-        IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
-
-        // transpose the matrix
-        TRANSPOSE4X4(&q8s16, &q9s16);
-
-        // then transform columns
-        IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
-        break;
-      case 2:  // idct_iadst
-        // generate constantsyy
-        GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
-        GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
-
-        // first transform rows
-        IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
-
-        // transpose the matrix
-        TRANSPOSE4X4(&q8s16, &q9s16);
-
-        // then transform columns
-        IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
-        break;
-      case 3:  // iadst_iadst
-        // generate constants
-        GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
-
-        // first transform rows
-        IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
-
-        // transpose the matrix
-        TRANSPOSE4X4(&q8s16, &q9s16);
-
-        // then transform columns
-        IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
-        break;
-      default:  // iadst_idct
-        assert(0);
-        break;
-    }
-
-    q8s16 = vrshrq_n_s16(q8s16, 4);
-    q9s16 = vrshrq_n_s16(q9s16, 4);
-
-    d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 0);
-    dest += dest_stride;
-    d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 1);
-    dest += dest_stride;
-    d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 0);
-    dest += dest_stride;
-    d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 1);
-
-    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
-    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));
-
-    d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-    d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-
-    vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 1);
-    dest -= dest_stride;
-    vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 0);
-    dest -= dest_stride;
-    vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1);
-    dest -= dest_stride;
-    vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0);
-    return;
+                             int dest_stride, int tx_type) {
+  uint8x8_t d26u8, d27u8;
+  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
+  uint32x2_t d26u32, d27u32;
+  int16x8_t q3s16, q8s16, q9s16;
+  uint16x8_t q8u16, q9u16;
+
+  d26u32 = d27u32 = vdup_n_u32(0);
+
+  q8s16 = vld1q_s16(input);
+  q9s16 = vld1q_s16(input + 8);
+
+  TRANSPOSE4X4(&q8s16, &q9s16);
+
+  switch (tx_type) {
+    case 0:  // idct_idct is not supported. Fall back to C
+      vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
+      return;
+    case 1:  // iadst_idct
+      // generate constants
+      GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
+      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
+
+      // first transform rows
+      IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
+
+      // transpose the matrix
+      TRANSPOSE4X4(&q8s16, &q9s16);
+
+      // then transform columns
+      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
+      break;
+    case 2:  // idct_iadst
+      // generate constants
+      GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
+      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
+
+      // first transform rows
+      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
+
+      // transpose the matrix
+      TRANSPOSE4X4(&q8s16, &q9s16);
+
+      // then transform columns
+      IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
+      break;
+    case 3:  // iadst_iadst
+      // generate constants
+      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
+
+      // first transform rows
+      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
+
+      // transpose the matrix
+      TRANSPOSE4X4(&q8s16, &q9s16);
+
+      // then transform columns
+      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
+      break;
+    default:  // unreachable
+      assert(0);
+      break;
+  }
+
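+  // A rounding shift by 4 removes the remaining transform scaling before
+  // reconstruction.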
+  q8s16 = vrshrq_n_s16(q8s16, 4);
+  q9s16 = vrshrq_n_s16(q9s16, 4);
+
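+  // Load four 4-pixel rows of the destination into the lanes of two 32-bit
+  // vectors.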
+  d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 0);
+  dest += dest_stride;
+  d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 1);
+  dest += dest_stride;
+  d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 0);
+  dest += dest_stride;
+  d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 1);
+
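+  // Add the residual to the prediction, saturate back to 8 bits, and store
+  // the four rows in reverse order (dest was left pointing at the last row).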
+  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
+  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));
+
+  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+
+  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 1);
+  dest -= dest_stride;
+  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 0);
+  dest -= dest_stride;
+  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1);
+  dest -= dest_stride;
+  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0);
+  return;
 }
diff --git a/vp10/common/arm/neon/iht8x8_add_neon.c b/vp10/common/arm/neon/iht8x8_add_neon.c
index 82d7ccc612ea9cf1c02ac35c94df9e78b2c3e2f9..7e1c83f41517b65731f4125b6c57bbe869bdf87e 100644
--- a/vp10/common/arm/neon/iht8x8_add_neon.c
+++ b/vp10/common/arm/neon/iht8x8_add_neon.c
@@ -31,594 +31,577 @@ static int16_t cospi_26_64 = 4756;
 static int16_t cospi_28_64 = 3196;
 static int16_t cospi_30_64 = 1606;
 
-static INLINE void TRANSPOSE8X8(
-        int16x8_t *q8s16,
-        int16x8_t *q9s16,
-        int16x8_t *q10s16,
-        int16x8_t *q11s16,
-        int16x8_t *q12s16,
-        int16x8_t *q13s16,
-        int16x8_t *q14s16,
-        int16x8_t *q15s16) {
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
-    int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
-
-    d16s16 = vget_low_s16(*q8s16);
-    d17s16 = vget_high_s16(*q8s16);
-    d18s16 = vget_low_s16(*q9s16);
-    d19s16 = vget_high_s16(*q9s16);
-    d20s16 = vget_low_s16(*q10s16);
-    d21s16 = vget_high_s16(*q10s16);
-    d22s16 = vget_low_s16(*q11s16);
-    d23s16 = vget_high_s16(*q11s16);
-    d24s16 = vget_low_s16(*q12s16);
-    d25s16 = vget_high_s16(*q12s16);
-    d26s16 = vget_low_s16(*q13s16);
-    d27s16 = vget_high_s16(*q13s16);
-    d28s16 = vget_low_s16(*q14s16);
-    d29s16 = vget_high_s16(*q14s16);
-    d30s16 = vget_low_s16(*q15s16);
-    d31s16 = vget_high_s16(*q15s16);
-
-    *q8s16  = vcombine_s16(d16s16, d24s16);  // vswp d17, d24
-    *q9s16  = vcombine_s16(d18s16, d26s16);  // vswp d19, d26
-    *q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
-    *q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
-    *q12s16 = vcombine_s16(d17s16, d25s16);
-    *q13s16 = vcombine_s16(d19s16, d27s16);
-    *q14s16 = vcombine_s16(d21s16, d29s16);
-    *q15s16 = vcombine_s16(d23s16, d31s16);
-
-    q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q8s16),
-                        vreinterpretq_s32_s16(*q10s16));
-    q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q9s16),
-                        vreinterpretq_s32_s16(*q11s16));
-    q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q12s16),
-                        vreinterpretq_s32_s16(*q14s16));
-    q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q13s16),
-                        vreinterpretq_s32_s16(*q15s16));
-
-    q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
-                        vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
-    q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
-                        vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
-    q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
-                        vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
-    q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
-                        vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
-
-    *q8s16  = q0x2s16.val[0];
-    *q9s16  = q0x2s16.val[1];
-    *q10s16 = q1x2s16.val[0];
-    *q11s16 = q1x2s16.val[1];
-    *q12s16 = q2x2s16.val[0];
-    *q13s16 = q2x2s16.val[1];
-    *q14s16 = q3x2s16.val[0];
-    *q15s16 = q3x2s16.val[1];
-    return;
+static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16,
+                                int16x8_t *q10s16, int16x8_t *q11s16,
+                                int16x8_t *q12s16, int16x8_t *q13s16,
+                                int16x8_t *q14s16, int16x8_t *q15s16) {
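+  // 8x8 in-register transpose: first swap the high d-register halves via
+  // vcombine (the vswp comments below), then finish with 32-bit and 16-bit
+  // vtrn passes across the eight q registers.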
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
+  int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
+
+  d16s16 = vget_low_s16(*q8s16);
+  d17s16 = vget_high_s16(*q8s16);
+  d18s16 = vget_low_s16(*q9s16);
+  d19s16 = vget_high_s16(*q9s16);
+  d20s16 = vget_low_s16(*q10s16);
+  d21s16 = vget_high_s16(*q10s16);
+  d22s16 = vget_low_s16(*q11s16);
+  d23s16 = vget_high_s16(*q11s16);
+  d24s16 = vget_low_s16(*q12s16);
+  d25s16 = vget_high_s16(*q12s16);
+  d26s16 = vget_low_s16(*q13s16);
+  d27s16 = vget_high_s16(*q13s16);
+  d28s16 = vget_low_s16(*q14s16);
+  d29s16 = vget_high_s16(*q14s16);
+  d30s16 = vget_low_s16(*q15s16);
+  d31s16 = vget_high_s16(*q15s16);
+
+  *q8s16 = vcombine_s16(d16s16, d24s16);   // vswp d17, d24
+  *q9s16 = vcombine_s16(d18s16, d26s16);   // vswp d19, d26
+  *q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
+  *q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
+  *q12s16 = vcombine_s16(d17s16, d25s16);
+  *q13s16 = vcombine_s16(d19s16, d27s16);
+  *q14s16 = vcombine_s16(d21s16, d29s16);
+  *q15s16 = vcombine_s16(d23s16, d31s16);
+
+  q0x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q8s16), vreinterpretq_s32_s16(*q10s16));
+  q1x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q9s16), vreinterpretq_s32_s16(*q11s16));
+  q2x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q12s16), vreinterpretq_s32_s16(*q14s16));
+  q3x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q13s16), vreinterpretq_s32_s16(*q15s16));
+
+  q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
+                      vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
+  q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
+                      vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
+  q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
+                      vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
+  q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
+                      vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
+
+  *q8s16 = q0x2s16.val[0];
+  *q9s16 = q0x2s16.val[1];
+  *q10s16 = q1x2s16.val[0];
+  *q11s16 = q1x2s16.val[1];
+  *q12s16 = q2x2s16.val[0];
+  *q13s16 = q2x2s16.val[1];
+  *q14s16 = q3x2s16.val[0];
+  *q15s16 = q3x2s16.val[1];
+  return;
 }
 
-static INLINE void IDCT8x8_1D(
-        int16x8_t *q8s16,
-        int16x8_t *q9s16,
-        int16x8_t *q10s16,
-        int16x8_t *q11s16,
-        int16x8_t *q12s16,
-        int16x8_t *q13s16,
-        int16x8_t *q14s16,
-        int16x8_t *q15s16) {
-    int16x4_t d0s16, d1s16, d2s16, d3s16;
-    int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-    int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32;
-    int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;
-
-    d0s16 = vdup_n_s16(cospi_28_64);
-    d1s16 = vdup_n_s16(cospi_4_64);
-    d2s16 = vdup_n_s16(cospi_12_64);
-    d3s16 = vdup_n_s16(cospi_20_64);
-
-    d16s16 = vget_low_s16(*q8s16);
-    d17s16 = vget_high_s16(*q8s16);
-    d18s16 = vget_low_s16(*q9s16);
-    d19s16 = vget_high_s16(*q9s16);
-    d20s16 = vget_low_s16(*q10s16);
-    d21s16 = vget_high_s16(*q10s16);
-    d22s16 = vget_low_s16(*q11s16);
-    d23s16 = vget_high_s16(*q11s16);
-    d24s16 = vget_low_s16(*q12s16);
-    d25s16 = vget_high_s16(*q12s16);
-    d26s16 = vget_low_s16(*q13s16);
-    d27s16 = vget_high_s16(*q13s16);
-    d28s16 = vget_low_s16(*q14s16);
-    d29s16 = vget_high_s16(*q14s16);
-    d30s16 = vget_low_s16(*q15s16);
-    d31s16 = vget_high_s16(*q15s16);
-
-    q2s32 = vmull_s16(d18s16, d0s16);
-    q3s32 = vmull_s16(d19s16, d0s16);
-    q5s32 = vmull_s16(d26s16, d2s16);
-    q6s32 = vmull_s16(d27s16, d2s16);
-
-    q2s32 = vmlsl_s16(q2s32, d30s16, d1s16);
-    q3s32 = vmlsl_s16(q3s32, d31s16, d1s16);
-    q5s32 = vmlsl_s16(q5s32, d22s16, d3s16);
-    q6s32 = vmlsl_s16(q6s32, d23s16, d3s16);
-
-    d8s16  = vqrshrn_n_s32(q2s32, 14);
-    d9s16  = vqrshrn_n_s32(q3s32, 14);
-    d10s16 = vqrshrn_n_s32(q5s32, 14);
-    d11s16 = vqrshrn_n_s32(q6s32, 14);
-    q4s16 = vcombine_s16(d8s16, d9s16);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-
-    q2s32 = vmull_s16(d18s16, d1s16);
-    q3s32 = vmull_s16(d19s16, d1s16);
-    q9s32 = vmull_s16(d26s16, d3s16);
-    q13s32 = vmull_s16(d27s16, d3s16);
-
-    q2s32 = vmlal_s16(q2s32, d30s16, d0s16);
-    q3s32 = vmlal_s16(q3s32, d31s16, d0s16);
-    q9s32 = vmlal_s16(q9s32, d22s16, d2s16);
-    q13s32 = vmlal_s16(q13s32, d23s16, d2s16);
-
-    d14s16 = vqrshrn_n_s32(q2s32, 14);
-    d15s16 = vqrshrn_n_s32(q3s32, 14);
-    d12s16 = vqrshrn_n_s32(q9s32, 14);
-    d13s16 = vqrshrn_n_s32(q13s32, 14);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-    q7s16 = vcombine_s16(d14s16, d15s16);
-
-    d0s16 = vdup_n_s16(cospi_16_64);
-
-    q2s32 = vmull_s16(d16s16, d0s16);
-    q3s32 = vmull_s16(d17s16, d0s16);
-    q13s32 = vmull_s16(d16s16, d0s16);
-    q15s32 = vmull_s16(d17s16, d0s16);
-
-    q2s32 = vmlal_s16(q2s32, d24s16, d0s16);
-    q3s32 = vmlal_s16(q3s32, d25s16, d0s16);
-    q13s32 = vmlsl_s16(q13s32, d24s16, d0s16);
-    q15s32 = vmlsl_s16(q15s32, d25s16, d0s16);
-
-    d0s16 = vdup_n_s16(cospi_24_64);
-    d1s16 = vdup_n_s16(cospi_8_64);
-
-    d18s16 = vqrshrn_n_s32(q2s32, 14);
-    d19s16 = vqrshrn_n_s32(q3s32, 14);
-    d22s16 = vqrshrn_n_s32(q13s32, 14);
-    d23s16 = vqrshrn_n_s32(q15s32, 14);
-    *q9s16  = vcombine_s16(d18s16, d19s16);
-    *q11s16 = vcombine_s16(d22s16, d23s16);
-
-    q2s32 = vmull_s16(d20s16, d0s16);
-    q3s32 = vmull_s16(d21s16, d0s16);
-    q8s32 = vmull_s16(d20s16, d1s16);
-    q12s32 = vmull_s16(d21s16, d1s16);
-
-    q2s32 = vmlsl_s16(q2s32, d28s16, d1s16);
-    q3s32 = vmlsl_s16(q3s32, d29s16, d1s16);
-    q8s32 = vmlal_s16(q8s32, d28s16, d0s16);
-    q12s32 = vmlal_s16(q12s32, d29s16, d0s16);
-
-    d26s16 = vqrshrn_n_s32(q2s32, 14);
-    d27s16 = vqrshrn_n_s32(q3s32, 14);
-    d30s16 = vqrshrn_n_s32(q8s32, 14);
-    d31s16 = vqrshrn_n_s32(q12s32, 14);
-    *q13s16 = vcombine_s16(d26s16, d27s16);
-    *q15s16 = vcombine_s16(d30s16, d31s16);
-
-    q0s16 = vaddq_s16(*q9s16, *q15s16);
-    q1s16 = vaddq_s16(*q11s16, *q13s16);
-    q2s16 = vsubq_s16(*q11s16, *q13s16);
-    q3s16 = vsubq_s16(*q9s16, *q15s16);
-
-    *q13s16 = vsubq_s16(q4s16, q5s16);
-    q4s16   = vaddq_s16(q4s16, q5s16);
-    *q14s16 = vsubq_s16(q7s16, q6s16);
-    q7s16   = vaddq_s16(q7s16, q6s16);
-    d26s16 = vget_low_s16(*q13s16);
-    d27s16 = vget_high_s16(*q13s16);
-    d28s16 = vget_low_s16(*q14s16);
-    d29s16 = vget_high_s16(*q14s16);
-
-    d16s16 = vdup_n_s16(cospi_16_64);
-
-    q9s32  = vmull_s16(d28s16, d16s16);
-    q10s32 = vmull_s16(d29s16, d16s16);
-    q11s32 = vmull_s16(d28s16, d16s16);
-    q12s32 = vmull_s16(d29s16, d16s16);
-
-    q9s32  = vmlsl_s16(q9s32,  d26s16, d16s16);
-    q10s32 = vmlsl_s16(q10s32, d27s16, d16s16);
-    q11s32 = vmlal_s16(q11s32, d26s16, d16s16);
-    q12s32 = vmlal_s16(q12s32, d27s16, d16s16);
-
-    d10s16 = vqrshrn_n_s32(q9s32, 14);
-    d11s16 = vqrshrn_n_s32(q10s32, 14);
-    d12s16 = vqrshrn_n_s32(q11s32, 14);
-    d13s16 = vqrshrn_n_s32(q12s32, 14);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    *q8s16  = vaddq_s16(q0s16, q7s16);
-    *q9s16  = vaddq_s16(q1s16, q6s16);
-    *q10s16 = vaddq_s16(q2s16, q5s16);
-    *q11s16 = vaddq_s16(q3s16, q4s16);
-    *q12s16 = vsubq_s16(q3s16, q4s16);
-    *q13s16 = vsubq_s16(q2s16, q5s16);
-    *q14s16 = vsubq_s16(q1s16, q6s16);
-    *q15s16 = vsubq_s16(q0s16, q7s16);
-    return;
+static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
+                              int16x8_t *q10s16, int16x8_t *q11s16,
+                              int16x8_t *q12s16, int16x8_t *q13s16,
+                              int16x8_t *q14s16, int16x8_t *q15s16) {
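+  // One 1-D pass of the 8-point IDCT: the odd inputs feed the cospi_28/4 and
+  // cospi_12/20 butterflies, the even inputs the cospi_16 and cospi_24/8
+  // stages, with every product narrowed back to 16 bits by a Q14 rounding
+  // shift.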
+  int16x4_t d0s16, d1s16, d2s16, d3s16;
+  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
+  int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32;
+  int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;
+
+  d0s16 = vdup_n_s16(cospi_28_64);
+  d1s16 = vdup_n_s16(cospi_4_64);
+  d2s16 = vdup_n_s16(cospi_12_64);
+  d3s16 = vdup_n_s16(cospi_20_64);
+
+  d16s16 = vget_low_s16(*q8s16);
+  d17s16 = vget_high_s16(*q8s16);
+  d18s16 = vget_low_s16(*q9s16);
+  d19s16 = vget_high_s16(*q9s16);
+  d20s16 = vget_low_s16(*q10s16);
+  d21s16 = vget_high_s16(*q10s16);
+  d22s16 = vget_low_s16(*q11s16);
+  d23s16 = vget_high_s16(*q11s16);
+  d24s16 = vget_low_s16(*q12s16);
+  d25s16 = vget_high_s16(*q12s16);
+  d26s16 = vget_low_s16(*q13s16);
+  d27s16 = vget_high_s16(*q13s16);
+  d28s16 = vget_low_s16(*q14s16);
+  d29s16 = vget_high_s16(*q14s16);
+  d30s16 = vget_low_s16(*q15s16);
+  d31s16 = vget_high_s16(*q15s16);
+
+  q2s32 = vmull_s16(d18s16, d0s16);
+  q3s32 = vmull_s16(d19s16, d0s16);
+  q5s32 = vmull_s16(d26s16, d2s16);
+  q6s32 = vmull_s16(d27s16, d2s16);
+
+  q2s32 = vmlsl_s16(q2s32, d30s16, d1s16);
+  q3s32 = vmlsl_s16(q3s32, d31s16, d1s16);
+  q5s32 = vmlsl_s16(q5s32, d22s16, d3s16);
+  q6s32 = vmlsl_s16(q6s32, d23s16, d3s16);
+
+  d8s16 = vqrshrn_n_s32(q2s32, 14);
+  d9s16 = vqrshrn_n_s32(q3s32, 14);
+  d10s16 = vqrshrn_n_s32(q5s32, 14);
+  d11s16 = vqrshrn_n_s32(q6s32, 14);
+  q4s16 = vcombine_s16(d8s16, d9s16);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+
+  q2s32 = vmull_s16(d18s16, d1s16);
+  q3s32 = vmull_s16(d19s16, d1s16);
+  q9s32 = vmull_s16(d26s16, d3s16);
+  q13s32 = vmull_s16(d27s16, d3s16);
+
+  q2s32 = vmlal_s16(q2s32, d30s16, d0s16);
+  q3s32 = vmlal_s16(q3s32, d31s16, d0s16);
+  q9s32 = vmlal_s16(q9s32, d22s16, d2s16);
+  q13s32 = vmlal_s16(q13s32, d23s16, d2s16);
+
+  d14s16 = vqrshrn_n_s32(q2s32, 14);
+  d15s16 = vqrshrn_n_s32(q3s32, 14);
+  d12s16 = vqrshrn_n_s32(q9s32, 14);
+  d13s16 = vqrshrn_n_s32(q13s32, 14);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+  q7s16 = vcombine_s16(d14s16, d15s16);
+
+  d0s16 = vdup_n_s16(cospi_16_64);
+
+  q2s32 = vmull_s16(d16s16, d0s16);
+  q3s32 = vmull_s16(d17s16, d0s16);
+  q13s32 = vmull_s16(d16s16, d0s16);
+  q15s32 = vmull_s16(d17s16, d0s16);
+
+  q2s32 = vmlal_s16(q2s32, d24s16, d0s16);
+  q3s32 = vmlal_s16(q3s32, d25s16, d0s16);
+  q13s32 = vmlsl_s16(q13s32, d24s16, d0s16);
+  q15s32 = vmlsl_s16(q15s32, d25s16, d0s16);
+
+  d0s16 = vdup_n_s16(cospi_24_64);
+  d1s16 = vdup_n_s16(cospi_8_64);
+
+  d18s16 = vqrshrn_n_s32(q2s32, 14);
+  d19s16 = vqrshrn_n_s32(q3s32, 14);
+  d22s16 = vqrshrn_n_s32(q13s32, 14);
+  d23s16 = vqrshrn_n_s32(q15s32, 14);
+  *q9s16 = vcombine_s16(d18s16, d19s16);
+  *q11s16 = vcombine_s16(d22s16, d23s16);
+
+  q2s32 = vmull_s16(d20s16, d0s16);
+  q3s32 = vmull_s16(d21s16, d0s16);
+  q8s32 = vmull_s16(d20s16, d1s16);
+  q12s32 = vmull_s16(d21s16, d1s16);
+
+  q2s32 = vmlsl_s16(q2s32, d28s16, d1s16);
+  q3s32 = vmlsl_s16(q3s32, d29s16, d1s16);
+  q8s32 = vmlal_s16(q8s32, d28s16, d0s16);
+  q12s32 = vmlal_s16(q12s32, d29s16, d0s16);
+
+  d26s16 = vqrshrn_n_s32(q2s32, 14);
+  d27s16 = vqrshrn_n_s32(q3s32, 14);
+  d30s16 = vqrshrn_n_s32(q8s32, 14);
+  d31s16 = vqrshrn_n_s32(q12s32, 14);
+  *q13s16 = vcombine_s16(d26s16, d27s16);
+  *q15s16 = vcombine_s16(d30s16, d31s16);
+
+  q0s16 = vaddq_s16(*q9s16, *q15s16);
+  q1s16 = vaddq_s16(*q11s16, *q13s16);
+  q2s16 = vsubq_s16(*q11s16, *q13s16);
+  q3s16 = vsubq_s16(*q9s16, *q15s16);
+
+  *q13s16 = vsubq_s16(q4s16, q5s16);
+  q4s16 = vaddq_s16(q4s16, q5s16);
+  *q14s16 = vsubq_s16(q7s16, q6s16);
+  q7s16 = vaddq_s16(q7s16, q6s16);
+  d26s16 = vget_low_s16(*q13s16);
+  d27s16 = vget_high_s16(*q13s16);
+  d28s16 = vget_low_s16(*q14s16);
+  d29s16 = vget_high_s16(*q14s16);
+
+  d16s16 = vdup_n_s16(cospi_16_64);
+
+  q9s32 = vmull_s16(d28s16, d16s16);
+  q10s32 = vmull_s16(d29s16, d16s16);
+  q11s32 = vmull_s16(d28s16, d16s16);
+  q12s32 = vmull_s16(d29s16, d16s16);
+
+  q9s32 = vmlsl_s16(q9s32, d26s16, d16s16);
+  q10s32 = vmlsl_s16(q10s32, d27s16, d16s16);
+  q11s32 = vmlal_s16(q11s32, d26s16, d16s16);
+  q12s32 = vmlal_s16(q12s32, d27s16, d16s16);
+
+  d10s16 = vqrshrn_n_s32(q9s32, 14);
+  d11s16 = vqrshrn_n_s32(q10s32, 14);
+  d12s16 = vqrshrn_n_s32(q11s32, 14);
+  d13s16 = vqrshrn_n_s32(q12s32, 14);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  *q8s16 = vaddq_s16(q0s16, q7s16);
+  *q9s16 = vaddq_s16(q1s16, q6s16);
+  *q10s16 = vaddq_s16(q2s16, q5s16);
+  *q11s16 = vaddq_s16(q3s16, q4s16);
+  *q12s16 = vsubq_s16(q3s16, q4s16);
+  *q13s16 = vsubq_s16(q2s16, q5s16);
+  *q14s16 = vsubq_s16(q1s16, q6s16);
+  *q15s16 = vsubq_s16(q0s16, q7s16);
+  return;
 }
 
-static INLINE void IADST8X8_1D(
-        int16x8_t *q8s16,
-        int16x8_t *q9s16,
-        int16x8_t *q10s16,
-        int16x8_t *q11s16,
-        int16x8_t *q12s16,
-        int16x8_t *q13s16,
-        int16x8_t *q14s16,
-        int16x8_t *q15s16) {
-    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
-    int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    int16x8_t q2s16, q4s16, q5s16, q6s16;
-    int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32;
-    int32x4_t q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32;
-
-    d16s16 = vget_low_s16(*q8s16);
-    d17s16 = vget_high_s16(*q8s16);
-    d18s16 = vget_low_s16(*q9s16);
-    d19s16 = vget_high_s16(*q9s16);
-    d20s16 = vget_low_s16(*q10s16);
-    d21s16 = vget_high_s16(*q10s16);
-    d22s16 = vget_low_s16(*q11s16);
-    d23s16 = vget_high_s16(*q11s16);
-    d24s16 = vget_low_s16(*q12s16);
-    d25s16 = vget_high_s16(*q12s16);
-    d26s16 = vget_low_s16(*q13s16);
-    d27s16 = vget_high_s16(*q13s16);
-    d28s16 = vget_low_s16(*q14s16);
-    d29s16 = vget_high_s16(*q14s16);
-    d30s16 = vget_low_s16(*q15s16);
-    d31s16 = vget_high_s16(*q15s16);
-
-    d14s16 = vdup_n_s16(cospi_2_64);
-    d15s16 = vdup_n_s16(cospi_30_64);
-
-    q1s32 = vmull_s16(d30s16, d14s16);
-    q2s32 = vmull_s16(d31s16, d14s16);
-    q3s32 = vmull_s16(d30s16, d15s16);
-    q4s32 = vmull_s16(d31s16, d15s16);
-
-    d30s16 = vdup_n_s16(cospi_18_64);
-    d31s16 = vdup_n_s16(cospi_14_64);
-
-    q1s32 = vmlal_s16(q1s32, d16s16, d15s16);
-    q2s32 = vmlal_s16(q2s32, d17s16, d15s16);
-    q3s32 = vmlsl_s16(q3s32, d16s16, d14s16);
-    q4s32 = vmlsl_s16(q4s32, d17s16, d14s16);
-
-    q5s32 = vmull_s16(d22s16, d30s16);
-    q6s32 = vmull_s16(d23s16, d30s16);
-    q7s32 = vmull_s16(d22s16, d31s16);
-    q8s32 = vmull_s16(d23s16, d31s16);
-
-    q5s32 = vmlal_s16(q5s32, d24s16, d31s16);
-    q6s32 = vmlal_s16(q6s32, d25s16, d31s16);
-    q7s32 = vmlsl_s16(q7s32, d24s16, d30s16);
-    q8s32 = vmlsl_s16(q8s32, d25s16, d30s16);
-
-    q11s32 = vaddq_s32(q1s32, q5s32);
-    q12s32 = vaddq_s32(q2s32, q6s32);
-    q1s32 = vsubq_s32(q1s32, q5s32);
-    q2s32 = vsubq_s32(q2s32, q6s32);
-
-    d22s16 = vqrshrn_n_s32(q11s32, 14);
-    d23s16 = vqrshrn_n_s32(q12s32, 14);
-    *q11s16 = vcombine_s16(d22s16, d23s16);
-
-    q12s32 = vaddq_s32(q3s32, q7s32);
-    q15s32 = vaddq_s32(q4s32, q8s32);
-    q3s32 = vsubq_s32(q3s32, q7s32);
-    q4s32 = vsubq_s32(q4s32, q8s32);
-
-    d2s16  = vqrshrn_n_s32(q1s32, 14);
-    d3s16  = vqrshrn_n_s32(q2s32, 14);
-    d24s16 = vqrshrn_n_s32(q12s32, 14);
-    d25s16 = vqrshrn_n_s32(q15s32, 14);
-    d6s16  = vqrshrn_n_s32(q3s32, 14);
-    d7s16  = vqrshrn_n_s32(q4s32, 14);
-    *q12s16 = vcombine_s16(d24s16, d25s16);
-
-    d0s16 = vdup_n_s16(cospi_10_64);
-    d1s16 = vdup_n_s16(cospi_22_64);
-    q4s32 = vmull_s16(d26s16, d0s16);
-    q5s32 = vmull_s16(d27s16, d0s16);
-    q2s32 = vmull_s16(d26s16, d1s16);
-    q6s32 = vmull_s16(d27s16, d1s16);
-
-    d30s16 = vdup_n_s16(cospi_26_64);
-    d31s16 = vdup_n_s16(cospi_6_64);
-
-    q4s32 = vmlal_s16(q4s32, d20s16, d1s16);
-    q5s32 = vmlal_s16(q5s32, d21s16, d1s16);
-    q2s32 = vmlsl_s16(q2s32, d20s16, d0s16);
-    q6s32 = vmlsl_s16(q6s32, d21s16, d0s16);
-
-    q0s32 = vmull_s16(d18s16, d30s16);
-    q13s32 = vmull_s16(d19s16, d30s16);
-
-    q0s32 = vmlal_s16(q0s32, d28s16, d31s16);
-    q13s32 = vmlal_s16(q13s32, d29s16, d31s16);
-
-    q10s32 = vmull_s16(d18s16, d31s16);
-    q9s32 = vmull_s16(d19s16, d31s16);
-
-    q10s32 = vmlsl_s16(q10s32, d28s16, d30s16);
-    q9s32 = vmlsl_s16(q9s32, d29s16, d30s16);
-
-    q14s32 = vaddq_s32(q2s32, q10s32);
-    q15s32 = vaddq_s32(q6s32, q9s32);
-    q2s32 = vsubq_s32(q2s32, q10s32);
-    q6s32 = vsubq_s32(q6s32, q9s32);
-
-    d28s16 = vqrshrn_n_s32(q14s32, 14);
-    d29s16 = vqrshrn_n_s32(q15s32, 14);
-    d4s16 = vqrshrn_n_s32(q2s32, 14);
-    d5s16 = vqrshrn_n_s32(q6s32, 14);
-    *q14s16 = vcombine_s16(d28s16, d29s16);
-
-    q9s32 = vaddq_s32(q4s32, q0s32);
-    q10s32 = vaddq_s32(q5s32, q13s32);
-    q4s32 = vsubq_s32(q4s32, q0s32);
-    q5s32 = vsubq_s32(q5s32, q13s32);
-
-    d30s16 = vdup_n_s16(cospi_8_64);
-    d31s16 = vdup_n_s16(cospi_24_64);
-
-    d18s16 = vqrshrn_n_s32(q9s32, 14);
-    d19s16 = vqrshrn_n_s32(q10s32, 14);
-    d8s16 = vqrshrn_n_s32(q4s32, 14);
-    d9s16 = vqrshrn_n_s32(q5s32, 14);
-    *q9s16 = vcombine_s16(d18s16, d19s16);
-
-    q5s32 = vmull_s16(d2s16, d30s16);
-    q6s32 = vmull_s16(d3s16, d30s16);
-    q7s32 = vmull_s16(d2s16, d31s16);
-    q0s32 = vmull_s16(d3s16, d31s16);
-
-    q5s32 = vmlal_s16(q5s32, d6s16, d31s16);
-    q6s32 = vmlal_s16(q6s32, d7s16, d31s16);
-    q7s32 = vmlsl_s16(q7s32, d6s16, d30s16);
-    q0s32 = vmlsl_s16(q0s32, d7s16, d30s16);
-
-    q1s32 = vmull_s16(d4s16, d30s16);
-    q3s32 = vmull_s16(d5s16, d30s16);
-    q10s32 = vmull_s16(d4s16, d31s16);
-    q2s32 = vmull_s16(d5s16, d31s16);
-
-    q1s32 = vmlsl_s16(q1s32, d8s16, d31s16);
-    q3s32 = vmlsl_s16(q3s32, d9s16, d31s16);
-    q10s32 = vmlal_s16(q10s32, d8s16, d30s16);
-    q2s32 = vmlal_s16(q2s32, d9s16, d30s16);
-
-    *q8s16 = vaddq_s16(*q11s16, *q9s16);
-    *q11s16 = vsubq_s16(*q11s16, *q9s16);
-    q4s16 = vaddq_s16(*q12s16, *q14s16);
-    *q12s16 = vsubq_s16(*q12s16, *q14s16);
-
-    q14s32 = vaddq_s32(q5s32, q1s32);
-    q15s32 = vaddq_s32(q6s32, q3s32);
-    q5s32 = vsubq_s32(q5s32, q1s32);
-    q6s32 = vsubq_s32(q6s32, q3s32);
-
-    d18s16 = vqrshrn_n_s32(q14s32, 14);
-    d19s16 = vqrshrn_n_s32(q15s32, 14);
-    d10s16 = vqrshrn_n_s32(q5s32, 14);
-    d11s16 = vqrshrn_n_s32(q6s32, 14);
-    *q9s16 = vcombine_s16(d18s16, d19s16);
-
-    q1s32 = vaddq_s32(q7s32, q10s32);
-    q3s32 = vaddq_s32(q0s32, q2s32);
-    q7s32 = vsubq_s32(q7s32, q10s32);
-    q0s32 = vsubq_s32(q0s32, q2s32);
-
-    d28s16 = vqrshrn_n_s32(q1s32, 14);
-    d29s16 = vqrshrn_n_s32(q3s32, 14);
-    d14s16 = vqrshrn_n_s32(q7s32, 14);
-    d15s16 = vqrshrn_n_s32(q0s32, 14);
-    *q14s16 = vcombine_s16(d28s16, d29s16);
-
-    d30s16 = vdup_n_s16(cospi_16_64);
-
-    d22s16 = vget_low_s16(*q11s16);
-    d23s16 = vget_high_s16(*q11s16);
-    q2s32 = vmull_s16(d22s16, d30s16);
-    q3s32 = vmull_s16(d23s16, d30s16);
-    q13s32 = vmull_s16(d22s16, d30s16);
-    q1s32 = vmull_s16(d23s16, d30s16);
-
-    d24s16 = vget_low_s16(*q12s16);
-    d25s16 = vget_high_s16(*q12s16);
-    q2s32 = vmlal_s16(q2s32, d24s16, d30s16);
-    q3s32 = vmlal_s16(q3s32, d25s16, d30s16);
-    q13s32 = vmlsl_s16(q13s32, d24s16, d30s16);
-    q1s32 = vmlsl_s16(q1s32, d25s16, d30s16);
-
-    d4s16 = vqrshrn_n_s32(q2s32, 14);
-    d5s16 = vqrshrn_n_s32(q3s32, 14);
-    d24s16 = vqrshrn_n_s32(q13s32, 14);
-    d25s16 = vqrshrn_n_s32(q1s32, 14);
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    *q12s16 = vcombine_s16(d24s16, d25s16);
-
-    q13s32 = vmull_s16(d10s16, d30s16);
-    q1s32 = vmull_s16(d11s16, d30s16);
-    q11s32 = vmull_s16(d10s16, d30s16);
-    q0s32 = vmull_s16(d11s16, d30s16);
-
-    q13s32 = vmlal_s16(q13s32, d14s16, d30s16);
-    q1s32 = vmlal_s16(q1s32, d15s16, d30s16);
-    q11s32 = vmlsl_s16(q11s32, d14s16, d30s16);
-    q0s32 = vmlsl_s16(q0s32, d15s16, d30s16);
-
-    d20s16 = vqrshrn_n_s32(q13s32, 14);
-    d21s16 = vqrshrn_n_s32(q1s32, 14);
-    d12s16 = vqrshrn_n_s32(q11s32, 14);
-    d13s16 = vqrshrn_n_s32(q0s32, 14);
-    *q10s16 = vcombine_s16(d20s16, d21s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    q5s16 = vdupq_n_s16(0);
-
-    *q9s16  = vsubq_s16(q5s16, *q9s16);
-    *q11s16 = vsubq_s16(q5s16, q2s16);
-    *q13s16 = vsubq_s16(q5s16, q6s16);
-    *q15s16 = vsubq_s16(q5s16, q4s16);
-    return;
+static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
+                               int16x8_t *q10s16, int16x8_t *q11s16,
+                               int16x8_t *q12s16, int16x8_t *q13s16,
+                               int16x8_t *q14s16, int16x8_t *q15s16) {
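+  // One 1-D pass of the 8-point IADST in Q14 fixed point: stage-1 butterflies
+  // with the odd cospi constants, a cospi_8/24 rotation, then a final
+  // cospi_16 stage and negation of the odd-indexed outputs.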
+  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
+  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  int16x8_t q2s16, q4s16, q5s16, q6s16;
+  int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32;
+  int32x4_t q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32;
+
+  d16s16 = vget_low_s16(*q8s16);
+  d17s16 = vget_high_s16(*q8s16);
+  d18s16 = vget_low_s16(*q9s16);
+  d19s16 = vget_high_s16(*q9s16);
+  d20s16 = vget_low_s16(*q10s16);
+  d21s16 = vget_high_s16(*q10s16);
+  d22s16 = vget_low_s16(*q11s16);
+  d23s16 = vget_high_s16(*q11s16);
+  d24s16 = vget_low_s16(*q12s16);
+  d25s16 = vget_high_s16(*q12s16);
+  d26s16 = vget_low_s16(*q13s16);
+  d27s16 = vget_high_s16(*q13s16);
+  d28s16 = vget_low_s16(*q14s16);
+  d29s16 = vget_high_s16(*q14s16);
+  d30s16 = vget_low_s16(*q15s16);
+  d31s16 = vget_high_s16(*q15s16);
+
+  d14s16 = vdup_n_s16(cospi_2_64);
+  d15s16 = vdup_n_s16(cospi_30_64);
+
+  q1s32 = vmull_s16(d30s16, d14s16);
+  q2s32 = vmull_s16(d31s16, d14s16);
+  q3s32 = vmull_s16(d30s16, d15s16);
+  q4s32 = vmull_s16(d31s16, d15s16);
+
+  d30s16 = vdup_n_s16(cospi_18_64);
+  d31s16 = vdup_n_s16(cospi_14_64);
+
+  q1s32 = vmlal_s16(q1s32, d16s16, d15s16);
+  q2s32 = vmlal_s16(q2s32, d17s16, d15s16);
+  q3s32 = vmlsl_s16(q3s32, d16s16, d14s16);
+  q4s32 = vmlsl_s16(q4s32, d17s16, d14s16);
+
+  q5s32 = vmull_s16(d22s16, d30s16);
+  q6s32 = vmull_s16(d23s16, d30s16);
+  q7s32 = vmull_s16(d22s16, d31s16);
+  q8s32 = vmull_s16(d23s16, d31s16);
+
+  q5s32 = vmlal_s16(q5s32, d24s16, d31s16);
+  q6s32 = vmlal_s16(q6s32, d25s16, d31s16);
+  q7s32 = vmlsl_s16(q7s32, d24s16, d30s16);
+  q8s32 = vmlsl_s16(q8s32, d25s16, d30s16);
+
+  q11s32 = vaddq_s32(q1s32, q5s32);
+  q12s32 = vaddq_s32(q2s32, q6s32);
+  q1s32 = vsubq_s32(q1s32, q5s32);
+  q2s32 = vsubq_s32(q2s32, q6s32);
+
+  d22s16 = vqrshrn_n_s32(q11s32, 14);
+  d23s16 = vqrshrn_n_s32(q12s32, 14);
+  *q11s16 = vcombine_s16(d22s16, d23s16);
+
+  q12s32 = vaddq_s32(q3s32, q7s32);
+  q15s32 = vaddq_s32(q4s32, q8s32);
+  q3s32 = vsubq_s32(q3s32, q7s32);
+  q4s32 = vsubq_s32(q4s32, q8s32);
+
+  d2s16 = vqrshrn_n_s32(q1s32, 14);
+  d3s16 = vqrshrn_n_s32(q2s32, 14);
+  d24s16 = vqrshrn_n_s32(q12s32, 14);
+  d25s16 = vqrshrn_n_s32(q15s32, 14);
+  d6s16 = vqrshrn_n_s32(q3s32, 14);
+  d7s16 = vqrshrn_n_s32(q4s32, 14);
+  *q12s16 = vcombine_s16(d24s16, d25s16);
+
+  d0s16 = vdup_n_s16(cospi_10_64);
+  d1s16 = vdup_n_s16(cospi_22_64);
+  q4s32 = vmull_s16(d26s16, d0s16);
+  q5s32 = vmull_s16(d27s16, d0s16);
+  q2s32 = vmull_s16(d26s16, d1s16);
+  q6s32 = vmull_s16(d27s16, d1s16);
+
+  d30s16 = vdup_n_s16(cospi_26_64);
+  d31s16 = vdup_n_s16(cospi_6_64);
+
+  q4s32 = vmlal_s16(q4s32, d20s16, d1s16);
+  q5s32 = vmlal_s16(q5s32, d21s16, d1s16);
+  q2s32 = vmlsl_s16(q2s32, d20s16, d0s16);
+  q6s32 = vmlsl_s16(q6s32, d21s16, d0s16);
+
+  q0s32 = vmull_s16(d18s16, d30s16);
+  q13s32 = vmull_s16(d19s16, d30s16);
+
+  q0s32 = vmlal_s16(q0s32, d28s16, d31s16);
+  q13s32 = vmlal_s16(q13s32, d29s16, d31s16);
+
+  q10s32 = vmull_s16(d18s16, d31s16);
+  q9s32 = vmull_s16(d19s16, d31s16);
+
+  q10s32 = vmlsl_s16(q10s32, d28s16, d30s16);
+  q9s32 = vmlsl_s16(q9s32, d29s16, d30s16);
+
+  q14s32 = vaddq_s32(q2s32, q10s32);
+  q15s32 = vaddq_s32(q6s32, q9s32);
+  q2s32 = vsubq_s32(q2s32, q10s32);
+  q6s32 = vsubq_s32(q6s32, q9s32);
+
+  d28s16 = vqrshrn_n_s32(q14s32, 14);
+  d29s16 = vqrshrn_n_s32(q15s32, 14);
+  d4s16 = vqrshrn_n_s32(q2s32, 14);
+  d5s16 = vqrshrn_n_s32(q6s32, 14);
+  *q14s16 = vcombine_s16(d28s16, d29s16);
+
+  q9s32 = vaddq_s32(q4s32, q0s32);
+  q10s32 = vaddq_s32(q5s32, q13s32);
+  q4s32 = vsubq_s32(q4s32, q0s32);
+  q5s32 = vsubq_s32(q5s32, q13s32);
+
+  d30s16 = vdup_n_s16(cospi_8_64);
+  d31s16 = vdup_n_s16(cospi_24_64);
+
+  d18s16 = vqrshrn_n_s32(q9s32, 14);
+  d19s16 = vqrshrn_n_s32(q10s32, 14);
+  d8s16 = vqrshrn_n_s32(q4s32, 14);
+  d9s16 = vqrshrn_n_s32(q5s32, 14);
+  *q9s16 = vcombine_s16(d18s16, d19s16);
+
+  q5s32 = vmull_s16(d2s16, d30s16);
+  q6s32 = vmull_s16(d3s16, d30s16);
+  q7s32 = vmull_s16(d2s16, d31s16);
+  q0s32 = vmull_s16(d3s16, d31s16);
+
+  q5s32 = vmlal_s16(q5s32, d6s16, d31s16);
+  q6s32 = vmlal_s16(q6s32, d7s16, d31s16);
+  q7s32 = vmlsl_s16(q7s32, d6s16, d30s16);
+  q0s32 = vmlsl_s16(q0s32, d7s16, d30s16);
+
+  q1s32 = vmull_s16(d4s16, d30s16);
+  q3s32 = vmull_s16(d5s16, d30s16);
+  q10s32 = vmull_s16(d4s16, d31s16);
+  q2s32 = vmull_s16(d5s16, d31s16);
+
+  q1s32 = vmlsl_s16(q1s32, d8s16, d31s16);
+  q3s32 = vmlsl_s16(q3s32, d9s16, d31s16);
+  q10s32 = vmlal_s16(q10s32, d8s16, d30s16);
+  q2s32 = vmlal_s16(q2s32, d9s16, d30s16);
+
+  *q8s16 = vaddq_s16(*q11s16, *q9s16);
+  *q11s16 = vsubq_s16(*q11s16, *q9s16);
+  q4s16 = vaddq_s16(*q12s16, *q14s16);
+  *q12s16 = vsubq_s16(*q12s16, *q14s16);
+
+  q14s32 = vaddq_s32(q5s32, q1s32);
+  q15s32 = vaddq_s32(q6s32, q3s32);
+  q5s32 = vsubq_s32(q5s32, q1s32);
+  q6s32 = vsubq_s32(q6s32, q3s32);
+
+  d18s16 = vqrshrn_n_s32(q14s32, 14);
+  d19s16 = vqrshrn_n_s32(q15s32, 14);
+  d10s16 = vqrshrn_n_s32(q5s32, 14);
+  d11s16 = vqrshrn_n_s32(q6s32, 14);
+  *q9s16 = vcombine_s16(d18s16, d19s16);
+
+  q1s32 = vaddq_s32(q7s32, q10s32);
+  q3s32 = vaddq_s32(q0s32, q2s32);
+  q7s32 = vsubq_s32(q7s32, q10s32);
+  q0s32 = vsubq_s32(q0s32, q2s32);
+
+  d28s16 = vqrshrn_n_s32(q1s32, 14);
+  d29s16 = vqrshrn_n_s32(q3s32, 14);
+  d14s16 = vqrshrn_n_s32(q7s32, 14);
+  d15s16 = vqrshrn_n_s32(q0s32, 14);
+  *q14s16 = vcombine_s16(d28s16, d29s16);
+
+  d30s16 = vdup_n_s16(cospi_16_64);
+
+  d22s16 = vget_low_s16(*q11s16);
+  d23s16 = vget_high_s16(*q11s16);
+  q2s32 = vmull_s16(d22s16, d30s16);
+  q3s32 = vmull_s16(d23s16, d30s16);
+  q13s32 = vmull_s16(d22s16, d30s16);
+  q1s32 = vmull_s16(d23s16, d30s16);
+
+  d24s16 = vget_low_s16(*q12s16);
+  d25s16 = vget_high_s16(*q12s16);
+  q2s32 = vmlal_s16(q2s32, d24s16, d30s16);
+  q3s32 = vmlal_s16(q3s32, d25s16, d30s16);
+  q13s32 = vmlsl_s16(q13s32, d24s16, d30s16);
+  q1s32 = vmlsl_s16(q1s32, d25s16, d30s16);
+
+  d4s16 = vqrshrn_n_s32(q2s32, 14);
+  d5s16 = vqrshrn_n_s32(q3s32, 14);
+  d24s16 = vqrshrn_n_s32(q13s32, 14);
+  d25s16 = vqrshrn_n_s32(q1s32, 14);
+  q2s16 = vcombine_s16(d4s16, d5s16);
+  *q12s16 = vcombine_s16(d24s16, d25s16);
+
+  q13s32 = vmull_s16(d10s16, d30s16);
+  q1s32 = vmull_s16(d11s16, d30s16);
+  q11s32 = vmull_s16(d10s16, d30s16);
+  q0s32 = vmull_s16(d11s16, d30s16);
+
+  q13s32 = vmlal_s16(q13s32, d14s16, d30s16);
+  q1s32 = vmlal_s16(q1s32, d15s16, d30s16);
+  q11s32 = vmlsl_s16(q11s32, d14s16, d30s16);
+  q0s32 = vmlsl_s16(q0s32, d15s16, d30s16);
+
+  d20s16 = vqrshrn_n_s32(q13s32, 14);
+  d21s16 = vqrshrn_n_s32(q1s32, 14);
+  d12s16 = vqrshrn_n_s32(q11s32, 14);
+  d13s16 = vqrshrn_n_s32(q0s32, 14);
+  *q10s16 = vcombine_s16(d20s16, d21s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  q5s16 = vdupq_n_s16(0);
+
+  *q9s16 = vsubq_s16(q5s16, *q9s16);
+  *q11s16 = vsubq_s16(q5s16, q2s16);
+  *q13s16 = vsubq_s16(q5s16, q6s16);
+  *q15s16 = vsubq_s16(q5s16, q4s16);
+  return;
 }
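
A note on the fixed-point idiom used throughout IADST8X8_1D above: the cospi_*_64 constants are Q14 values (DCT_CONST_BITS == 14 in this tree), each vmull/vmlal/vmlsl group accumulates a dot product in 32 bits, and vqrshrn_n_s32(x, 14) rounds, shifts, and saturate-narrows back to int16. A minimal scalar sketch of one butterfly stage follows; the names round_shift_q14 and butterfly are illustrative only, and the sketch omits the saturation vqrshrn applies on the narrow.

#include <stdint.h>

#define DCT_CONST_BITS 14 /* cospi_*_64 constants are Q14 fixed point */

/* Round-to-nearest shift back from Q14, as vqrshrn_n_s32(x, 14) does per
 * lane (the NEON op additionally saturates to the int16 range). */
static int16_t round_shift_q14(int32_t x) {
  return (int16_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* One butterfly: each vmull/vmlal and vmull/vmlsl pair above computes these
 * two dot products before the rounding narrow. */
static void butterfly(int16_t a, int16_t b, int16_t c0, int16_t c1,
                      int16_t *lo, int16_t *hi) {
  *lo = round_shift_q14(a * c0 + b * c1);
  *hi = round_shift_q14(a * c1 - b * c0);
}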
 
 void vp10_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
-                            int dest_stride, int tx_type) {
-    int i;
-    uint8_t *d1, *d2;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8;
-    uint64x1_t d0u64, d1u64, d2u64, d3u64;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    uint16x8_t q8u16, q9u16, q10u16, q11u16;
-
-    q8s16  = vld1q_s16(input);
-    q9s16  = vld1q_s16(input + 8);
-    q10s16 = vld1q_s16(input + 8 * 2);
-    q11s16 = vld1q_s16(input + 8 * 3);
-    q12s16 = vld1q_s16(input + 8 * 4);
-    q13s16 = vld1q_s16(input + 8 * 5);
-    q14s16 = vld1q_s16(input + 8 * 6);
-    q15s16 = vld1q_s16(input + 8 * 7);
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    switch (tx_type) {
-      case 0:  // idct_idct is not supported. Fall back to C
-        vp10_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
-        return;
-        break;
-      case 1:  // iadst_idct
-        // generate IDCT constants
-        // GENERATE_IDCT_CONSTANTS
-
-        // first transform rows
-        IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-                   &q12s16, &q13s16, &q14s16, &q15s16);
-
-        // transpose the matrix
-        TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                     &q12s16, &q13s16, &q14s16, &q15s16);
-
-        // generate IADST constants
-        // GENERATE_IADST_CONSTANTS
-
-        // then transform columns
-        IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-                    &q12s16, &q13s16, &q14s16, &q15s16);
-        break;
-      case 2:  // idct_iadst
-        // generate IADST constants
-        // GENERATE_IADST_CONSTANTS
-
-        // first transform rows
-        IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-                    &q12s16, &q13s16, &q14s16, &q15s16);
-
-        // transpose the matrix
-        TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                     &q12s16, &q13s16, &q14s16, &q15s16);
-
-        // generate IDCT constants
-        // GENERATE_IDCT_CONSTANTS
-
-        // then transform columns
-        IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-                   &q12s16, &q13s16, &q14s16, &q15s16);
-        break;
-      case 3:  // iadst_iadst
-        // generate IADST constants
-        // GENERATE_IADST_CONSTANTS
-
-        // first transform rows
-        IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-                    &q12s16, &q13s16, &q14s16, &q15s16);
-
-        // transpose the matrix
-        TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                     &q12s16, &q13s16, &q14s16, &q15s16);
-
-        // then transform columns
-        IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-                    &q12s16, &q13s16, &q14s16, &q15s16);
-        break;
-      default:  // iadst_idct
-        assert(0);
-        break;
+                             int dest_stride, int tx_type) {
+  int i;
+  uint8_t *d1, *d2;
+  uint8x8_t d0u8, d1u8, d2u8, d3u8;
+  uint64x1_t d0u64, d1u64, d2u64, d3u64;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  uint16x8_t q8u16, q9u16, q10u16, q11u16;
+
+  q8s16 = vld1q_s16(input);
+  q9s16 = vld1q_s16(input + 8);
+  q10s16 = vld1q_s16(input + 8 * 2);
+  q11s16 = vld1q_s16(input + 8 * 3);
+  q12s16 = vld1q_s16(input + 8 * 4);
+  q13s16 = vld1q_s16(input + 8 * 5);
+  q14s16 = vld1q_s16(input + 8 * 6);
+  q15s16 = vld1q_s16(input + 8 * 7);
+
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
+
+  switch (tx_type) {
+    case 0:  // idct_idct is not supported. Fall back to C
+      vp10_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
+      return;
+    case 1:  // iadst_idct
+      // generate IDCT constants
+      // GENERATE_IDCT_CONSTANTS
+
+      // first transform rows
+      IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                 &q15s16);
+
+      // transpose the matrix
+      TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                   &q15s16);
+
+      // generate IADST constants
+      // GENERATE_IADST_CONSTANTS
+
+      // then transform columns
+      IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                  &q15s16);
+      break;
+    case 2:  // idct_iadst
+      // generate IADST constants
+      // GENERATE_IADST_CONSTANTS
+
+      // first transform rows
+      IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                  &q15s16);
+
+      // transpose the matrix
+      TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                   &q15s16);
+
+      // generate IDCT constants
+      // GENERATE_IDCT_CONSTANTS
+
+      // then transform columns
+      IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                 &q15s16);
+      break;
+    case 3:  // iadst_iadst
+      // generate IADST constants
+      // GENERATE_IADST_CONSTANTS
+
+      // first transform rows
+      IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                  &q15s16);
+
+      // transpose the matrix
+      TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                   &q15s16);
+
+      // then transform columns
+      IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+                  &q15s16);
+      break;
+    default:  // invalid tx_type
+      assert(0);
+      break;
+  }
+
+  q8s16 = vrshrq_n_s16(q8s16, 5);
+  q9s16 = vrshrq_n_s16(q9s16, 5);
+  q10s16 = vrshrq_n_s16(q10s16, 5);
+  q11s16 = vrshrq_n_s16(q11s16, 5);
+  q12s16 = vrshrq_n_s16(q12s16, 5);
+  q13s16 = vrshrq_n_s16(q13s16, 5);
+  q14s16 = vrshrq_n_s16(q14s16, 5);
+  q15s16 = vrshrq_n_s16(q15s16, 5);
+
+  for (d1 = d2 = dest, i = 0; i < 2; i++) {
+    if (i != 0) {
+      q8s16 = q12s16;
+      q9s16 = q13s16;
+      q10s16 = q14s16;
+      q11s16 = q15s16;
     }
 
-    q8s16 = vrshrq_n_s16(q8s16, 5);
-    q9s16 = vrshrq_n_s16(q9s16, 5);
-    q10s16 = vrshrq_n_s16(q10s16, 5);
-    q11s16 = vrshrq_n_s16(q11s16, 5);
-    q12s16 = vrshrq_n_s16(q12s16, 5);
-    q13s16 = vrshrq_n_s16(q13s16, 5);
-    q14s16 = vrshrq_n_s16(q14s16, 5);
-    q15s16 = vrshrq_n_s16(q15s16, 5);
-
-    for (d1 = d2 = dest, i = 0; i < 2; i++) {
-        if (i != 0) {
-            q8s16 = q12s16;
-            q9s16 = q13s16;
-            q10s16 = q14s16;
-            q11s16 = q15s16;
-        }
-
-        d0u64 = vld1_u64((uint64_t *)d1);
-        d1 += dest_stride;
-        d1u64 = vld1_u64((uint64_t *)d1);
-        d1 += dest_stride;
-        d2u64 = vld1_u64((uint64_t *)d1);
-        d1 += dest_stride;
-        d3u64 = vld1_u64((uint64_t *)d1);
-        d1 += dest_stride;
-
-        q8u16  = vaddw_u8(vreinterpretq_u16_s16(q8s16),
-                          vreinterpret_u8_u64(d0u64));
-        q9u16  = vaddw_u8(vreinterpretq_u16_s16(q9s16),
-                          vreinterpret_u8_u64(d1u64));
-        q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16),
-                          vreinterpret_u8_u64(d2u64));
-        q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16),
-                          vreinterpret_u8_u64(d3u64));
-
-        d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-        d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-        d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
-        d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
-
-        vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
-        d2 += dest_stride;
-        vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
-        d2 += dest_stride;
-        vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
-        d2 += dest_stride;
-        vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
-        d2 += dest_stride;
-    }
-    return;
+    d0u64 = vld1_u64((uint64_t *)d1);
+    d1 += dest_stride;
+    d1u64 = vld1_u64((uint64_t *)d1);
+    d1 += dest_stride;
+    d2u64 = vld1_u64((uint64_t *)d1);
+    d1 += dest_stride;
+    d3u64 = vld1_u64((uint64_t *)d1);
+    d1 += dest_stride;
+
+    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64));
+    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64));
+    q10u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64));
+    q11u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64));
+
+    d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
+    d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
+
+    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
+    d2 += dest_stride;
+    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
+    d2 += dest_stride;
+    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
+    d2 += dest_stride;
+    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
+    d2 += dest_stride;
+  }
+  return;
 }
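
The store-back loop in vp10_iht8x8_64_add_neon implements clip_pixel(pred + residual) in two vector steps: vaddw_u8 widens each 8-bit prediction lane and adds it to the 16-bit residual (the u16/s16 reinterprets are bit-pattern no-ops, so two's-complement addition gives the right signed sum), and vqmovun_s16 saturates the result back to [0, 255]. A scalar sketch of the per-pixel effect, with an illustrative function name:

#include <stdint.h>

static uint8_t add_residual(uint8_t pred, int16_t res) {
  int v = pred + res;
  if (v < 0) v = 0;     /* vqmovun_s16 clamps negative sums to 0 */
  if (v > 255) v = 255; /* ... and overflows to 255 */
  return (uint8_t)v;
}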
diff --git a/vp10/common/blockd.c b/vp10/common/blockd.c
index b6f910ff68f7a800c487e3009c9fa8d73920db3d..031ca1e92c1c0a78b2bfaa5b2fe66b99fe0e434a 100644
--- a/vp10/common/blockd.c
+++ b/vp10/common/blockd.c
@@ -11,10 +11,9 @@
 #include "vp10/common/blockd.h"
 
 PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
-                                    const MODE_INFO *left_mi, int b) {
+                                     const MODE_INFO *left_mi, int b) {
   if (b == 0 || b == 2) {
-    if (!left_mi || is_inter_block(&left_mi->mbmi))
-      return DC_PRED;
+    if (!left_mi || is_inter_block(&left_mi->mbmi)) return DC_PRED;
 
     return get_y_mode(left_mi, b + 1);
   } else {
@@ -24,10 +23,9 @@ PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
 }
 
 PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
-                                     const MODE_INFO *above_mi, int b) {
+                                      const MODE_INFO *above_mi, int b) {
   if (b == 0 || b == 1) {
-    if (!above_mi || is_inter_block(&above_mi->mbmi))
-      return DC_PRED;
+    if (!above_mi || is_inter_block(&above_mi->mbmi)) return DC_PRED;
 
     return get_y_mode(above_mi, b + 2);
   } else {
@@ -40,12 +38,11 @@ void vp10_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg) {
   const struct macroblockd_plane *const pd = &xd->plane[plane];
-  const MB_MODE_INFO* mbmi = &xd->mi[0]->mbmi;
+  const MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
   // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
   // transform size varies per plane, look it up in a common way.
-  const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd)
-                                : mbmi->tx_size;
+  const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
   const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
   const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
   const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
@@ -55,10 +52,13 @@ void vp10_foreach_transformed_block_in_plane(
   // If mb_to_right_edge is < 0 we are in a situation in which
   // the current block size extends into the UMV and we won't
   // visit the sub blocks that are wholly within the UMV.
-  const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 :
-      xd->mb_to_right_edge >> (5 + pd->subsampling_x));
-  const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 :
-      xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+  const int max_blocks_wide =
+      num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >>
+                                                       (5 + pd->subsampling_x));
+  const int max_blocks_high =
+      num_4x4_h + (xd->mb_to_bottom_edge >= 0
+                       ? 0
+                       : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
   const int extra_step = ((num_4x4_w - max_blocks_wide) >> tx_size) * step;
 
   // Keep track of the row and column of the blocks we use so that we know
@@ -73,10 +73,10 @@ void vp10_foreach_transformed_block_in_plane(
   }
 }
 
-void vp10_foreach_transformed_block(const MACROBLOCKD* const xd,
-                                   BLOCK_SIZE bsize,
-                                   foreach_transformed_block_visitor visit,
-                                   void *arg) {
+void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
+                                    BLOCK_SIZE bsize,
+                                    foreach_transformed_block_visitor visit,
+                                    void *arg) {
   int plane;
 
   for (plane = 0; plane < MAX_MB_PLANE; ++plane)
@@ -84,8 +84,8 @@ void vp10_foreach_transformed_block(const MACROBLOCKD* const xd,
 }
 
 void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
-                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
-                      int aoff, int loff) {
+                       BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+                       int aoff, int loff) {
   ENTROPY_CONTEXT *const a = pd->above_context + aoff;
   ENTROPY_CONTEXT *const l = pd->left_context + loff;
   const int tx_size_in_blocks = 1 << tx_size;
@@ -99,10 +99,8 @@ void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
     if (above_contexts + aoff > blocks_wide)
       above_contexts = blocks_wide - aoff;
 
-    for (i = 0; i < above_contexts; ++i)
-      a[i] = has_eob;
-    for (i = above_contexts; i < tx_size_in_blocks; ++i)
-      a[i] = 0;
+    for (i = 0; i < above_contexts; ++i) a[i] = has_eob;
+    for (i = above_contexts; i < tx_size_in_blocks; ++i) a[i] = 0;
   } else {
     memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
   }
@@ -113,13 +111,10 @@ void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
     const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] +
                             (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
     int left_contexts = tx_size_in_blocks;
-    if (left_contexts + loff > blocks_high)
-      left_contexts = blocks_high - loff;
+    if (left_contexts + loff > blocks_high) left_contexts = blocks_high - loff;
 
-    for (i = 0; i < left_contexts; ++i)
-      l[i] = has_eob;
-    for (i = left_contexts; i < tx_size_in_blocks; ++i)
-      l[i] = 0;
+    for (i = 0; i < left_contexts; ++i) l[i] = has_eob;
+    for (i = left_contexts; i < tx_size_in_blocks; ++i) l[i] = 0;
   } else {
     memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
   }
diff --git a/vp10/common/blockd.h b/vp10/common/blockd.h
index a4a8f8dd2b5f50ffc56baecbac1ecbe4644e85cd..2b5286027f38447173c064cade5ac5589cb21a74 100644
--- a/vp10/common/blockd.h
+++ b/vp10/common/blockd.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_COMMON_BLOCKD_H_
 #define VP10_COMMON_BLOCKD_H_
 
@@ -55,12 +54,12 @@ typedef struct {
 // decoder implementation modules critically rely on the defined entry values
 // specified herein. They should be refactored concurrently.
 
-#define NONE           -1
-#define INTRA_FRAME     0
-#define LAST_FRAME      1
-#define GOLDEN_FRAME    2
-#define ALTREF_FRAME    3
-#define MAX_REF_FRAMES  4
+#define NONE -1
+#define INTRA_FRAME 0
+#define LAST_FRAME 1
+#define GOLDEN_FRAME 2
+#define ALTREF_FRAME 3
+#define MAX_REF_FRAMES 4
 typedef int8_t MV_REFERENCE_FRAME;
 
 // This structure now relates to 8x8 block regions.
@@ -94,8 +93,7 @@ typedef struct MODE_INFO {
 } MODE_INFO;
 
 static INLINE PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) {
-  return mi->mbmi.sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode
-                                      : mi->mbmi.mode;
+  return mi->mbmi.sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode : mi->mbmi.mode;
 }
 
 static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
@@ -107,15 +105,12 @@ static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
 }
 
 PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
-                                    const MODE_INFO *left_mi, int b);
+                                     const MODE_INFO *left_mi, int b);
 
 PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
-                                     const MODE_INFO *above_mi, int b);
+                                      const MODE_INFO *above_mi, int b);
 
-enum mv_precision {
-  MV_PRECISION_Q3,
-  MV_PRECISION_Q4
-};
+enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };
 
 struct buf_2d {
   uint8_t *buf;
@@ -143,7 +138,7 @@ struct macroblockd_plane {
   const int16_t *dequant;
 };
 
-#define BLOCK_OFFSET(x, i) ((x) + (i) * 16)
+#define BLOCK_OFFSET(x, i) ((x) + (i)*16)
 
 typedef struct RefBuffer {
   // TODO(dkovalev): idx is not really required and should be removed, now it
@@ -226,7 +221,7 @@ static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type, const MACROBLOCKD *xd,
   const MODE_INFO *const mi = xd->mi[0];
   const MB_MODE_INFO *const mbmi = &mi->mbmi;
 
-  (void) block_idx;
+  (void)block_idx;
   if (plane_type != PLANE_TYPE_Y || xd->lossless[mbmi->segment_id] ||
       mbmi->tx_size >= TX_32X32)
     return DCT_DCT;
@@ -252,8 +247,8 @@ static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi,
                              pd->subsampling_y);
 }
 
-static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
-    const struct macroblockd_plane *pd) {
+static INLINE BLOCK_SIZE
+get_plane_block_size(BLOCK_SIZE bsize, const struct macroblockd_plane *pd) {
   return ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y];
 }
 
@@ -272,21 +267,20 @@ static INLINE void reset_skip_context(MACROBLOCKD *xd, BLOCK_SIZE bsize) {
 typedef void (*foreach_transformed_block_visitor)(int plane, int block,
                                                   int blk_row, int blk_col,
                                                   BLOCK_SIZE plane_bsize,
-                                                  TX_SIZE tx_size,
-                                                  void *arg);
+                                                  TX_SIZE tx_size, void *arg);
 
 void vp10_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg);
 
-
-void vp10_foreach_transformed_block(
-    const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
-    foreach_transformed_block_visitor visit, void *arg);
+void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
+                                    BLOCK_SIZE bsize,
+                                    foreach_transformed_block_visitor visit,
+                                    void *arg);
 
 void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
-                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
-                      int aoff, int loff);
+                       BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+                       int aoff, int loff);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/common/common.h b/vp10/common/common.h
index a984113c21971b7e59202c0c067bb6dbac3578f8..ed2f4238fd602db6ac4858475efd8e28d48d00f9 100644
--- a/vp10/common/common.h
+++ b/vp10/common/common.h
@@ -26,15 +26,17 @@ extern "C" {
 #endif
 
 // Only need this for fixed-size arrays; for structs, just assign.
-#define vp10_copy(dest, src) {            \
+#define vp10_copy(dest, src)             \
+  {                                      \
     assert(sizeof(dest) == sizeof(src)); \
-    memcpy(dest, src, sizeof(src));  \
+    memcpy(dest, src, sizeof(src));      \
   }
 
 // Use this for variably-sized arrays.
-#define vp10_copy_array(dest, src, n) {       \
-    assert(sizeof(*dest) == sizeof(*src));   \
-    memcpy(dest, src, n * sizeof(*src)); \
+#define vp10_copy_array(dest, src, n)      \
+  {                                        \
+    assert(sizeof(*dest) == sizeof(*src)); \
+    memcpy(dest, src, n * sizeof(*src));   \
   }
 
 #define vp10_zero(dest) memset(&(dest), 0, sizeof(dest))
@@ -45,19 +47,21 @@ static INLINE int get_unsigned_bits(unsigned int num_values) {
 }
 
 #if CONFIG_DEBUG
-#define CHECK_MEM_ERROR(cm, lval, expr) do { \
-  lval = (expr); \
-  if (!lval) \
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
-                       "Failed to allocate "#lval" at %s:%d", \
-                       __FILE__, __LINE__); \
+#define CHECK_MEM_ERROR(cm, lval, expr)                                     \
+  do {                                                                      \
+    lval = (expr);                                                          \
+    if (!lval)                                                              \
+      vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,                   \
+                         "Failed to allocate " #lval " at %s:%d", __FILE__, \
+                         __LINE__);                                         \
   } while (0)
 #else
-#define CHECK_MEM_ERROR(cm, lval, expr) do { \
-  lval = (expr); \
-  if (!lval) \
-    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
-                       "Failed to allocate "#lval); \
+#define CHECK_MEM_ERROR(cm, lval, expr)                   \
+  do {                                                    \
+    lval = (expr);                                        \
+    if (!lval)                                            \
+      vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
+                         "Failed to allocate " #lval);    \
   } while (0)
 #endif
 // TODO(yaowu): validate the usage of these codes or develop new ones.
@@ -67,7 +71,6 @@ static INLINE int get_unsigned_bits(unsigned int num_values) {
 
 #define VPX_FRAME_MARKER 0x2
 
-
 #ifdef __cplusplus
 }  // extern "C"
 #endif
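
CHECK_MEM_ERROR above is wrapped in do { ... } while (0) so the macro remains a single statement and composes safely with if/else. A sketch of the failure mode the wrapper prevents; need_alloc, mi_size, and reuse_mi are hypothetical, while vpx_calloc and cm->mip are real names from elsewhere in the tree:

/* Without the do/while (0) wrapper the macro would expand to a bare block
 * followed by a stray semicolon, and this else would fail to parse. */
if (need_alloc)
  CHECK_MEM_ERROR(cm, cm->mip, vpx_calloc(mi_size, sizeof(*cm->mip)));
else
  reuse_mi(cm);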
diff --git a/vp10/common/common_data.h b/vp10/common/common_data.h
index 334489c9d2901b6e03c56c98734484ed9f660a22..8863febfa6f17468dffe919aa9f39458f0e099b9 100644
--- a/vp10/common/common_data.h
+++ b/vp10/common/common_data.h
@@ -20,132 +20,132 @@ extern "C" {
 #endif
 
 // Log 2 conversion lookup tables for block width and height
-static const uint8_t b_width_log2_lookup[BLOCK_SIZES] =
-  {0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4};
-static const uint8_t b_height_log2_lookup[BLOCK_SIZES] =
-  {0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4};
-static const uint8_t num_4x4_blocks_wide_lookup[BLOCK_SIZES] =
-  {1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16};
-static const uint8_t num_4x4_blocks_high_lookup[BLOCK_SIZES] =
-  {1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16};
+static const uint8_t b_width_log2_lookup[BLOCK_SIZES] = { 0, 0, 1, 1, 1, 2, 2,
+                                                          2, 3, 3, 3, 4, 4 };
+static const uint8_t b_height_log2_lookup[BLOCK_SIZES] = { 0, 1, 0, 1, 2, 1, 2,
+                                                           3, 2, 3, 4, 3, 4 };
+static const uint8_t num_4x4_blocks_wide_lookup[BLOCK_SIZES] = {
+  1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16
+};
+static const uint8_t num_4x4_blocks_high_lookup[BLOCK_SIZES] = {
+  1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16
+};
 // Log 2 conversion lookup tables for modeinfo width and height
-static const uint8_t mi_width_log2_lookup[BLOCK_SIZES] =
-  {0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3};
-static const uint8_t mi_height_log2_lookup[BLOCK_SIZES] =
-  {0, 0, 0, 0, 1, 0, 1, 2, 1, 2, 3, 2, 3};
-static const uint8_t num_8x8_blocks_wide_lookup[BLOCK_SIZES] =
-  {1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8};
-static const uint8_t num_8x8_blocks_high_lookup[BLOCK_SIZES] =
-  {1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8};
+static const uint8_t mi_width_log2_lookup[BLOCK_SIZES] = { 0, 0, 0, 0, 0, 1, 1,
+                                                           1, 2, 2, 2, 3, 3 };
+static const uint8_t mi_height_log2_lookup[BLOCK_SIZES] = { 0, 0, 0, 0, 1, 0, 1,
+                                                            2, 1, 2, 3, 2, 3 };
+static const uint8_t num_8x8_blocks_wide_lookup[BLOCK_SIZES] = {
+  1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8
+};
+static const uint8_t num_8x8_blocks_high_lookup[BLOCK_SIZES] = {
+  1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8
+};
 
 // VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize)))
-static const uint8_t size_group_lookup[BLOCK_SIZES] =
-  {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3};
+static const uint8_t size_group_lookup[BLOCK_SIZES] = { 0, 0, 0, 1, 1, 1, 2,
+                                                        2, 2, 3, 3, 3, 3 };
 
-static const uint8_t num_pels_log2_lookup[BLOCK_SIZES] =
-  {4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12};
+static const uint8_t num_pels_log2_lookup[BLOCK_SIZES] = {
+  4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12
+};
 
 static const PARTITION_TYPE partition_lookup[][BLOCK_SIZES] = {
-  {  // 4X4
+  { // 4X4
     // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
-    PARTITION_NONE, PARTITION_INVALID, PARTITION_INVALID,
-    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-    PARTITION_INVALID
-  }, {  // 8X8
+    PARTITION_NONE, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+    PARTITION_INVALID },
+  { // 8X8
     // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
     PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE,
-    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID
-  }, {  // 16X16
+    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+    PARTITION_INVALID },
+  { // 16X16
     // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
     PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
     PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID,
-    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-    PARTITION_INVALID, PARTITION_INVALID
-  }, {  // 32X32
+    PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+    PARTITION_INVALID },
+  { // 32X32
     // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
     PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
     PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT,
-    PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID,
-    PARTITION_INVALID, PARTITION_INVALID
-  }, {  // 64X64
+    PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID, PARTITION_INVALID,
+    PARTITION_INVALID },
+  { // 64X64
     // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
     PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
     PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
     PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ,
-    PARTITION_NONE
-  }
+    PARTITION_NONE }
 };
 
 static const BLOCK_SIZE subsize_lookup[PARTITION_TYPES][BLOCK_SIZES] = {
-  {     // PARTITION_NONE
-    BLOCK_4X4,   BLOCK_4X8,   BLOCK_8X4,
-    BLOCK_8X8,   BLOCK_8X16,  BLOCK_16X8,
-    BLOCK_16X16, BLOCK_16X32, BLOCK_32X16,
-    BLOCK_32X32, BLOCK_32X64, BLOCK_64X32,
-    BLOCK_64X64,
-  }, {  // PARTITION_HORZ
-    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_8X4,     BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_16X8,    BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_32X16,   BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_64X32,
-  }, {  // PARTITION_VERT
-    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_4X8,     BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_8X16,    BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_16X32,   BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_32X64,
-  }, {  // PARTITION_SPLIT
-    BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_4X4,     BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_8X8,     BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_16X16,   BLOCK_INVALID, BLOCK_INVALID,
-    BLOCK_32X32,
+  {
+      // PARTITION_NONE
+      BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8, BLOCK_8X16, BLOCK_16X8,
+      BLOCK_16X16, BLOCK_16X32, BLOCK_32X16, BLOCK_32X32, BLOCK_32X64,
+      BLOCK_64X32, BLOCK_64X64,
+  },
+  {
+      // PARTITION_HORZ
+      BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, BLOCK_8X4, BLOCK_INVALID,
+      BLOCK_INVALID, BLOCK_16X8, BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X16,
+      BLOCK_INVALID, BLOCK_INVALID, BLOCK_64X32,
+  },
+  {
+      // PARTITION_VERT
+      BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, BLOCK_4X8, BLOCK_INVALID,
+      BLOCK_INVALID, BLOCK_8X16, BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X32,
+      BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X64,
+  },
+  {
+      // PARTITION_SPLIT
+      BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, BLOCK_4X4, BLOCK_INVALID,
+      BLOCK_INVALID, BLOCK_8X8, BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X16,
+      BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X32,
   }
 };
 
 static const TX_SIZE max_txsize_lookup[BLOCK_SIZES] = {
-  TX_4X4,   TX_4X4,   TX_4X4,
-  TX_8X8,   TX_8X8,   TX_8X8,
-  TX_16X16, TX_16X16, TX_16X16,
-  TX_32X32, TX_32X32, TX_32X32, TX_32X32
+  TX_4X4,   TX_4X4,   TX_4X4,   TX_8X8,   TX_8X8,   TX_8X8,  TX_16X16,
+  TX_16X16, TX_16X16, TX_32X32, TX_32X32, TX_32X32, TX_32X32
 };
 
 static const BLOCK_SIZE txsize_to_bsize[TX_SIZES] = {
-    BLOCK_4X4,  // TX_4X4
-    BLOCK_8X8,  // TX_8X8
-    BLOCK_16X16,  // TX_16X16
-    BLOCK_32X32,  // TX_32X32
+  BLOCK_4X4,    // TX_4X4
+  BLOCK_8X8,    // TX_8X8
+  BLOCK_16X16,  // TX_16X16
+  BLOCK_32X32,  // TX_32X32
 };
 
 static const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES] = {
-  TX_4X4,  // ONLY_4X4
-  TX_8X8,  // ALLOW_8X8
+  TX_4X4,    // ONLY_4X4
+  TX_8X8,    // ALLOW_8X8
   TX_16X16,  // ALLOW_16X16
   TX_32X32,  // ALLOW_32X32
   TX_32X32,  // TX_MODE_SELECT
 };
 
 static const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2] = {
-//  ss_x == 0    ss_x == 0        ss_x == 1      ss_x == 1
-//  ss_y == 0    ss_y == 1        ss_y == 0      ss_y == 1
-  {{BLOCK_4X4,   BLOCK_INVALID}, {BLOCK_INVALID, BLOCK_INVALID}},
-  {{BLOCK_4X8,   BLOCK_4X4},     {BLOCK_INVALID, BLOCK_INVALID}},
-  {{BLOCK_8X4,   BLOCK_INVALID}, {BLOCK_4X4,     BLOCK_INVALID}},
-  {{BLOCK_8X8,   BLOCK_8X4},     {BLOCK_4X8,     BLOCK_4X4}},
-  {{BLOCK_8X16,  BLOCK_8X8},     {BLOCK_INVALID, BLOCK_4X8}},
-  {{BLOCK_16X8,  BLOCK_INVALID}, {BLOCK_8X8,     BLOCK_8X4}},
-  {{BLOCK_16X16, BLOCK_16X8},    {BLOCK_8X16,    BLOCK_8X8}},
-  {{BLOCK_16X32, BLOCK_16X16},   {BLOCK_INVALID, BLOCK_8X16}},
-  {{BLOCK_32X16, BLOCK_INVALID}, {BLOCK_16X16,   BLOCK_16X8}},
-  {{BLOCK_32X32, BLOCK_32X16},   {BLOCK_16X32,   BLOCK_16X16}},
-  {{BLOCK_32X64, BLOCK_32X32},   {BLOCK_INVALID, BLOCK_16X32}},
-  {{BLOCK_64X32, BLOCK_INVALID}, {BLOCK_32X32,   BLOCK_32X16}},
-  {{BLOCK_64X64, BLOCK_64X32},   {BLOCK_32X64,   BLOCK_32X32}},
+  //  ss_x == 0    ss_x == 0        ss_x == 1      ss_x == 1
+  //  ss_y == 0    ss_y == 1        ss_y == 0      ss_y == 1
+  { { BLOCK_4X4, BLOCK_INVALID }, { BLOCK_INVALID, BLOCK_INVALID } },
+  { { BLOCK_4X8, BLOCK_4X4 }, { BLOCK_INVALID, BLOCK_INVALID } },
+  { { BLOCK_8X4, BLOCK_INVALID }, { BLOCK_4X4, BLOCK_INVALID } },
+  { { BLOCK_8X8, BLOCK_8X4 }, { BLOCK_4X8, BLOCK_4X4 } },
+  { { BLOCK_8X16, BLOCK_8X8 }, { BLOCK_INVALID, BLOCK_4X8 } },
+  { { BLOCK_16X8, BLOCK_INVALID }, { BLOCK_8X8, BLOCK_8X4 } },
+  { { BLOCK_16X16, BLOCK_16X8 }, { BLOCK_8X16, BLOCK_8X8 } },
+  { { BLOCK_16X32, BLOCK_16X16 }, { BLOCK_INVALID, BLOCK_8X16 } },
+  { { BLOCK_32X16, BLOCK_INVALID }, { BLOCK_16X16, BLOCK_16X8 } },
+  { { BLOCK_32X32, BLOCK_32X16 }, { BLOCK_16X32, BLOCK_16X16 } },
+  { { BLOCK_32X64, BLOCK_32X32 }, { BLOCK_INVALID, BLOCK_16X32 } },
+  { { BLOCK_64X32, BLOCK_INVALID }, { BLOCK_32X32, BLOCK_32X16 } },
+  { { BLOCK_64X64, BLOCK_64X32 }, { BLOCK_32X64, BLOCK_32X32 } },
 };
 
 // Generates a 4-bit field in which each bit set to 1 represents
@@ -154,20 +154,20 @@ static const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2] = {
 static const struct {
   PARTITION_CONTEXT above;
   PARTITION_CONTEXT left;
-} partition_context_lookup[BLOCK_SIZES]= {
-  {15, 15},  // 4X4   - {0b1111, 0b1111}
-  {15, 14},  // 4X8   - {0b1111, 0b1110}
-  {14, 15},  // 8X4   - {0b1110, 0b1111}
-  {14, 14},  // 8X8   - {0b1110, 0b1110}
-  {14, 12},  // 8X16  - {0b1110, 0b1100}
-  {12, 14},  // 16X8  - {0b1100, 0b1110}
-  {12, 12},  // 16X16 - {0b1100, 0b1100}
-  {12, 8 },  // 16X32 - {0b1100, 0b1000}
-  {8,  12},  // 32X16 - {0b1000, 0b1100}
-  {8,  8 },  // 32X32 - {0b1000, 0b1000}
-  {8,  0 },  // 32X64 - {0b1000, 0b0000}
-  {0,  8 },  // 64X32 - {0b0000, 0b1000}
-  {0,  0 },  // 64X64 - {0b0000, 0b0000}
+} partition_context_lookup[BLOCK_SIZES] = {
+  { 15, 15 },  // 4X4   - {0b1111, 0b1111}
+  { 15, 14 },  // 4X8   - {0b1111, 0b1110}
+  { 14, 15 },  // 8X4   - {0b1110, 0b1111}
+  { 14, 14 },  // 8X8   - {0b1110, 0b1110}
+  { 14, 12 },  // 8X16  - {0b1110, 0b1100}
+  { 12, 14 },  // 16X8  - {0b1100, 0b1110}
+  { 12, 12 },  // 16X16 - {0b1100, 0b1100}
+  { 12, 8 },   // 16X32 - {0b1100, 0b1000}
+  { 8, 12 },   // 32X16 - {0b1000, 0b1100}
+  { 8, 8 },    // 32X32 - {0b1000, 0b1000}
+  { 8, 0 },    // 32X64 - {0b1000, 0b0000}
+  { 0, 8 },    // 64X32 - {0b0000, 0b1000}
+  { 0, 0 },    // 64X64 - {0b0000, 0b0000}
 };
 
 #ifdef __cplusplus
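
The partition_context_lookup table above packs an (above, left) pair of 4-bit contexts per block size. A quick sketch of how it is indexed; the function name is hypothetical and the expected values are read straight from the table:

#include <assert.h>

static void partition_context_example(void) {
  /* BLOCK_8X16 row of the table: { 14, 12 } == { 0b1110, 0b1100 } */
  assert(partition_context_lookup[BLOCK_8X16].above == 14);
  assert(partition_context_lookup[BLOCK_8X16].left == 12);
}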
diff --git a/vp10/common/debugmodes.c b/vp10/common/debugmodes.c
index 10fc4d633d3b1b64436193ec32aa296ce8fa8ab0..18bbbec1ccb95a8a77a60ba37c3b2708410be7b4 100644
--- a/vp10/common/debugmodes.c
+++ b/vp10/common/debugmodes.c
@@ -34,9 +34,7 @@ static void print_mi_data(VP10_COMMON *cm, FILE *file, const char *descriptor,
   for (mi_row = 0; mi_row < rows; mi_row++) {
     fprintf(file, "%c ", prefix);
     for (mi_col = 0; mi_col < cols; mi_col++) {
-      fprintf(file, "%2d ",
-              *((int*) ((char *) (&mi[0]->mbmi) +
-                                  member_offset)));
+      fprintf(file, "%2d ", *((int *)((char *)(&mi[0]->mbmi) + member_offset)));
       mi++;
     }
     fprintf(file, "\n");
@@ -79,7 +77,7 @@ void vp10_print_modes_and_motion_vectors(VP10_COMMON *cm, const char *file) {
     fprintf(mvs, "V ");
     for (mi_col = 0; mi_col < cols; mi_col++) {
       fprintf(mvs, "%4d:%4d ", mi[0]->mbmi.mv[0].as_mv.row,
-                               mi[0]->mbmi.mv[0].as_mv.col);
+              mi[0]->mbmi.mv[0].as_mv.col);
       mi++;
     }
     fprintf(mvs, "\n");
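
print_mi_data above reads an arbitrary int member of MB_MODE_INFO through a caller-supplied byte offset, the classic offsetof idiom. A standalone sketch of the same trick; struct demo and print_member are hypothetical:

#include <stddef.h>
#include <stdio.h>

struct demo { int mode; int skip; };

/* member_offset comes from offsetof(struct demo, <member>) */
static void print_member(const struct demo *d, size_t member_offset) {
  printf("%2d ", *(const int *)((const char *)d + member_offset));
}

/* usage: print_member(&d, offsetof(struct demo, skip)); */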
diff --git a/vp10/common/entropy.c b/vp10/common/entropy.c
index c5930e6a5bf9d997523a54450b53f2db3519a5c6..e2b73cb2b2bda9d252f7980d04c9b12853a04c11 100644
--- a/vp10/common/entropy.c
+++ b/vp10/common/entropy.c
@@ -17,14 +17,22 @@
 
 // Unconstrained Node Tree
 const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
-  2, 6,                                // 0 = LOW_VAL
-  -TWO_TOKEN, 4,                       // 1 = TWO
-  -THREE_TOKEN, -FOUR_TOKEN,           // 2 = THREE
-  8, 10,                               // 3 = HIGH_LOW
-  -CATEGORY1_TOKEN, -CATEGORY2_TOKEN,  // 4 = CAT_ONE
-  12, 14,                              // 5 = CAT_THREEFOUR
-  -CATEGORY3_TOKEN, -CATEGORY4_TOKEN,  // 6 = CAT_THREE
-  -CATEGORY5_TOKEN, -CATEGORY6_TOKEN   // 7 = CAT_FIVE
+  2,
+  6,  // 0 = LOW_VAL
+  -TWO_TOKEN,
+  4,  // 1 = TWO
+  -THREE_TOKEN,
+  -FOUR_TOKEN,  // 2 = THREE
+  8,
+  10,  // 3 = HIGH_LOW
+  -CATEGORY1_TOKEN,
+  -CATEGORY2_TOKEN,  // 4 = CAT_ONE
+  12,
+  14,  // 5 = CAT_THREEFOUR
+  -CATEGORY3_TOKEN,
+  -CATEGORY4_TOKEN,  // 6 = CAT_THREE
+  -CATEGORY5_TOKEN,
+  -CATEGORY6_TOKEN  // 7 = CAT_FIVE
 };
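
Entries in a vpx_tree_index array such as vp10_coef_con_tree come in left/right pairs: a non-negative entry is the index of the next pair, and a negative entry is a leaf holding the negated token (hence -TWO_TOKEN and friends above). A sketch of the walk, modeled on vpx_read_tree() in vpx_dsp; read_bit() is a hypothetical stand-in for the boolean decoder:

static int tree_decode(const vpx_tree_index *tree, const vpx_prob *probs) {
  vpx_tree_index i = 0;
  while ((i = tree[i + read_bit(probs[i >> 1])]) > 0) continue;
  return -i; /* e.g. TWO_TOKEN when the walk lands on -TWO_TOKEN */
}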
 
 const vpx_prob vp10_cat1_prob[] = { 159 };
@@ -32,9 +40,8 @@ const vpx_prob vp10_cat2_prob[] = { 165, 145 };
 const vpx_prob vp10_cat3_prob[] = { 173, 148, 140 };
 const vpx_prob vp10_cat4_prob[] = { 176, 155, 140, 135 };
 const vpx_prob vp10_cat5_prob[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob[] = {
-    254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
-};
+const vpx_prob vp10_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
+                                    196, 177, 153, 140, 133, 130, 129 };
 #if CONFIG_VPX_HIGHBITDEPTH
 const vpx_prob vp10_cat1_prob_high10[] = { 159 };
 const vpx_prob vp10_cat2_prob_high10[] = { 165, 145 };
@@ -42,96 +49,68 @@ const vpx_prob vp10_cat3_prob_high10[] = { 173, 148, 140 };
 const vpx_prob vp10_cat4_prob_high10[] = { 176, 155, 140, 135 };
 const vpx_prob vp10_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
 const vpx_prob vp10_cat6_prob_high10[] = {
-    255, 255, 254, 254, 254, 252, 249, 243,
-    230, 196, 177, 153, 140, 133, 130, 129
+  255, 255, 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
 };
 const vpx_prob vp10_cat1_prob_high12[] = { 159 };
 const vpx_prob vp10_cat2_prob_high12[] = { 165, 145 };
 const vpx_prob vp10_cat3_prob_high12[] = { 173, 148, 140 };
 const vpx_prob vp10_cat4_prob_high12[] = { 176, 155, 140, 135 };
 const vpx_prob vp10_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
-const vpx_prob vp10_cat6_prob_high12[] = {
-    255, 255, 255, 255, 254, 254, 254, 252, 249,
-    243, 230, 196, 177, 153, 140, 133, 130, 129
-};
+const vpx_prob vp10_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
+                                           254, 252, 249, 243, 230, 196,
+                                           177, 153, 140, 133, 130, 129 };
 #endif
 
 const uint8_t vp10_coefband_trans_8x8plus[1024] = {
-  0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
-  4, 4, 4, 4, 4, 5,
+  0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
   // beyond MAXBAND_INDEX+1 all values are filled as 5
-                    5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 };
 
 const uint8_t vp10_coefband_trans_4x4[16] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
 };
 
-const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = {
-  0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5
-};
+const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
+                                                       4, 5, 5, 5, 5, 5 };
 
 // Model obtained from a 2-sided zero-centered distribution derived
 // from a Pareto distribution. The cdf of the distribution is:
@@ -148,597 +127,921 @@ const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = {
 // vp10_pareto8_full[l][node] = (vp10_pareto8_full[l-1][node] +
 //                              vp10_pareto8_full[l+1][node] ) >> 1;
 const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
-  {  3,  86, 128,   6,  86,  23,  88,  29},
-  {  6,  86, 128,  11,  87,  42,  91,  52},
-  {  9,  86, 129,  17,  88,  61,  94,  76},
-  { 12,  86, 129,  22,  88,  77,  97,  93},
-  { 15,  87, 129,  28,  89,  93, 100, 110},
-  { 17,  87, 129,  33,  90, 105, 103, 123},
-  { 20,  88, 130,  38,  91, 118, 106, 136},
-  { 23,  88, 130,  43,  91, 128, 108, 146},
-  { 26,  89, 131,  48,  92, 139, 111, 156},
-  { 28,  89, 131,  53,  93, 147, 114, 163},
-  { 31,  90, 131,  58,  94, 156, 117, 171},
-  { 34,  90, 131,  62,  94, 163, 119, 177},
-  { 37,  90, 132,  66,  95, 171, 122, 184},
-  { 39,  90, 132,  70,  96, 177, 124, 189},
-  { 42,  91, 132,  75,  97, 183, 127, 194},
-  { 44,  91, 132,  79,  97, 188, 129, 198},
-  { 47,  92, 133,  83,  98, 193, 132, 202},
-  { 49,  92, 133,  86,  99, 197, 134, 205},
-  { 52,  93, 133,  90, 100, 201, 137, 208},
-  { 54,  93, 133,  94, 100, 204, 139, 211},
-  { 57,  94, 134,  98, 101, 208, 142, 214},
-  { 59,  94, 134, 101, 102, 211, 144, 216},
-  { 62,  94, 135, 105, 103, 214, 146, 218},
-  { 64,  94, 135, 108, 103, 216, 148, 220},
-  { 66,  95, 135, 111, 104, 219, 151, 222},
-  { 68,  95, 135, 114, 105, 221, 153, 223},
-  { 71,  96, 136, 117, 106, 224, 155, 225},
-  { 73,  96, 136, 120, 106, 225, 157, 226},
-  { 76,  97, 136, 123, 107, 227, 159, 228},
-  { 78,  97, 136, 126, 108, 229, 160, 229},
-  { 80,  98, 137, 129, 109, 231, 162, 231},
-  { 82,  98, 137, 131, 109, 232, 164, 232},
-  { 84,  98, 138, 134, 110, 234, 166, 233},
-  { 86,  98, 138, 137, 111, 235, 168, 234},
-  { 89,  99, 138, 140, 112, 236, 170, 235},
-  { 91,  99, 138, 142, 112, 237, 171, 235},
-  { 93, 100, 139, 145, 113, 238, 173, 236},
-  { 95, 100, 139, 147, 114, 239, 174, 237},
-  { 97, 101, 140, 149, 115, 240, 176, 238},
-  { 99, 101, 140, 151, 115, 241, 177, 238},
-  {101, 102, 140, 154, 116, 242, 179, 239},
-  {103, 102, 140, 156, 117, 242, 180, 239},
-  {105, 103, 141, 158, 118, 243, 182, 240},
-  {107, 103, 141, 160, 118, 243, 183, 240},
-  {109, 104, 141, 162, 119, 244, 185, 241},
-  {111, 104, 141, 164, 119, 244, 186, 241},
-  {113, 104, 142, 166, 120, 245, 187, 242},
-  {114, 104, 142, 168, 121, 245, 188, 242},
-  {116, 105, 143, 170, 122, 246, 190, 243},
-  {118, 105, 143, 171, 122, 246, 191, 243},
-  {120, 106, 143, 173, 123, 247, 192, 244},
-  {121, 106, 143, 175, 124, 247, 193, 244},
-  {123, 107, 144, 177, 125, 248, 195, 244},
-  {125, 107, 144, 178, 125, 248, 196, 244},
-  {127, 108, 145, 180, 126, 249, 197, 245},
-  {128, 108, 145, 181, 127, 249, 198, 245},
-  {130, 109, 145, 183, 128, 249, 199, 245},
-  {132, 109, 145, 184, 128, 249, 200, 245},
-  {134, 110, 146, 186, 129, 250, 201, 246},
-  {135, 110, 146, 187, 130, 250, 202, 246},
-  {137, 111, 147, 189, 131, 251, 203, 246},
-  {138, 111, 147, 190, 131, 251, 204, 246},
-  {140, 112, 147, 192, 132, 251, 205, 247},
-  {141, 112, 147, 193, 132, 251, 206, 247},
-  {143, 113, 148, 194, 133, 251, 207, 247},
-  {144, 113, 148, 195, 134, 251, 207, 247},
-  {146, 114, 149, 197, 135, 252, 208, 248},
-  {147, 114, 149, 198, 135, 252, 209, 248},
-  {149, 115, 149, 199, 136, 252, 210, 248},
-  {150, 115, 149, 200, 137, 252, 210, 248},
-  {152, 115, 150, 201, 138, 252, 211, 248},
-  {153, 115, 150, 202, 138, 252, 212, 248},
-  {155, 116, 151, 204, 139, 253, 213, 249},
-  {156, 116, 151, 205, 139, 253, 213, 249},
-  {158, 117, 151, 206, 140, 253, 214, 249},
-  {159, 117, 151, 207, 141, 253, 215, 249},
-  {161, 118, 152, 208, 142, 253, 216, 249},
-  {162, 118, 152, 209, 142, 253, 216, 249},
-  {163, 119, 153, 210, 143, 253, 217, 249},
-  {164, 119, 153, 211, 143, 253, 217, 249},
-  {166, 120, 153, 212, 144, 254, 218, 250},
-  {167, 120, 153, 212, 145, 254, 219, 250},
-  {168, 121, 154, 213, 146, 254, 220, 250},
-  {169, 121, 154, 214, 146, 254, 220, 250},
-  {171, 122, 155, 215, 147, 254, 221, 250},
-  {172, 122, 155, 216, 147, 254, 221, 250},
-  {173, 123, 155, 217, 148, 254, 222, 250},
-  {174, 123, 155, 217, 149, 254, 222, 250},
-  {176, 124, 156, 218, 150, 254, 223, 250},
-  {177, 124, 156, 219, 150, 254, 223, 250},
-  {178, 125, 157, 220, 151, 254, 224, 251},
-  {179, 125, 157, 220, 151, 254, 224, 251},
-  {180, 126, 157, 221, 152, 254, 225, 251},
-  {181, 126, 157, 221, 152, 254, 225, 251},
-  {183, 127, 158, 222, 153, 254, 226, 251},
-  {184, 127, 158, 223, 154, 254, 226, 251},
-  {185, 128, 159, 224, 155, 255, 227, 251},
-  {186, 128, 159, 224, 155, 255, 227, 251},
-  {187, 129, 160, 225, 156, 255, 228, 251},
-  {188, 130, 160, 225, 156, 255, 228, 251},
-  {189, 131, 160, 226, 157, 255, 228, 251},
-  {190, 131, 160, 226, 158, 255, 228, 251},
-  {191, 132, 161, 227, 159, 255, 229, 251},
-  {192, 132, 161, 227, 159, 255, 229, 251},
-  {193, 133, 162, 228, 160, 255, 230, 252},
-  {194, 133, 162, 229, 160, 255, 230, 252},
-  {195, 134, 163, 230, 161, 255, 231, 252},
-  {196, 134, 163, 230, 161, 255, 231, 252},
-  {197, 135, 163, 231, 162, 255, 231, 252},
-  {198, 135, 163, 231, 162, 255, 231, 252},
-  {199, 136, 164, 232, 163, 255, 232, 252},
-  {200, 136, 164, 232, 164, 255, 232, 252},
-  {201, 137, 165, 233, 165, 255, 233, 252},
-  {201, 137, 165, 233, 165, 255, 233, 252},
-  {202, 138, 166, 233, 166, 255, 233, 252},
-  {203, 138, 166, 233, 166, 255, 233, 252},
-  {204, 139, 166, 234, 167, 255, 234, 252},
-  {205, 139, 166, 234, 167, 255, 234, 252},
-  {206, 140, 167, 235, 168, 255, 235, 252},
-  {206, 140, 167, 235, 168, 255, 235, 252},
-  {207, 141, 168, 236, 169, 255, 235, 252},
-  {208, 141, 168, 236, 170, 255, 235, 252},
-  {209, 142, 169, 237, 171, 255, 236, 252},
-  {209, 143, 169, 237, 171, 255, 236, 252},
-  {210, 144, 169, 237, 172, 255, 236, 252},
-  {211, 144, 169, 237, 172, 255, 236, 252},
-  {212, 145, 170, 238, 173, 255, 237, 252},
-  {213, 145, 170, 238, 173, 255, 237, 252},
-  {214, 146, 171, 239, 174, 255, 237, 253},
-  {214, 146, 171, 239, 174, 255, 237, 253},
-  {215, 147, 172, 240, 175, 255, 238, 253},
-  {215, 147, 172, 240, 175, 255, 238, 253},
-  {216, 148, 173, 240, 176, 255, 238, 253},
-  {217, 148, 173, 240, 176, 255, 238, 253},
-  {218, 149, 173, 241, 177, 255, 239, 253},
-  {218, 149, 173, 241, 178, 255, 239, 253},
-  {219, 150, 174, 241, 179, 255, 239, 253},
-  {219, 151, 174, 241, 179, 255, 239, 253},
-  {220, 152, 175, 242, 180, 255, 240, 253},
-  {221, 152, 175, 242, 180, 255, 240, 253},
-  {222, 153, 176, 242, 181, 255, 240, 253},
-  {222, 153, 176, 242, 181, 255, 240, 253},
-  {223, 154, 177, 243, 182, 255, 240, 253},
-  {223, 154, 177, 243, 182, 255, 240, 253},
-  {224, 155, 178, 244, 183, 255, 241, 253},
-  {224, 155, 178, 244, 183, 255, 241, 253},
-  {225, 156, 178, 244, 184, 255, 241, 253},
-  {225, 157, 178, 244, 184, 255, 241, 253},
-  {226, 158, 179, 244, 185, 255, 242, 253},
-  {227, 158, 179, 244, 185, 255, 242, 253},
-  {228, 159, 180, 245, 186, 255, 242, 253},
-  {228, 159, 180, 245, 186, 255, 242, 253},
-  {229, 160, 181, 245, 187, 255, 242, 253},
-  {229, 160, 181, 245, 187, 255, 242, 253},
-  {230, 161, 182, 246, 188, 255, 243, 253},
-  {230, 162, 182, 246, 188, 255, 243, 253},
-  {231, 163, 183, 246, 189, 255, 243, 253},
-  {231, 163, 183, 246, 189, 255, 243, 253},
-  {232, 164, 184, 247, 190, 255, 243, 253},
-  {232, 164, 184, 247, 190, 255, 243, 253},
-  {233, 165, 185, 247, 191, 255, 244, 253},
-  {233, 165, 185, 247, 191, 255, 244, 253},
-  {234, 166, 185, 247, 192, 255, 244, 253},
-  {234, 167, 185, 247, 192, 255, 244, 253},
-  {235, 168, 186, 248, 193, 255, 244, 253},
-  {235, 168, 186, 248, 193, 255, 244, 253},
-  {236, 169, 187, 248, 194, 255, 244, 253},
-  {236, 169, 187, 248, 194, 255, 244, 253},
-  {236, 170, 188, 248, 195, 255, 245, 253},
-  {236, 170, 188, 248, 195, 255, 245, 253},
-  {237, 171, 189, 249, 196, 255, 245, 254},
-  {237, 172, 189, 249, 196, 255, 245, 254},
-  {238, 173, 190, 249, 197, 255, 245, 254},
-  {238, 173, 190, 249, 197, 255, 245, 254},
-  {239, 174, 191, 249, 198, 255, 245, 254},
-  {239, 174, 191, 249, 198, 255, 245, 254},
-  {240, 175, 192, 249, 199, 255, 246, 254},
-  {240, 176, 192, 249, 199, 255, 246, 254},
-  {240, 177, 193, 250, 200, 255, 246, 254},
-  {240, 177, 193, 250, 200, 255, 246, 254},
-  {241, 178, 194, 250, 201, 255, 246, 254},
-  {241, 178, 194, 250, 201, 255, 246, 254},
-  {242, 179, 195, 250, 202, 255, 246, 254},
-  {242, 180, 195, 250, 202, 255, 246, 254},
-  {242, 181, 196, 250, 203, 255, 247, 254},
-  {242, 181, 196, 250, 203, 255, 247, 254},
-  {243, 182, 197, 251, 204, 255, 247, 254},
-  {243, 183, 197, 251, 204, 255, 247, 254},
-  {244, 184, 198, 251, 205, 255, 247, 254},
-  {244, 184, 198, 251, 205, 255, 247, 254},
-  {244, 185, 199, 251, 206, 255, 247, 254},
-  {244, 185, 199, 251, 206, 255, 247, 254},
-  {245, 186, 200, 251, 207, 255, 247, 254},
-  {245, 187, 200, 251, 207, 255, 247, 254},
-  {246, 188, 201, 252, 207, 255, 248, 254},
-  {246, 188, 201, 252, 207, 255, 248, 254},
-  {246, 189, 202, 252, 208, 255, 248, 254},
-  {246, 190, 202, 252, 208, 255, 248, 254},
-  {247, 191, 203, 252, 209, 255, 248, 254},
-  {247, 191, 203, 252, 209, 255, 248, 254},
-  {247, 192, 204, 252, 210, 255, 248, 254},
-  {247, 193, 204, 252, 210, 255, 248, 254},
-  {248, 194, 205, 252, 211, 255, 248, 254},
-  {248, 194, 205, 252, 211, 255, 248, 254},
-  {248, 195, 206, 252, 212, 255, 249, 254},
-  {248, 196, 206, 252, 212, 255, 249, 254},
-  {249, 197, 207, 253, 213, 255, 249, 254},
-  {249, 197, 207, 253, 213, 255, 249, 254},
-  {249, 198, 208, 253, 214, 255, 249, 254},
-  {249, 199, 209, 253, 214, 255, 249, 254},
-  {250, 200, 210, 253, 215, 255, 249, 254},
-  {250, 200, 210, 253, 215, 255, 249, 254},
-  {250, 201, 211, 253, 215, 255, 249, 254},
-  {250, 202, 211, 253, 215, 255, 249, 254},
-  {250, 203, 212, 253, 216, 255, 249, 254},
-  {250, 203, 212, 253, 216, 255, 249, 254},
-  {251, 204, 213, 253, 217, 255, 250, 254},
-  {251, 205, 213, 253, 217, 255, 250, 254},
-  {251, 206, 214, 254, 218, 255, 250, 254},
-  {251, 206, 215, 254, 218, 255, 250, 254},
-  {252, 207, 216, 254, 219, 255, 250, 254},
-  {252, 208, 216, 254, 219, 255, 250, 254},
-  {252, 209, 217, 254, 220, 255, 250, 254},
-  {252, 210, 217, 254, 220, 255, 250, 254},
-  {252, 211, 218, 254, 221, 255, 250, 254},
-  {252, 212, 218, 254, 221, 255, 250, 254},
-  {253, 213, 219, 254, 222, 255, 250, 254},
-  {253, 213, 220, 254, 222, 255, 250, 254},
-  {253, 214, 221, 254, 223, 255, 250, 254},
-  {253, 215, 221, 254, 223, 255, 250, 254},
-  {253, 216, 222, 254, 224, 255, 251, 254},
-  {253, 217, 223, 254, 224, 255, 251, 254},
-  {253, 218, 224, 254, 225, 255, 251, 254},
-  {253, 219, 224, 254, 225, 255, 251, 254},
-  {254, 220, 225, 254, 225, 255, 251, 254},
-  {254, 221, 226, 254, 225, 255, 251, 254},
-  {254, 222, 227, 255, 226, 255, 251, 254},
-  {254, 223, 227, 255, 226, 255, 251, 254},
-  {254, 224, 228, 255, 227, 255, 251, 254},
-  {254, 225, 229, 255, 227, 255, 251, 254},
-  {254, 226, 230, 255, 228, 255, 251, 254},
-  {254, 227, 230, 255, 229, 255, 251, 254},
-  {255, 228, 231, 255, 230, 255, 251, 254},
-  {255, 229, 232, 255, 230, 255, 251, 254},
-  {255, 230, 233, 255, 231, 255, 252, 254},
-  {255, 231, 234, 255, 231, 255, 252, 254},
-  {255, 232, 235, 255, 232, 255, 252, 254},
-  {255, 233, 236, 255, 232, 255, 252, 254},
-  {255, 235, 237, 255, 233, 255, 252, 254},
-  {255, 236, 238, 255, 234, 255, 252, 254},
-  {255, 238, 240, 255, 235, 255, 252, 255},
-  {255, 239, 241, 255, 235, 255, 252, 254},
-  {255, 241, 243, 255, 236, 255, 252, 254},
-  {255, 243, 245, 255, 237, 255, 252, 254},
-  {255, 246, 247, 255, 239, 255, 253, 255},
+  { 3, 86, 128, 6, 86, 23, 88, 29 },
+  { 6, 86, 128, 11, 87, 42, 91, 52 },
+  { 9, 86, 129, 17, 88, 61, 94, 76 },
+  { 12, 86, 129, 22, 88, 77, 97, 93 },
+  { 15, 87, 129, 28, 89, 93, 100, 110 },
+  { 17, 87, 129, 33, 90, 105, 103, 123 },
+  { 20, 88, 130, 38, 91, 118, 106, 136 },
+  { 23, 88, 130, 43, 91, 128, 108, 146 },
+  { 26, 89, 131, 48, 92, 139, 111, 156 },
+  { 28, 89, 131, 53, 93, 147, 114, 163 },
+  { 31, 90, 131, 58, 94, 156, 117, 171 },
+  { 34, 90, 131, 62, 94, 163, 119, 177 },
+  { 37, 90, 132, 66, 95, 171, 122, 184 },
+  { 39, 90, 132, 70, 96, 177, 124, 189 },
+  { 42, 91, 132, 75, 97, 183, 127, 194 },
+  { 44, 91, 132, 79, 97, 188, 129, 198 },
+  { 47, 92, 133, 83, 98, 193, 132, 202 },
+  { 49, 92, 133, 86, 99, 197, 134, 205 },
+  { 52, 93, 133, 90, 100, 201, 137, 208 },
+  { 54, 93, 133, 94, 100, 204, 139, 211 },
+  { 57, 94, 134, 98, 101, 208, 142, 214 },
+  { 59, 94, 134, 101, 102, 211, 144, 216 },
+  { 62, 94, 135, 105, 103, 214, 146, 218 },
+  { 64, 94, 135, 108, 103, 216, 148, 220 },
+  { 66, 95, 135, 111, 104, 219, 151, 222 },
+  { 68, 95, 135, 114, 105, 221, 153, 223 },
+  { 71, 96, 136, 117, 106, 224, 155, 225 },
+  { 73, 96, 136, 120, 106, 225, 157, 226 },
+  { 76, 97, 136, 123, 107, 227, 159, 228 },
+  { 78, 97, 136, 126, 108, 229, 160, 229 },
+  { 80, 98, 137, 129, 109, 231, 162, 231 },
+  { 82, 98, 137, 131, 109, 232, 164, 232 },
+  { 84, 98, 138, 134, 110, 234, 166, 233 },
+  { 86, 98, 138, 137, 111, 235, 168, 234 },
+  { 89, 99, 138, 140, 112, 236, 170, 235 },
+  { 91, 99, 138, 142, 112, 237, 171, 235 },
+  { 93, 100, 139, 145, 113, 238, 173, 236 },
+  { 95, 100, 139, 147, 114, 239, 174, 237 },
+  { 97, 101, 140, 149, 115, 240, 176, 238 },
+  { 99, 101, 140, 151, 115, 241, 177, 238 },
+  { 101, 102, 140, 154, 116, 242, 179, 239 },
+  { 103, 102, 140, 156, 117, 242, 180, 239 },
+  { 105, 103, 141, 158, 118, 243, 182, 240 },
+  { 107, 103, 141, 160, 118, 243, 183, 240 },
+  { 109, 104, 141, 162, 119, 244, 185, 241 },
+  { 111, 104, 141, 164, 119, 244, 186, 241 },
+  { 113, 104, 142, 166, 120, 245, 187, 242 },
+  { 114, 104, 142, 168, 121, 245, 188, 242 },
+  { 116, 105, 143, 170, 122, 246, 190, 243 },
+  { 118, 105, 143, 171, 122, 246, 191, 243 },
+  { 120, 106, 143, 173, 123, 247, 192, 244 },
+  { 121, 106, 143, 175, 124, 247, 193, 244 },
+  { 123, 107, 144, 177, 125, 248, 195, 244 },
+  { 125, 107, 144, 178, 125, 248, 196, 244 },
+  { 127, 108, 145, 180, 126, 249, 197, 245 },
+  { 128, 108, 145, 181, 127, 249, 198, 245 },
+  { 130, 109, 145, 183, 128, 249, 199, 245 },
+  { 132, 109, 145, 184, 128, 249, 200, 245 },
+  { 134, 110, 146, 186, 129, 250, 201, 246 },
+  { 135, 110, 146, 187, 130, 250, 202, 246 },
+  { 137, 111, 147, 189, 131, 251, 203, 246 },
+  { 138, 111, 147, 190, 131, 251, 204, 246 },
+  { 140, 112, 147, 192, 132, 251, 205, 247 },
+  { 141, 112, 147, 193, 132, 251, 206, 247 },
+  { 143, 113, 148, 194, 133, 251, 207, 247 },
+  { 144, 113, 148, 195, 134, 251, 207, 247 },
+  { 146, 114, 149, 197, 135, 252, 208, 248 },
+  { 147, 114, 149, 198, 135, 252, 209, 248 },
+  { 149, 115, 149, 199, 136, 252, 210, 248 },
+  { 150, 115, 149, 200, 137, 252, 210, 248 },
+  { 152, 115, 150, 201, 138, 252, 211, 248 },
+  { 153, 115, 150, 202, 138, 252, 212, 248 },
+  { 155, 116, 151, 204, 139, 253, 213, 249 },
+  { 156, 116, 151, 205, 139, 253, 213, 249 },
+  { 158, 117, 151, 206, 140, 253, 214, 249 },
+  { 159, 117, 151, 207, 141, 253, 215, 249 },
+  { 161, 118, 152, 208, 142, 253, 216, 249 },
+  { 162, 118, 152, 209, 142, 253, 216, 249 },
+  { 163, 119, 153, 210, 143, 253, 217, 249 },
+  { 164, 119, 153, 211, 143, 253, 217, 249 },
+  { 166, 120, 153, 212, 144, 254, 218, 250 },
+  { 167, 120, 153, 212, 145, 254, 219, 250 },
+  { 168, 121, 154, 213, 146, 254, 220, 250 },
+  { 169, 121, 154, 214, 146, 254, 220, 250 },
+  { 171, 122, 155, 215, 147, 254, 221, 250 },
+  { 172, 122, 155, 216, 147, 254, 221, 250 },
+  { 173, 123, 155, 217, 148, 254, 222, 250 },
+  { 174, 123, 155, 217, 149, 254, 222, 250 },
+  { 176, 124, 156, 218, 150, 254, 223, 250 },
+  { 177, 124, 156, 219, 150, 254, 223, 250 },
+  { 178, 125, 157, 220, 151, 254, 224, 251 },
+  { 179, 125, 157, 220, 151, 254, 224, 251 },
+  { 180, 126, 157, 221, 152, 254, 225, 251 },
+  { 181, 126, 157, 221, 152, 254, 225, 251 },
+  { 183, 127, 158, 222, 153, 254, 226, 251 },
+  { 184, 127, 158, 223, 154, 254, 226, 251 },
+  { 185, 128, 159, 224, 155, 255, 227, 251 },
+  { 186, 128, 159, 224, 155, 255, 227, 251 },
+  { 187, 129, 160, 225, 156, 255, 228, 251 },
+  { 188, 130, 160, 225, 156, 255, 228, 251 },
+  { 189, 131, 160, 226, 157, 255, 228, 251 },
+  { 190, 131, 160, 226, 158, 255, 228, 251 },
+  { 191, 132, 161, 227, 159, 255, 229, 251 },
+  { 192, 132, 161, 227, 159, 255, 229, 251 },
+  { 193, 133, 162, 228, 160, 255, 230, 252 },
+  { 194, 133, 162, 229, 160, 255, 230, 252 },
+  { 195, 134, 163, 230, 161, 255, 231, 252 },
+  { 196, 134, 163, 230, 161, 255, 231, 252 },
+  { 197, 135, 163, 231, 162, 255, 231, 252 },
+  { 198, 135, 163, 231, 162, 255, 231, 252 },
+  { 199, 136, 164, 232, 163, 255, 232, 252 },
+  { 200, 136, 164, 232, 164, 255, 232, 252 },
+  { 201, 137, 165, 233, 165, 255, 233, 252 },
+  { 201, 137, 165, 233, 165, 255, 233, 252 },
+  { 202, 138, 166, 233, 166, 255, 233, 252 },
+  { 203, 138, 166, 233, 166, 255, 233, 252 },
+  { 204, 139, 166, 234, 167, 255, 234, 252 },
+  { 205, 139, 166, 234, 167, 255, 234, 252 },
+  { 206, 140, 167, 235, 168, 255, 235, 252 },
+  { 206, 140, 167, 235, 168, 255, 235, 252 },
+  { 207, 141, 168, 236, 169, 255, 235, 252 },
+  { 208, 141, 168, 236, 170, 255, 235, 252 },
+  { 209, 142, 169, 237, 171, 255, 236, 252 },
+  { 209, 143, 169, 237, 171, 255, 236, 252 },
+  { 210, 144, 169, 237, 172, 255, 236, 252 },
+  { 211, 144, 169, 237, 172, 255, 236, 252 },
+  { 212, 145, 170, 238, 173, 255, 237, 252 },
+  { 213, 145, 170, 238, 173, 255, 237, 252 },
+  { 214, 146, 171, 239, 174, 255, 237, 253 },
+  { 214, 146, 171, 239, 174, 255, 237, 253 },
+  { 215, 147, 172, 240, 175, 255, 238, 253 },
+  { 215, 147, 172, 240, 175, 255, 238, 253 },
+  { 216, 148, 173, 240, 176, 255, 238, 253 },
+  { 217, 148, 173, 240, 176, 255, 238, 253 },
+  { 218, 149, 173, 241, 177, 255, 239, 253 },
+  { 218, 149, 173, 241, 178, 255, 239, 253 },
+  { 219, 150, 174, 241, 179, 255, 239, 253 },
+  { 219, 151, 174, 241, 179, 255, 239, 253 },
+  { 220, 152, 175, 242, 180, 255, 240, 253 },
+  { 221, 152, 175, 242, 180, 255, 240, 253 },
+  { 222, 153, 176, 242, 181, 255, 240, 253 },
+  { 222, 153, 176, 242, 181, 255, 240, 253 },
+  { 223, 154, 177, 243, 182, 255, 240, 253 },
+  { 223, 154, 177, 243, 182, 255, 240, 253 },
+  { 224, 155, 178, 244, 183, 255, 241, 253 },
+  { 224, 155, 178, 244, 183, 255, 241, 253 },
+  { 225, 156, 178, 244, 184, 255, 241, 253 },
+  { 225, 157, 178, 244, 184, 255, 241, 253 },
+  { 226, 158, 179, 244, 185, 255, 242, 253 },
+  { 227, 158, 179, 244, 185, 255, 242, 253 },
+  { 228, 159, 180, 245, 186, 255, 242, 253 },
+  { 228, 159, 180, 245, 186, 255, 242, 253 },
+  { 229, 160, 181, 245, 187, 255, 242, 253 },
+  { 229, 160, 181, 245, 187, 255, 242, 253 },
+  { 230, 161, 182, 246, 188, 255, 243, 253 },
+  { 230, 162, 182, 246, 188, 255, 243, 253 },
+  { 231, 163, 183, 246, 189, 255, 243, 253 },
+  { 231, 163, 183, 246, 189, 255, 243, 253 },
+  { 232, 164, 184, 247, 190, 255, 243, 253 },
+  { 232, 164, 184, 247, 190, 255, 243, 253 },
+  { 233, 165, 185, 247, 191, 255, 244, 253 },
+  { 233, 165, 185, 247, 191, 255, 244, 253 },
+  { 234, 166, 185, 247, 192, 255, 244, 253 },
+  { 234, 167, 185, 247, 192, 255, 244, 253 },
+  { 235, 168, 186, 248, 193, 255, 244, 253 },
+  { 235, 168, 186, 248, 193, 255, 244, 253 },
+  { 236, 169, 187, 248, 194, 255, 244, 253 },
+  { 236, 169, 187, 248, 194, 255, 244, 253 },
+  { 236, 170, 188, 248, 195, 255, 245, 253 },
+  { 236, 170, 188, 248, 195, 255, 245, 253 },
+  { 237, 171, 189, 249, 196, 255, 245, 254 },
+  { 237, 172, 189, 249, 196, 255, 245, 254 },
+  { 238, 173, 190, 249, 197, 255, 245, 254 },
+  { 238, 173, 190, 249, 197, 255, 245, 254 },
+  { 239, 174, 191, 249, 198, 255, 245, 254 },
+  { 239, 174, 191, 249, 198, 255, 245, 254 },
+  { 240, 175, 192, 249, 199, 255, 246, 254 },
+  { 240, 176, 192, 249, 199, 255, 246, 254 },
+  { 240, 177, 193, 250, 200, 255, 246, 254 },
+  { 240, 177, 193, 250, 200, 255, 246, 254 },
+  { 241, 178, 194, 250, 201, 255, 246, 254 },
+  { 241, 178, 194, 250, 201, 255, 246, 254 },
+  { 242, 179, 195, 250, 202, 255, 246, 254 },
+  { 242, 180, 195, 250, 202, 255, 246, 254 },
+  { 242, 181, 196, 250, 203, 255, 247, 254 },
+  { 242, 181, 196, 250, 203, 255, 247, 254 },
+  { 243, 182, 197, 251, 204, 255, 247, 254 },
+  { 243, 183, 197, 251, 204, 255, 247, 254 },
+  { 244, 184, 198, 251, 205, 255, 247, 254 },
+  { 244, 184, 198, 251, 205, 255, 247, 254 },
+  { 244, 185, 199, 251, 206, 255, 247, 254 },
+  { 244, 185, 199, 251, 206, 255, 247, 254 },
+  { 245, 186, 200, 251, 207, 255, 247, 254 },
+  { 245, 187, 200, 251, 207, 255, 247, 254 },
+  { 246, 188, 201, 252, 207, 255, 248, 254 },
+  { 246, 188, 201, 252, 207, 255, 248, 254 },
+  { 246, 189, 202, 252, 208, 255, 248, 254 },
+  { 246, 190, 202, 252, 208, 255, 248, 254 },
+  { 247, 191, 203, 252, 209, 255, 248, 254 },
+  { 247, 191, 203, 252, 209, 255, 248, 254 },
+  { 247, 192, 204, 252, 210, 255, 248, 254 },
+  { 247, 193, 204, 252, 210, 255, 248, 254 },
+  { 248, 194, 205, 252, 211, 255, 248, 254 },
+  { 248, 194, 205, 252, 211, 255, 248, 254 },
+  { 248, 195, 206, 252, 212, 255, 249, 254 },
+  { 248, 196, 206, 252, 212, 255, 249, 254 },
+  { 249, 197, 207, 253, 213, 255, 249, 254 },
+  { 249, 197, 207, 253, 213, 255, 249, 254 },
+  { 249, 198, 208, 253, 214, 255, 249, 254 },
+  { 249, 199, 209, 253, 214, 255, 249, 254 },
+  { 250, 200, 210, 253, 215, 255, 249, 254 },
+  { 250, 200, 210, 253, 215, 255, 249, 254 },
+  { 250, 201, 211, 253, 215, 255, 249, 254 },
+  { 250, 202, 211, 253, 215, 255, 249, 254 },
+  { 250, 203, 212, 253, 216, 255, 249, 254 },
+  { 250, 203, 212, 253, 216, 255, 249, 254 },
+  { 251, 204, 213, 253, 217, 255, 250, 254 },
+  { 251, 205, 213, 253, 217, 255, 250, 254 },
+  { 251, 206, 214, 254, 218, 255, 250, 254 },
+  { 251, 206, 215, 254, 218, 255, 250, 254 },
+  { 252, 207, 216, 254, 219, 255, 250, 254 },
+  { 252, 208, 216, 254, 219, 255, 250, 254 },
+  { 252, 209, 217, 254, 220, 255, 250, 254 },
+  { 252, 210, 217, 254, 220, 255, 250, 254 },
+  { 252, 211, 218, 254, 221, 255, 250, 254 },
+  { 252, 212, 218, 254, 221, 255, 250, 254 },
+  { 253, 213, 219, 254, 222, 255, 250, 254 },
+  { 253, 213, 220, 254, 222, 255, 250, 254 },
+  { 253, 214, 221, 254, 223, 255, 250, 254 },
+  { 253, 215, 221, 254, 223, 255, 250, 254 },
+  { 253, 216, 222, 254, 224, 255, 251, 254 },
+  { 253, 217, 223, 254, 224, 255, 251, 254 },
+  { 253, 218, 224, 254, 225, 255, 251, 254 },
+  { 253, 219, 224, 254, 225, 255, 251, 254 },
+  { 254, 220, 225, 254, 225, 255, 251, 254 },
+  { 254, 221, 226, 254, 225, 255, 251, 254 },
+  { 254, 222, 227, 255, 226, 255, 251, 254 },
+  { 254, 223, 227, 255, 226, 255, 251, 254 },
+  { 254, 224, 228, 255, 227, 255, 251, 254 },
+  { 254, 225, 229, 255, 227, 255, 251, 254 },
+  { 254, 226, 230, 255, 228, 255, 251, 254 },
+  { 254, 227, 230, 255, 229, 255, 251, 254 },
+  { 255, 228, 231, 255, 230, 255, 251, 254 },
+  { 255, 229, 232, 255, 230, 255, 251, 254 },
+  { 255, 230, 233, 255, 231, 255, 252, 254 },
+  { 255, 231, 234, 255, 231, 255, 252, 254 },
+  { 255, 232, 235, 255, 232, 255, 252, 254 },
+  { 255, 233, 236, 255, 232, 255, 252, 254 },
+  { 255, 235, 237, 255, 233, 255, 252, 254 },
+  { 255, 236, 238, 255, 234, 255, 252, 254 },
+  { 255, 238, 240, 255, 235, 255, 252, 255 },
+  { 255, 239, 241, 255, 235, 255, 252, 254 },
+  { 255, 241, 243, 255, 236, 255, 252, 254 },
+  { 255, 243, 245, 255, 237, 255, 252, 254 },
+  { 255, 246, 247, 255, 239, 255, 253, 255 },
 };
 
 static const vp10_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
-  {  // Y plane
-    {  // Intra
-      {  // Band 0
-        { 195,  29, 183 }, {  84,  49, 136 }, {   8,  42,  71 }
-      }, {  // Band 1
-        {  31, 107, 169 }, {  35,  99, 159 }, {  17,  82, 140 },
-        {   8,  66, 114 }, {   2,  44,  76 }, {   1,  19,  32 }
-      }, {  // Band 2
-        {  40, 132, 201 }, {  29, 114, 187 }, {  13,  91, 157 },
-        {   7,  75, 127 }, {   3,  58,  95 }, {   1,  28,  47 }
-      }, {  // Band 3
-        {  69, 142, 221 }, {  42, 122, 201 }, {  15,  91, 159 },
-        {   6,  67, 121 }, {   1,  42,  77 }, {   1,  17,  31 }
-      }, {  // Band 4
-        { 102, 148, 228 }, {  67, 117, 204 }, {  17,  82, 154 },
-        {   6,  59, 114 }, {   2,  39,  75 }, {   1,  15,  29 }
-      }, {  // Band 5
-        { 156,  57, 233 }, { 119,  57, 212 }, {  58,  48, 163 },
-        {  29,  40, 124 }, {  12,  30,  81 }, {   3,  12,  31 }
-      }
-    }, {  // Inter
-      {  // Band 0
-        { 191, 107, 226 }, { 124, 117, 204 }, {  25,  99, 155 }
-      }, {  // Band 1
-        {  29, 148, 210 }, {  37, 126, 194 }, {   8,  93, 157 },
-        {   2,  68, 118 }, {   1,  39,  69 }, {   1,  17,  33 }
-      }, {  // Band 2
-        {  41, 151, 213 }, {  27, 123, 193 }, {   3,  82, 144 },
-        {   1,  58, 105 }, {   1,  32,  60 }, {   1,  13,  26 }
-      }, {  // Band 3
-        {  59, 159, 220 }, {  23, 126, 198 }, {   4,  88, 151 },
-        {   1,  66, 114 }, {   1,  38,  71 }, {   1,  18,  34 }
-      }, {  // Band 4
-        { 114, 136, 232 }, {  51, 114, 207 }, {  11,  83, 155 },
-        {   3,  56, 105 }, {   1,  33,  65 }, {   1,  17,  34 }
-      }, {  // Band 5
-        { 149,  65, 234 }, { 121,  57, 215 }, {  61,  49, 166 },
-        {  28,  36, 114 }, {  12,  25,  76 }, {   3,  16,  42 }
-      }
-    }
-  }, {  // UV plane
-    {  // Intra
-      {  // Band 0
-        { 214,  49, 220 }, { 132,  63, 188 }, {  42,  65, 137 }
-      }, {  // Band 1
-        {  85, 137, 221 }, { 104, 131, 216 }, {  49, 111, 192 },
-        {  21,  87, 155 }, {   2,  49,  87 }, {   1,  16,  28 }
-      }, {  // Band 2
-        {  89, 163, 230 }, {  90, 137, 220 }, {  29, 100, 183 },
-        {  10,  70, 135 }, {   2,  42,  81 }, {   1,  17,  33 }
-      }, {  // Band 3
-        { 108, 167, 237 }, {  55, 133, 222 }, {  15,  97, 179 },
-        {   4,  72, 135 }, {   1,  45,  85 }, {   1,  19,  38 }
-      }, {  // Band 4
-        { 124, 146, 240 }, {  66, 124, 224 }, {  17,  88, 175 },
-        {   4,  58, 122 }, {   1,  36,  75 }, {   1,  18,  37 }
-      }, {  //  Band 5
-        { 141,  79, 241 }, { 126,  70, 227 }, {  66,  58, 182 },
-        {  30,  44, 136 }, {  12,  34,  96 }, {   2,  20,  47 }
-      }
-    }, {  // Inter
-      {  // Band 0
-        { 229,  99, 249 }, { 143, 111, 235 }, {  46, 109, 192 }
-      }, {  // Band 1
-        {  82, 158, 236 }, {  94, 146, 224 }, {  25, 117, 191 },
-        {   9,  87, 149 }, {   3,  56,  99 }, {   1,  33,  57 }
-      }, {  // Band 2
-        {  83, 167, 237 }, {  68, 145, 222 }, {  10, 103, 177 },
-        {   2,  72, 131 }, {   1,  41,  79 }, {   1,  20,  39 }
-      }, {  // Band 3
-        {  99, 167, 239 }, {  47, 141, 224 }, {  10, 104, 178 },
-        {   2,  73, 133 }, {   1,  44,  85 }, {   1,  22,  47 }
-      }, {  // Band 4
-        { 127, 145, 243 }, {  71, 129, 228 }, {  17,  93, 177 },
-        {   3,  61, 124 }, {   1,  41,  84 }, {   1,  21,  52 }
-      }, {  // Band 5
-        { 157,  78, 244 }, { 140,  72, 231 }, {  69,  58, 184 },
-        {  31,  44, 137 }, {  14,  38, 105 }, {   8,  23,  61 }
-      }
-    }
-  }
+  {     // Y plane
+    {   // Intra
+      { // Band 0
+        { 195, 29, 183 },
+        { 84, 49, 136 },
+        { 8, 42, 71 } },
+      { // Band 1
+        { 31, 107, 169 },
+        { 35, 99, 159 },
+        { 17, 82, 140 },
+        { 8, 66, 114 },
+        { 2, 44, 76 },
+        { 1, 19, 32 } },
+      { // Band 2
+        { 40, 132, 201 },
+        { 29, 114, 187 },
+        { 13, 91, 157 },
+        { 7, 75, 127 },
+        { 3, 58, 95 },
+        { 1, 28, 47 } },
+      { // Band 3
+        { 69, 142, 221 },
+        { 42, 122, 201 },
+        { 15, 91, 159 },
+        { 6, 67, 121 },
+        { 1, 42, 77 },
+        { 1, 17, 31 } },
+      { // Band 4
+        { 102, 148, 228 },
+        { 67, 117, 204 },
+        { 17, 82, 154 },
+        { 6, 59, 114 },
+        { 2, 39, 75 },
+        { 1, 15, 29 } },
+      { // Band 5
+        { 156, 57, 233 },
+        { 119, 57, 212 },
+        { 58, 48, 163 },
+        { 29, 40, 124 },
+        { 12, 30, 81 },
+        { 3, 12, 31 } } },
+    {   // Inter
+      { // Band 0
+        { 191, 107, 226 },
+        { 124, 117, 204 },
+        { 25, 99, 155 } },
+      { // Band 1
+        { 29, 148, 210 },
+        { 37, 126, 194 },
+        { 8, 93, 157 },
+        { 2, 68, 118 },
+        { 1, 39, 69 },
+        { 1, 17, 33 } },
+      { // Band 2
+        { 41, 151, 213 },
+        { 27, 123, 193 },
+        { 3, 82, 144 },
+        { 1, 58, 105 },
+        { 1, 32, 60 },
+        { 1, 13, 26 } },
+      { // Band 3
+        { 59, 159, 220 },
+        { 23, 126, 198 },
+        { 4, 88, 151 },
+        { 1, 66, 114 },
+        { 1, 38, 71 },
+        { 1, 18, 34 } },
+      { // Band 4
+        { 114, 136, 232 },
+        { 51, 114, 207 },
+        { 11, 83, 155 },
+        { 3, 56, 105 },
+        { 1, 33, 65 },
+        { 1, 17, 34 } },
+      { // Band 5
+        { 149, 65, 234 },
+        { 121, 57, 215 },
+        { 61, 49, 166 },
+        { 28, 36, 114 },
+        { 12, 25, 76 },
+        { 3, 16, 42 } } } },
+  {     // UV plane
+    {   // Intra
+      { // Band 0
+        { 214, 49, 220 },
+        { 132, 63, 188 },
+        { 42, 65, 137 } },
+      { // Band 1
+        { 85, 137, 221 },
+        { 104, 131, 216 },
+        { 49, 111, 192 },
+        { 21, 87, 155 },
+        { 2, 49, 87 },
+        { 1, 16, 28 } },
+      { // Band 2
+        { 89, 163, 230 },
+        { 90, 137, 220 },
+        { 29, 100, 183 },
+        { 10, 70, 135 },
+        { 2, 42, 81 },
+        { 1, 17, 33 } },
+      { // Band 3
+        { 108, 167, 237 },
+        { 55, 133, 222 },
+        { 15, 97, 179 },
+        { 4, 72, 135 },
+        { 1, 45, 85 },
+        { 1, 19, 38 } },
+      { // Band 4
+        { 124, 146, 240 },
+        { 66, 124, 224 },
+        { 17, 88, 175 },
+        { 4, 58, 122 },
+        { 1, 36, 75 },
+        { 1, 18, 37 } },
+      { // Band 5
+        { 141, 79, 241 },
+        { 126, 70, 227 },
+        { 66, 58, 182 },
+        { 30, 44, 136 },
+        { 12, 34, 96 },
+        { 2, 20, 47 } } },
+    {   // Inter
+      { // Band 0
+        { 229, 99, 249 },
+        { 143, 111, 235 },
+        { 46, 109, 192 } },
+      { // Band 1
+        { 82, 158, 236 },
+        { 94, 146, 224 },
+        { 25, 117, 191 },
+        { 9, 87, 149 },
+        { 3, 56, 99 },
+        { 1, 33, 57 } },
+      { // Band 2
+        { 83, 167, 237 },
+        { 68, 145, 222 },
+        { 10, 103, 177 },
+        { 2, 72, 131 },
+        { 1, 41, 79 },
+        { 1, 20, 39 } },
+      { // Band 3
+        { 99, 167, 239 },
+        { 47, 141, 224 },
+        { 10, 104, 178 },
+        { 2, 73, 133 },
+        { 1, 44, 85 },
+        { 1, 22, 47 } },
+      { // Band 4
+        { 127, 145, 243 },
+        { 71, 129, 228 },
+        { 17, 93, 177 },
+        { 3, 61, 124 },
+        { 1, 41, 84 },
+        { 1, 21, 52 } },
+      { // Band 5
+        { 157, 78, 244 },
+        { 140, 72, 231 },
+        { 69, 58, 184 },
+        { 31, 44, 137 },
+        { 14, 38, 105 },
+        { 8, 23, 61 } } } }
 };
 
 static const vp10_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
-  {  // Y plane
-    {  // Intra
-      {  // Band 0
-        { 125,  34, 187 }, {  52,  41, 133 }, {   6,  31,  56 }
-      }, {  // Band 1
-        {  37, 109, 153 }, {  51, 102, 147 }, {  23,  87, 128 },
-        {   8,  67, 101 }, {   1,  41,  63 }, {   1,  19,  29 }
-      }, {  // Band 2
-        {  31, 154, 185 }, {  17, 127, 175 }, {   6,  96, 145 },
-        {   2,  73, 114 }, {   1,  51,  82 }, {   1,  28,  45 }
-      }, {  // Band 3
-        {  23, 163, 200 }, {  10, 131, 185 }, {   2,  93, 148 },
-        {   1,  67, 111 }, {   1,  41,  69 }, {   1,  14,  24 }
-      }, {  // Band 4
-        {  29, 176, 217 }, {  12, 145, 201 }, {   3, 101, 156 },
-        {   1,  69, 111 }, {   1,  39,  63 }, {   1,  14,  23 }
-      }, {  // Band 5
-        {  57, 192, 233 }, {  25, 154, 215 }, {   6, 109, 167 },
-        {   3,  78, 118 }, {   1,  48,  69 }, {   1,  21,  29 }
-      }
-    }, {  // Inter
-      {  // Band 0
-        { 202, 105, 245 }, { 108, 106, 216 }, {  18,  90, 144 }
-      }, {  // Band 1
-        {  33, 172, 219 }, {  64, 149, 206 }, {  14, 117, 177 },
-        {   5,  90, 141 }, {   2,  61,  95 }, {   1,  37,  57 }
-      }, {  // Band 2
-        {  33, 179, 220 }, {  11, 140, 198 }, {   1,  89, 148 },
-        {   1,  60, 104 }, {   1,  33,  57 }, {   1,  12,  21 }
-      }, {  // Band 3
-        {  30, 181, 221 }, {   8, 141, 198 }, {   1,  87, 145 },
-        {   1,  58, 100 }, {   1,  31,  55 }, {   1,  12,  20 }
-      }, {  // Band 4
-        {  32, 186, 224 }, {   7, 142, 198 }, {   1,  86, 143 },
-        {   1,  58, 100 }, {   1,  31,  55 }, {   1,  12,  22 }
-      }, {  // Band 5
-        {  57, 192, 227 }, {  20, 143, 204 }, {   3,  96, 154 },
-        {   1,  68, 112 }, {   1,  42,  69 }, {   1,  19,  32 }
-      }
-    }
-  }, {  // UV plane
-    {  // Intra
-      {  // Band 0
-        { 212,  35, 215 }, { 113,  47, 169 }, {  29,  48, 105 }
-      }, {  // Band 1
-        {  74, 129, 203 }, { 106, 120, 203 }, {  49, 107, 178 },
-        {  19,  84, 144 }, {   4,  50,  84 }, {   1,  15,  25 }
-      }, {  // Band 2
-        {  71, 172, 217 }, {  44, 141, 209 }, {  15, 102, 173 },
-        {   6,  76, 133 }, {   2,  51,  89 }, {   1,  24,  42 }
-      }, {  // Band 3
-        {  64, 185, 231 }, {  31, 148, 216 }, {   8, 103, 175 },
-        {   3,  74, 131 }, {   1,  46,  81 }, {   1,  18,  30 }
-      }, {  // Band 4
-        {  65, 196, 235 }, {  25, 157, 221 }, {   5, 105, 174 },
-        {   1,  67, 120 }, {   1,  38,  69 }, {   1,  15,  30 }
-      }, {  // Band 5
-        {  65, 204, 238 }, {  30, 156, 224 }, {   7, 107, 177 },
-        {   2,  70, 124 }, {   1,  42,  73 }, {   1,  18,  34 }
-      }
-    }, {  // Inter
-      {  // Band 0
-        { 225,  86, 251 }, { 144, 104, 235 }, {  42,  99, 181 }
-      }, {  // Band 1
-        {  85, 175, 239 }, { 112, 165, 229 }, {  29, 136, 200 },
-        {  12, 103, 162 }, {   6,  77, 123 }, {   2,  53,  84 }
-      }, {  // Band 2
-        {  75, 183, 239 }, {  30, 155, 221 }, {   3, 106, 171 },
-        {   1,  74, 128 }, {   1,  44,  76 }, {   1,  17,  28 }
-      }, {  // Band 3
-        {  73, 185, 240 }, {  27, 159, 222 }, {   2, 107, 172 },
-        {   1,  75, 127 }, {   1,  42,  73 }, {   1,  17,  29 }
-      }, {  // Band 4
-        {  62, 190, 238 }, {  21, 159, 222 }, {   2, 107, 172 },
-        {   1,  72, 122 }, {   1,  40,  71 }, {   1,  18,  32 }
-      }, {  // Band 5
-        {  61, 199, 240 }, {  27, 161, 226 }, {   4, 113, 180 },
-        {   1,  76, 129 }, {   1,  46,  80 }, {   1,  23,  41 }
-      }
-    }
-  }
+  {     // Y plane
+    {   // Intra
+      { // Band 0
+        { 125, 34, 187 },
+        { 52, 41, 133 },
+        { 6, 31, 56 } },
+      { // Band 1
+        { 37, 109, 153 },
+        { 51, 102, 147 },
+        { 23, 87, 128 },
+        { 8, 67, 101 },
+        { 1, 41, 63 },
+        { 1, 19, 29 } },
+      { // Band 2
+        { 31, 154, 185 },
+        { 17, 127, 175 },
+        { 6, 96, 145 },
+        { 2, 73, 114 },
+        { 1, 51, 82 },
+        { 1, 28, 45 } },
+      { // Band 3
+        { 23, 163, 200 },
+        { 10, 131, 185 },
+        { 2, 93, 148 },
+        { 1, 67, 111 },
+        { 1, 41, 69 },
+        { 1, 14, 24 } },
+      { // Band 4
+        { 29, 176, 217 },
+        { 12, 145, 201 },
+        { 3, 101, 156 },
+        { 1, 69, 111 },
+        { 1, 39, 63 },
+        { 1, 14, 23 } },
+      { // Band 5
+        { 57, 192, 233 },
+        { 25, 154, 215 },
+        { 6, 109, 167 },
+        { 3, 78, 118 },
+        { 1, 48, 69 },
+        { 1, 21, 29 } } },
+    {   // Inter
+      { // Band 0
+        { 202, 105, 245 },
+        { 108, 106, 216 },
+        { 18, 90, 144 } },
+      { // Band 1
+        { 33, 172, 219 },
+        { 64, 149, 206 },
+        { 14, 117, 177 },
+        { 5, 90, 141 },
+        { 2, 61, 95 },
+        { 1, 37, 57 } },
+      { // Band 2
+        { 33, 179, 220 },
+        { 11, 140, 198 },
+        { 1, 89, 148 },
+        { 1, 60, 104 },
+        { 1, 33, 57 },
+        { 1, 12, 21 } },
+      { // Band 3
+        { 30, 181, 221 },
+        { 8, 141, 198 },
+        { 1, 87, 145 },
+        { 1, 58, 100 },
+        { 1, 31, 55 },
+        { 1, 12, 20 } },
+      { // Band 4
+        { 32, 186, 224 },
+        { 7, 142, 198 },
+        { 1, 86, 143 },
+        { 1, 58, 100 },
+        { 1, 31, 55 },
+        { 1, 12, 22 } },
+      { // Band 5
+        { 57, 192, 227 },
+        { 20, 143, 204 },
+        { 3, 96, 154 },
+        { 1, 68, 112 },
+        { 1, 42, 69 },
+        { 1, 19, 32 } } } },
+  {     // UV plane
+    {   // Intra
+      { // Band 0
+        { 212, 35, 215 },
+        { 113, 47, 169 },
+        { 29, 48, 105 } },
+      { // Band 1
+        { 74, 129, 203 },
+        { 106, 120, 203 },
+        { 49, 107, 178 },
+        { 19, 84, 144 },
+        { 4, 50, 84 },
+        { 1, 15, 25 } },
+      { // Band 2
+        { 71, 172, 217 },
+        { 44, 141, 209 },
+        { 15, 102, 173 },
+        { 6, 76, 133 },
+        { 2, 51, 89 },
+        { 1, 24, 42 } },
+      { // Band 3
+        { 64, 185, 231 },
+        { 31, 148, 216 },
+        { 8, 103, 175 },
+        { 3, 74, 131 },
+        { 1, 46, 81 },
+        { 1, 18, 30 } },
+      { // Band 4
+        { 65, 196, 235 },
+        { 25, 157, 221 },
+        { 5, 105, 174 },
+        { 1, 67, 120 },
+        { 1, 38, 69 },
+        { 1, 15, 30 } },
+      { // Band 5
+        { 65, 204, 238 },
+        { 30, 156, 224 },
+        { 7, 107, 177 },
+        { 2, 70, 124 },
+        { 1, 42, 73 },
+        { 1, 18, 34 } } },
+    {   // Inter
+      { // Band 0
+        { 225, 86, 251 },
+        { 144, 104, 235 },
+        { 42, 99, 181 } },
+      { // Band 1
+        { 85, 175, 239 },
+        { 112, 165, 229 },
+        { 29, 136, 200 },
+        { 12, 103, 162 },
+        { 6, 77, 123 },
+        { 2, 53, 84 } },
+      { // Band 2
+        { 75, 183, 239 },
+        { 30, 155, 221 },
+        { 3, 106, 171 },
+        { 1, 74, 128 },
+        { 1, 44, 76 },
+        { 1, 17, 28 } },
+      { // Band 3
+        { 73, 185, 240 },
+        { 27, 159, 222 },
+        { 2, 107, 172 },
+        { 1, 75, 127 },
+        { 1, 42, 73 },
+        { 1, 17, 29 } },
+      { // Band 4
+        { 62, 190, 238 },
+        { 21, 159, 222 },
+        { 2, 107, 172 },
+        { 1, 72, 122 },
+        { 1, 40, 71 },
+        { 1, 18, 32 } },
+      { // Band 5
+        { 61, 199, 240 },
+        { 27, 161, 226 },
+        { 4, 113, 180 },
+        { 1, 76, 129 },
+        { 1, 46, 80 },
+        { 1, 23, 41 } } } }
 };
 
 static const vp10_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
-  {  // Y plane
-    {  // Intra
-      {  // Band 0
-        {   7,  27, 153 }, {   5,  30,  95 }, {   1,  16,  30 }
-      }, {  // Band 1
-        {  50,  75, 127 }, {  57,  75, 124 }, {  27,  67, 108 },
-        {  10,  54,  86 }, {   1,  33,  52 }, {   1,  12,  18 }
-      }, {  // Band 2
-        {  43, 125, 151 }, {  26, 108, 148 }, {   7,  83, 122 },
-        {   2,  59,  89 }, {   1,  38,  60 }, {   1,  17,  27 }
-      }, {  // Band 3
-        {  23, 144, 163 }, {  13, 112, 154 }, {   2,  75, 117 },
-        {   1,  50,  81 }, {   1,  31,  51 }, {   1,  14,  23 }
-      }, {  // Band 4
-        {  18, 162, 185 }, {   6, 123, 171 }, {   1,  78, 125 },
-        {   1,  51,  86 }, {   1,  31,  54 }, {   1,  14,  23 }
-      }, {  // Band 5
-        {  15, 199, 227 }, {   3, 150, 204 }, {   1,  91, 146 },
-        {   1,  55,  95 }, {   1,  30,  53 }, {   1,  11,  20 }
-      }
-    }, {  // Inter
-      {  // Band 0
-        {  19,  55, 240 }, {  19,  59, 196 }, {   3,  52, 105 }
-      }, {  // Band 1
-        {  41, 166, 207 }, { 104, 153, 199 }, {  31, 123, 181 },
-        {  14, 101, 152 }, {   5,  72, 106 }, {   1,  36,  52 }
-      }, {  // Band 2
-        {  35, 176, 211 }, {  12, 131, 190 }, {   2,  88, 144 },
-        {   1,  60, 101 }, {   1,  36,  60 }, {   1,  16,  28 }
-      }, {  // Band 3
-        {  28, 183, 213 }, {   8, 134, 191 }, {   1,  86, 142 },
-        {   1,  56,  96 }, {   1,  30,  53 }, {   1,  12,  20 }
-      }, {  // Band 4
-        {  20, 190, 215 }, {   4, 135, 192 }, {   1,  84, 139 },
-        {   1,  53,  91 }, {   1,  28,  49 }, {   1,  11,  20 }
-      }, {  // Band 5
-        {  13, 196, 216 }, {   2, 137, 192 }, {   1,  86, 143 },
-        {   1,  57,  99 }, {   1,  32,  56 }, {   1,  13,  24 }
-      }
-    }
-  }, {  // UV plane
-    {  // Intra
-      {  // Band 0
-        { 211,  29, 217 }, {  96,  47, 156 }, {  22,  43,  87 }
-      }, {  // Band 1
-        {  78, 120, 193 }, { 111, 116, 186 }, {  46, 102, 164 },
-        {  15,  80, 128 }, {   2,  49,  76 }, {   1,  18,  28 }
-      }, {  // Band 2
-        {  71, 161, 203 }, {  42, 132, 192 }, {  10,  98, 150 },
-        {   3,  69, 109 }, {   1,  44,  70 }, {   1,  18,  29 }
-      }, {  // Band 3
-        {  57, 186, 211 }, {  30, 140, 196 }, {   4,  93, 146 },
-        {   1,  62, 102 }, {   1,  38,  65 }, {   1,  16,  27 }
-      }, {  // Band 4
-        {  47, 199, 217 }, {  14, 145, 196 }, {   1,  88, 142 },
-        {   1,  57,  98 }, {   1,  36,  62 }, {   1,  15,  26 }
-      }, {  // Band 5
-        {  26, 219, 229 }, {   5, 155, 207 }, {   1,  94, 151 },
-        {   1,  60, 104 }, {   1,  36,  62 }, {   1,  16,  28 }
-      }
-    }, {  // Inter
-      {  // Band 0
-        { 233,  29, 248 }, { 146,  47, 220 }, {  43,  52, 140 }
-      }, {  // Band 1
-        { 100, 163, 232 }, { 179, 161, 222 }, {  63, 142, 204 },
-        {  37, 113, 174 }, {  26,  89, 137 }, {  18,  68,  97 }
-      }, {  // Band 2
-        {  85, 181, 230 }, {  32, 146, 209 }, {   7, 100, 164 },
-        {   3,  71, 121 }, {   1,  45,  77 }, {   1,  18,  30 }
-      }, {  // Band 3
-        {  65, 187, 230 }, {  20, 148, 207 }, {   2,  97, 159 },
-        {   1,  68, 116 }, {   1,  40,  70 }, {   1,  14,  29 }
-      }, {  // Band 4
-        {  40, 194, 227 }, {   8, 147, 204 }, {   1,  94, 155 },
-        {   1,  65, 112 }, {   1,  39,  66 }, {   1,  14,  26 }
-      }, {  // Band 5
-        {  16, 208, 228 }, {   3, 151, 207 }, {   1,  98, 160 },
-        {   1,  67, 117 }, {   1,  41,  74 }, {   1,  17,  31 }
-      }
-    }
-  }
+  {     // Y plane
+    {   // Intra
+      { // Band 0
+        { 7, 27, 153 },
+        { 5, 30, 95 },
+        { 1, 16, 30 } },
+      { // Band 1
+        { 50, 75, 127 },
+        { 57, 75, 124 },
+        { 27, 67, 108 },
+        { 10, 54, 86 },
+        { 1, 33, 52 },
+        { 1, 12, 18 } },
+      { // Band 2
+        { 43, 125, 151 },
+        { 26, 108, 148 },
+        { 7, 83, 122 },
+        { 2, 59, 89 },
+        { 1, 38, 60 },
+        { 1, 17, 27 } },
+      { // Band 3
+        { 23, 144, 163 },
+        { 13, 112, 154 },
+        { 2, 75, 117 },
+        { 1, 50, 81 },
+        { 1, 31, 51 },
+        { 1, 14, 23 } },
+      { // Band 4
+        { 18, 162, 185 },
+        { 6, 123, 171 },
+        { 1, 78, 125 },
+        { 1, 51, 86 },
+        { 1, 31, 54 },
+        { 1, 14, 23 } },
+      { // Band 5
+        { 15, 199, 227 },
+        { 3, 150, 204 },
+        { 1, 91, 146 },
+        { 1, 55, 95 },
+        { 1, 30, 53 },
+        { 1, 11, 20 } } },
+    {   // Inter
+      { // Band 0
+        { 19, 55, 240 },
+        { 19, 59, 196 },
+        { 3, 52, 105 } },
+      { // Band 1
+        { 41, 166, 207 },
+        { 104, 153, 199 },
+        { 31, 123, 181 },
+        { 14, 101, 152 },
+        { 5, 72, 106 },
+        { 1, 36, 52 } },
+      { // Band 2
+        { 35, 176, 211 },
+        { 12, 131, 190 },
+        { 2, 88, 144 },
+        { 1, 60, 101 },
+        { 1, 36, 60 },
+        { 1, 16, 28 } },
+      { // Band 3
+        { 28, 183, 213 },
+        { 8, 134, 191 },
+        { 1, 86, 142 },
+        { 1, 56, 96 },
+        { 1, 30, 53 },
+        { 1, 12, 20 } },
+      { // Band 4
+        { 20, 190, 215 },
+        { 4, 135, 192 },
+        { 1, 84, 139 },
+        { 1, 53, 91 },
+        { 1, 28, 49 },
+        { 1, 11, 20 } },
+      { // Band 5
+        { 13, 196, 216 },
+        { 2, 137, 192 },
+        { 1, 86, 143 },
+        { 1, 57, 99 },
+        { 1, 32, 56 },
+        { 1, 13, 24 } } } },
+  {     // UV plane
+    {   // Intra
+      { // Band 0
+        { 211, 29, 217 },
+        { 96, 47, 156 },
+        { 22, 43, 87 } },
+      { // Band 1
+        { 78, 120, 193 },
+        { 111, 116, 186 },
+        { 46, 102, 164 },
+        { 15, 80, 128 },
+        { 2, 49, 76 },
+        { 1, 18, 28 } },
+      { // Band 2
+        { 71, 161, 203 },
+        { 42, 132, 192 },
+        { 10, 98, 150 },
+        { 3, 69, 109 },
+        { 1, 44, 70 },
+        { 1, 18, 29 } },
+      { // Band 3
+        { 57, 186, 211 },
+        { 30, 140, 196 },
+        { 4, 93, 146 },
+        { 1, 62, 102 },
+        { 1, 38, 65 },
+        { 1, 16, 27 } },
+      { // Band 4
+        { 47, 199, 217 },
+        { 14, 145, 196 },
+        { 1, 88, 142 },
+        { 1, 57, 98 },
+        { 1, 36, 62 },
+        { 1, 15, 26 } },
+      { // Band 5
+        { 26, 219, 229 },
+        { 5, 155, 207 },
+        { 1, 94, 151 },
+        { 1, 60, 104 },
+        { 1, 36, 62 },
+        { 1, 16, 28 } } },
+    {   // Inter
+      { // Band 0
+        { 233, 29, 248 },
+        { 146, 47, 220 },
+        { 43, 52, 140 } },
+      { // Band 1
+        { 100, 163, 232 },
+        { 179, 161, 222 },
+        { 63, 142, 204 },
+        { 37, 113, 174 },
+        { 26, 89, 137 },
+        { 18, 68, 97 } },
+      { // Band 2
+        { 85, 181, 230 },
+        { 32, 146, 209 },
+        { 7, 100, 164 },
+        { 3, 71, 121 },
+        { 1, 45, 77 },
+        { 1, 18, 30 } },
+      { // Band 3
+        { 65, 187, 230 },
+        { 20, 148, 207 },
+        { 2, 97, 159 },
+        { 1, 68, 116 },
+        { 1, 40, 70 },
+        { 1, 14, 29 } },
+      { // Band 4
+        { 40, 194, 227 },
+        { 8, 147, 204 },
+        { 1, 94, 155 },
+        { 1, 65, 112 },
+        { 1, 39, 66 },
+        { 1, 14, 26 } },
+      { // Band 5
+        { 16, 208, 228 },
+        { 3, 151, 207 },
+        { 1, 98, 160 },
+        { 1, 67, 117 },
+        { 1, 41, 74 },
+        { 1, 17, 31 } } } }
 };
 
 static const vp10_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
-  {  // Y plane
-    {  // Intra
-      {  // Band 0
-        {  17,  38, 140 }, {   7,  34,  80 }, {   1,  17,  29 }
-      }, {  // Band 1
-        {  37,  75, 128 }, {  41,  76, 128 }, {  26,  66, 116 },
-        {  12,  52,  94 }, {   2,  32,  55 }, {   1,  10,  16 }
-      }, {  // Band 2
-        {  50, 127, 154 }, {  37, 109, 152 }, {  16,  82, 121 },
-        {   5,  59,  85 }, {   1,  35,  54 }, {   1,  13,  20 }
-      }, {  // Band 3
-        {  40, 142, 167 }, {  17, 110, 157 }, {   2,  71, 112 },
-        {   1,  44,  72 }, {   1,  27,  45 }, {   1,  11,  17 }
-      }, {  // Band 4
-        {  30, 175, 188 }, {   9, 124, 169 }, {   1,  74, 116 },
-        {   1,  48,  78 }, {   1,  30,  49 }, {   1,  11,  18 }
-      }, {  // Band 5
-        {  10, 222, 223 }, {   2, 150, 194 }, {   1,  83, 128 },
-        {   1,  48,  79 }, {   1,  27,  45 }, {   1,  11,  17 }
-      }
-    }, {  // Inter
-      {  // Band 0
-        {  36,  41, 235 }, {  29,  36, 193 }, {  10,  27, 111 }
-      }, {  // Band 1
-        {  85, 165, 222 }, { 177, 162, 215 }, { 110, 135, 195 },
-        {  57, 113, 168 }, {  23,  83, 120 }, {  10,  49,  61 }
-      }, {  // Band 2
-        {  85, 190, 223 }, {  36, 139, 200 }, {   5,  90, 146 },
-        {   1,  60, 103 }, {   1,  38,  65 }, {   1,  18,  30 }
-      }, {  // Band 3
-        {  72, 202, 223 }, {  23, 141, 199 }, {   2,  86, 140 },
-        {   1,  56,  97 }, {   1,  36,  61 }, {   1,  16,  27 }
-      }, {  // Band 4
-        {  55, 218, 225 }, {  13, 145, 200 }, {   1,  86, 141 },
-        {   1,  57,  99 }, {   1,  35,  61 }, {   1,  13,  22 }
-      }, {  // Band 5
-        {  15, 235, 212 }, {   1, 132, 184 }, {   1,  84, 139 },
-        {   1,  57,  97 }, {   1,  34,  56 }, {   1,  14,  23 }
-      }
-    }
-  }, {  // UV plane
-    {  // Intra
-      {  // Band 0
-        { 181,  21, 201 }, {  61,  37, 123 }, {  10,  38,  71 }
-      }, {  // Band 1
-        {  47, 106, 172 }, {  95, 104, 173 }, {  42,  93, 159 },
-        {  18,  77, 131 }, {   4,  50,  81 }, {   1,  17,  23 }
-      }, {  // Band 2
-        {  62, 147, 199 }, {  44, 130, 189 }, {  28, 102, 154 },
-        {  18,  75, 115 }, {   2,  44,  65 }, {   1,  12,  19 }
-      }, {  // Band 3
-        {  55, 153, 210 }, {  24, 130, 194 }, {   3,  93, 146 },
-        {   1,  61,  97 }, {   1,  31,  50 }, {   1,  10,  16 }
-      }, {  // Band 4
-        {  49, 186, 223 }, {  17, 148, 204 }, {   1,  96, 142 },
-        {   1,  53,  83 }, {   1,  26,  44 }, {   1,  11,  17 }
-      }, {  // Band 5
-        {  13, 217, 212 }, {   2, 136, 180 }, {   1,  78, 124 },
-        {   1,  50,  83 }, {   1,  29,  49 }, {   1,  14,  23 }
-      }
-    }, {  // Inter
-      {  // Band 0
-        { 197,  13, 247 }, {  82,  17, 222 }, {  25,  17, 162 }
-      }, {  // Band 1
-        { 126, 186, 247 }, { 234, 191, 243 }, { 176, 177, 234 },
-        { 104, 158, 220 }, {  66, 128, 186 }, {  55,  90, 137 }
-      }, {  // Band 2
-        { 111, 197, 242 }, {  46, 158, 219 }, {   9, 104, 171 },
-        {   2,  65, 125 }, {   1,  44,  80 }, {   1,  17,  91 }
-      }, {  // Band 3
-        { 104, 208, 245 }, {  39, 168, 224 }, {   3, 109, 162 },
-        {   1,  79, 124 }, {   1,  50, 102 }, {   1,  43, 102 }
-      }, {  // Band 4
-        {  84, 220, 246 }, {  31, 177, 231 }, {   2, 115, 180 },
-        {   1,  79, 134 }, {   1,  55,  77 }, {   1,  60,  79 }
-      }, {  // Band 5
-        {  43, 243, 240 }, {   8, 180, 217 }, {   1, 115, 166 },
-        {   1,  84, 121 }, {   1,  51,  67 }, {   1,  16,   6 }
-      }
-    }
-  }
+  {     // Y plane
+    {   // Intra
+      { // Band 0
+        { 17, 38, 140 },
+        { 7, 34, 80 },
+        { 1, 17, 29 } },
+      { // Band 1
+        { 37, 75, 128 },
+        { 41, 76, 128 },
+        { 26, 66, 116 },
+        { 12, 52, 94 },
+        { 2, 32, 55 },
+        { 1, 10, 16 } },
+      { // Band 2
+        { 50, 127, 154 },
+        { 37, 109, 152 },
+        { 16, 82, 121 },
+        { 5, 59, 85 },
+        { 1, 35, 54 },
+        { 1, 13, 20 } },
+      { // Band 3
+        { 40, 142, 167 },
+        { 17, 110, 157 },
+        { 2, 71, 112 },
+        { 1, 44, 72 },
+        { 1, 27, 45 },
+        { 1, 11, 17 } },
+      { // Band 4
+        { 30, 175, 188 },
+        { 9, 124, 169 },
+        { 1, 74, 116 },
+        { 1, 48, 78 },
+        { 1, 30, 49 },
+        { 1, 11, 18 } },
+      { // Band 5
+        { 10, 222, 223 },
+        { 2, 150, 194 },
+        { 1, 83, 128 },
+        { 1, 48, 79 },
+        { 1, 27, 45 },
+        { 1, 11, 17 } } },
+    {   // Inter
+      { // Band 0
+        { 36, 41, 235 },
+        { 29, 36, 193 },
+        { 10, 27, 111 } },
+      { // Band 1
+        { 85, 165, 222 },
+        { 177, 162, 215 },
+        { 110, 135, 195 },
+        { 57, 113, 168 },
+        { 23, 83, 120 },
+        { 10, 49, 61 } },
+      { // Band 2
+        { 85, 190, 223 },
+        { 36, 139, 200 },
+        { 5, 90, 146 },
+        { 1, 60, 103 },
+        { 1, 38, 65 },
+        { 1, 18, 30 } },
+      { // Band 3
+        { 72, 202, 223 },
+        { 23, 141, 199 },
+        { 2, 86, 140 },
+        { 1, 56, 97 },
+        { 1, 36, 61 },
+        { 1, 16, 27 } },
+      { // Band 4
+        { 55, 218, 225 },
+        { 13, 145, 200 },
+        { 1, 86, 141 },
+        { 1, 57, 99 },
+        { 1, 35, 61 },
+        { 1, 13, 22 } },
+      { // Band 5
+        { 15, 235, 212 },
+        { 1, 132, 184 },
+        { 1, 84, 139 },
+        { 1, 57, 97 },
+        { 1, 34, 56 },
+        { 1, 14, 23 } } } },
+  {     // UV plane
+    {   // Intra
+      { // Band 0
+        { 181, 21, 201 },
+        { 61, 37, 123 },
+        { 10, 38, 71 } },
+      { // Band 1
+        { 47, 106, 172 },
+        { 95, 104, 173 },
+        { 42, 93, 159 },
+        { 18, 77, 131 },
+        { 4, 50, 81 },
+        { 1, 17, 23 } },
+      { // Band 2
+        { 62, 147, 199 },
+        { 44, 130, 189 },
+        { 28, 102, 154 },
+        { 18, 75, 115 },
+        { 2, 44, 65 },
+        { 1, 12, 19 } },
+      { // Band 3
+        { 55, 153, 210 },
+        { 24, 130, 194 },
+        { 3, 93, 146 },
+        { 1, 61, 97 },
+        { 1, 31, 50 },
+        { 1, 10, 16 } },
+      { // Band 4
+        { 49, 186, 223 },
+        { 17, 148, 204 },
+        { 1, 96, 142 },
+        { 1, 53, 83 },
+        { 1, 26, 44 },
+        { 1, 11, 17 } },
+      { // Band 5
+        { 13, 217, 212 },
+        { 2, 136, 180 },
+        { 1, 78, 124 },
+        { 1, 50, 83 },
+        { 1, 29, 49 },
+        { 1, 14, 23 } } },
+    {   // Inter
+      { // Band 0
+        { 197, 13, 247 },
+        { 82, 17, 222 },
+        { 25, 17, 162 } },
+      { // Band 1
+        { 126, 186, 247 },
+        { 234, 191, 243 },
+        { 176, 177, 234 },
+        { 104, 158, 220 },
+        { 66, 128, 186 },
+        { 55, 90, 137 } },
+      { // Band 2
+        { 111, 197, 242 },
+        { 46, 158, 219 },
+        { 9, 104, 171 },
+        { 2, 65, 125 },
+        { 1, 44, 80 },
+        { 1, 17, 91 } },
+      { // Band 3
+        { 104, 208, 245 },
+        { 39, 168, 224 },
+        { 3, 109, 162 },
+        { 1, 79, 124 },
+        { 1, 50, 102 },
+        { 1, 43, 102 } },
+      { // Band 4
+        { 84, 220, 246 },
+        { 31, 177, 231 },
+        { 2, 115, 180 },
+        { 1, 79, 134 },
+        { 1, 55, 77 },
+        { 1, 60, 79 } },
+      { // Band 5
+        { 43, 243, 240 },
+        { 8, 180, 217 },
+        { 1, 115, 166 },
+        { 1, 84, 121 },
+        { 1, 51, 67 },
+        { 1, 16, 6 } } } }
 };
 
 static void extend_to_full_distribution(vpx_prob *probs, vpx_prob p) {
@@ -775,7 +1078,7 @@ static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size,
   vp10_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
   const vp10_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
   vp10_coeff_count_model *counts = cm->counts.coef[tx_size];
-  unsigned int (*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
+  unsigned int(*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
       cm->counts.eob_branch[tx_size];
   int i, j, k, l, m;
 
@@ -788,14 +1091,12 @@ static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size,
           const int n2 = counts[i][j][k][l][TWO_TOKEN];
           const int neob = counts[i][j][k][l][EOB_MODEL_TOKEN];
           const unsigned int branch_ct[UNCONSTRAINED_NODES][2] = {
-            { neob, eob_counts[i][j][k][l] - neob },
-            { n0, n1 + n2 },
-            { n1, n2 }
+            { neob, eob_counts[i][j][k][l] - neob }, { n0, n1 + n2 }, { n1, n2 }
           };
           for (m = 0; m < UNCONSTRAINED_NODES; ++m)
-            probs[i][j][k][l][m] = merge_probs(pre_probs[i][j][k][l][m],
-                                               branch_ct[m],
-                                               count_sat, update_factor);
+            probs[i][j][k][l][m] =
+                merge_probs(pre_probs[i][j][k][l][m], branch_ct[m], count_sat,
+                            update_factor);
         }
 }
 
@@ -807,7 +1108,7 @@ void vp10_adapt_coef_probs(VP10_COMMON *cm) {
     update_factor = COEF_MAX_UPDATE_FACTOR_KEY;
     count_sat = COEF_COUNT_SAT_KEY;
   } else if (cm->last_frame_type == KEY_FRAME) {
-    update_factor = COEF_MAX_UPDATE_FACTOR_AFTER_KEY;  /* adapt quickly */
+    update_factor = COEF_MAX_UPDATE_FACTOR_AFTER_KEY; /* adapt quickly */
     count_sat = COEF_COUNT_SAT_AFTER_KEY;
   } else {
     update_factor = COEF_MAX_UPDATE_FACTOR;
diff --git a/vp10/common/entropy.h b/vp10/common/entropy.h
index dd62ed2e8b7d37f0d00674c33521da8ff4f3d60e..3dd137c4c930365915733c05686fb86594159df5 100644
--- a/vp10/common/entropy.h
+++ b/vp10/common/entropy.h
@@ -21,22 +21,22 @@
 extern "C" {
 #endif
 
-#define DIFF_UPDATE_PROB        252
-#define GROUP_DIFF_UPDATE_PROB  252
+#define DIFF_UPDATE_PROB 252
+#define GROUP_DIFF_UPDATE_PROB 252
 
 // Coefficient token alphabet
-#define ZERO_TOKEN      0   // 0     Extra Bits 0+0
-#define ONE_TOKEN       1   // 1     Extra Bits 0+1
-#define TWO_TOKEN       2   // 2     Extra Bits 0+1
-#define THREE_TOKEN     3   // 3     Extra Bits 0+1
-#define FOUR_TOKEN      4   // 4     Extra Bits 0+1
+#define ZERO_TOKEN 0        // 0     Extra Bits 0+0
+#define ONE_TOKEN 1         // 1     Extra Bits 0+1
+#define TWO_TOKEN 2         // 2     Extra Bits 0+1
+#define THREE_TOKEN 3       // 3     Extra Bits 0+1
+#define FOUR_TOKEN 4        // 4     Extra Bits 0+1
 #define CATEGORY1_TOKEN 5   // 5-6   Extra Bits 1+1
 #define CATEGORY2_TOKEN 6   // 7-10  Extra Bits 2+1
 #define CATEGORY3_TOKEN 7   // 11-18 Extra Bits 3+1
 #define CATEGORY4_TOKEN 8   // 19-34 Extra Bits 4+1
 #define CATEGORY5_TOKEN 9   // 35-66 Extra Bits 5+1
 #define CATEGORY6_TOKEN 10  // 67+   Extra Bits 14+1
-#define EOB_TOKEN       11  // EOB   Extra Bits 0+0
+#define EOB_TOKEN 11        // EOB   Extra Bits 0+0
 
 #define ENTROPY_TOKENS 12
 
@@ -44,12 +44,12 @@ extern "C" {
 
 DECLARE_ALIGNED(16, extern const uint8_t, vp10_pt_energy_class[ENTROPY_TOKENS]);
 
-#define CAT1_MIN_VAL    5
-#define CAT2_MIN_VAL    7
-#define CAT3_MIN_VAL   11
-#define CAT4_MIN_VAL   19
-#define CAT5_MIN_VAL   35
-#define CAT6_MIN_VAL   67
+#define CAT1_MIN_VAL 5
+#define CAT2_MIN_VAL 7
+#define CAT3_MIN_VAL 11
+#define CAT4_MIN_VAL 19
+#define CAT5_MIN_VAL 35
+#define CAT6_MIN_VAL 67
 
 // Extra bit probabilities.
 DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob[1]);
@@ -91,10 +91,10 @@ extern const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS];
 extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS];
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-#define DCT_MAX_VALUE           16384
+#define DCT_MAX_VALUE 16384
 #if CONFIG_VPX_HIGHBITDEPTH
-#define DCT_MAX_VALUE_HIGH10    65536
-#define DCT_MAX_VALUE_HIGH12   262144
+#define DCT_MAX_VALUE_HIGH10 65536
+#define DCT_MAX_VALUE_HIGH12 262144
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 /* Coefficients are predicted via a 3-dimensional probability table. */
@@ -125,13 +125,13 @@ extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS];
 
 // #define ENTROPY_STATS
 
-typedef unsigned int vp10_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
-                                    [ENTROPY_TOKENS];
-typedef unsigned int vp10_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
-                                    [ENTROPY_NODES][2];
+typedef unsigned int
+    vp10_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
+typedef unsigned int
+    vp10_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
 
-#define SUBEXP_PARAM                4   /* Subexponential code parameter */
-#define MODULUS_PARAM               13  /* Modulus parameter */
+#define SUBEXP_PARAM 4   /* Subexponential code parameter */
+#define MODULUS_PARAM 13 /* Modulus parameter */
 
 struct VP10Common;
 void vp10_default_coef_probs(struct VP10Common *cm);
@@ -156,20 +156,19 @@ static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
 
 #define COEFF_PROB_MODELS 255
 
-#define UNCONSTRAINED_NODES         3
+#define UNCONSTRAINED_NODES 3
 
-#define PIVOT_NODE                  2   // which node is pivot
+#define PIVOT_NODE 2  // which node is pivot
 
 #define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
 extern const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
 extern const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
 
-typedef vpx_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS]
-                                      [COEFF_CONTEXTS][UNCONSTRAINED_NODES];
+typedef vpx_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
+                                       [UNCONSTRAINED_NODES];
 
-typedef unsigned int vp10_coeff_count_model[REF_TYPES][COEF_BANDS]
-                                          [COEFF_CONTEXTS]
-                                          [UNCONSTRAINED_NODES + 1];
+typedef unsigned int vp10_coeff_count_model
+    [REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
 
 void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full);
 
@@ -191,19 +190,17 @@ static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a,
       break;
     case TX_8X8:
       above_ec = !!*(const uint16_t *)a;
-      left_ec  = !!*(const uint16_t *)l;
+      left_ec = !!*(const uint16_t *)l;
       break;
     case TX_16X16:
       above_ec = !!*(const uint32_t *)a;
-      left_ec  = !!*(const uint32_t *)l;
+      left_ec = !!*(const uint32_t *)l;
       break;
     case TX_32X32:
       above_ec = !!*(const uint64_t *)a;
-      left_ec  = !!*(const uint64_t *)l;
-      break;
-    default:
-      assert(0 && "Invalid transform size.");
+      left_ec = !!*(const uint64_t *)l;
       break;
+    default: assert(0 && "Invalid transform size."); break;
   }
 
   return combine_entropy_contexts(above_ec, left_ec);
diff --git a/vp10/common/entropymode.c b/vp10/common/entropymode.c
index 78f3650f8b5dbae0a53b652c345636a77925167f..b9ad5c944a183e670a8b23847686855356832145 100644
--- a/vp10/common/entropymode.c
+++ b/vp10/common/entropymode.c
@@ -13,239 +13,254 @@
 #include "vp10/common/onyxc_int.h"
 #include "vp10/common/seg_common.h"
 
-const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] = {
-  {  // above = dc
-    { 137,  30,  42, 148, 151, 207,  70,  52,  91 },  // left = dc
-    {  92,  45, 102, 136, 116, 180,  74,  90, 100 },  // left = v
-    {  73,  32,  19, 187, 222, 215,  46,  34, 100 },  // left = h
-    {  91,  30,  32, 116, 121, 186,  93,  86,  94 },  // left = d45
-    {  72,  35,  36, 149,  68, 206,  68,  63, 105 },  // left = d135
-    {  73,  31,  28, 138,  57, 124,  55, 122, 151 },  // left = d117
-    {  67,  23,  21, 140, 126, 197,  40,  37, 171 },  // left = d153
-    {  86,  27,  28, 128, 154, 212,  45,  43,  53 },  // left = d207
-    {  74,  32,  27, 107,  86, 160,  63, 134, 102 },  // left = d63
-    {  59,  67,  44, 140, 161, 202,  78,  67, 119 }   // left = tm
-  }, {  // above = v
-    {  63,  36, 126, 146, 123, 158,  60,  90,  96 },  // left = dc
-    {  43,  46, 168, 134, 107, 128,  69, 142,  92 },  // left = v
-    {  44,  29,  68, 159, 201, 177,  50,  57,  77 },  // left = h
-    {  58,  38,  76, 114,  97, 172,  78, 133,  92 },  // left = d45
-    {  46,  41,  76, 140,  63, 184,  69, 112,  57 },  // left = d135
-    {  38,  32,  85, 140,  46, 112,  54, 151, 133 },  // left = d117
-    {  39,  27,  61, 131, 110, 175,  44,  75, 136 },  // left = d153
-    {  52,  30,  74, 113, 130, 175,  51,  64,  58 },  // left = d207
-    {  47,  35,  80, 100,  74, 143,  64, 163,  74 },  // left = d63
-    {  36,  61, 116, 114, 128, 162,  80, 125,  82 }   // left = tm
-  }, {  // above = h
-    {  82,  26,  26, 171, 208, 204,  44,  32, 105 },  // left = dc
-    {  55,  44,  68, 166, 179, 192,  57,  57, 108 },  // left = v
-    {  42,  26,  11, 199, 241, 228,  23,  15,  85 },  // left = h
-    {  68,  42,  19, 131, 160, 199,  55,  52,  83 },  // left = d45
-    {  58,  50,  25, 139, 115, 232,  39,  52, 118 },  // left = d135
-    {  50,  35,  33, 153, 104, 162,  64,  59, 131 },  // left = d117
-    {  44,  24,  16, 150, 177, 202,  33,  19, 156 },  // left = d153
-    {  55,  27,  12, 153, 203, 218,  26,  27,  49 },  // left = d207
-    {  53,  49,  21, 110, 116, 168,  59,  80,  76 },  // left = d63
-    {  38,  72,  19, 168, 203, 212,  50,  50, 107 }   // left = tm
-  }, {  // above = d45
-    { 103,  26,  36, 129, 132, 201,  83,  80,  93 },  // left = dc
-    {  59,  38,  83, 112, 103, 162,  98, 136,  90 },  // left = v
-    {  62,  30,  23, 158, 200, 207,  59,  57,  50 },  // left = h
-    {  67,  30,  29,  84,  86, 191, 102,  91,  59 },  // left = d45
-    {  60,  32,  33, 112,  71, 220,  64,  89, 104 },  // left = d135
-    {  53,  26,  34, 130,  56, 149,  84, 120, 103 },  // left = d117
-    {  53,  21,  23, 133, 109, 210,  56,  77, 172 },  // left = d153
-    {  77,  19,  29, 112, 142, 228,  55,  66,  36 },  // left = d207
-    {  61,  29,  29,  93,  97, 165,  83, 175, 162 },  // left = d63
-    {  47,  47,  43, 114, 137, 181, 100,  99,  95 }   // left = tm
-  }, {  // above = d135
-    {  69,  23,  29, 128,  83, 199,  46,  44, 101 },  // left = dc
-    {  53,  40,  55, 139,  69, 183,  61,  80, 110 },  // left = v
-    {  40,  29,  19, 161, 180, 207,  43,  24,  91 },  // left = h
-    {  60,  34,  19, 105,  61, 198,  53,  64,  89 },  // left = d45
-    {  52,  31,  22, 158,  40, 209,  58,  62,  89 },  // left = d135
-    {  44,  31,  29, 147,  46, 158,  56, 102, 198 },  // left = d117
-    {  35,  19,  12, 135,  87, 209,  41,  45, 167 },  // left = d153
-    {  55,  25,  21, 118,  95, 215,  38,  39,  66 },  // left = d207
-    {  51,  38,  25, 113,  58, 164,  70,  93,  97 },  // left = d63
-    {  47,  54,  34, 146, 108, 203,  72, 103, 151 }   // left = tm
-  }, {  // above = d117
-    {  64,  19,  37, 156,  66, 138,  49,  95, 133 },  // left = dc
-    {  46,  27,  80, 150,  55, 124,  55, 121, 135 },  // left = v
-    {  36,  23,  27, 165, 149, 166,  54,  64, 118 },  // left = h
-    {  53,  21,  36, 131,  63, 163,  60, 109,  81 },  // left = d45
-    {  40,  26,  35, 154,  40, 185,  51,  97, 123 },  // left = d135
-    {  35,  19,  34, 179,  19,  97,  48, 129, 124 },  // left = d117
-    {  36,  20,  26, 136,  62, 164,  33,  77, 154 },  // left = d153
-    {  45,  18,  32, 130,  90, 157,  40,  79,  91 },  // left = d207
-    {  45,  26,  28, 129,  45, 129,  49, 147, 123 },  // left = d63
-    {  38,  44,  51, 136,  74, 162,  57,  97, 121 }   // left = tm
-  }, {  // above = d153
-    {  75,  17,  22, 136, 138, 185,  32,  34, 166 },  // left = dc
-    {  56,  39,  58, 133, 117, 173,  48,  53, 187 },  // left = v
-    {  35,  21,  12, 161, 212, 207,  20,  23, 145 },  // left = h
-    {  56,  29,  19, 117, 109, 181,  55,  68, 112 },  // left = d45
-    {  47,  29,  17, 153,  64, 220,  59,  51, 114 },  // left = d135
-    {  46,  16,  24, 136,  76, 147,  41,  64, 172 },  // left = d117
-    {  34,  17,  11, 108, 152, 187,  13,  15, 209 },  // left = d153
-    {  51,  24,  14, 115, 133, 209,  32,  26, 104 },  // left = d207
-    {  55,  30,  18, 122,  79, 179,  44,  88, 116 },  // left = d63
-    {  37,  49,  25, 129, 168, 164,  41,  54, 148 }   // left = tm
-  }, {  // above = d207
-    {  82,  22,  32, 127, 143, 213,  39,  41,  70 },  // left = dc
-    {  62,  44,  61, 123, 105, 189,  48,  57,  64 },  // left = v
-    {  47,  25,  17, 175, 222, 220,  24,  30,  86 },  // left = h
-    {  68,  36,  17, 106, 102, 206,  59,  74,  74 },  // left = d45
-    {  57,  39,  23, 151,  68, 216,  55,  63,  58 },  // left = d135
-    {  49,  30,  35, 141,  70, 168,  82,  40, 115 },  // left = d117
-    {  51,  25,  15, 136, 129, 202,  38,  35, 139 },  // left = d153
-    {  68,  26,  16, 111, 141, 215,  29,  28,  28 },  // left = d207
-    {  59,  39,  19, 114,  75, 180,  77, 104,  42 },  // left = d63
-    {  40,  61,  26, 126, 152, 206,  61,  59,  93 }   // left = tm
-  }, {  // above = d63
-    {  78,  23,  39, 111, 117, 170,  74, 124,  94 },  // left = dc
-    {  48,  34,  86, 101,  92, 146,  78, 179, 134 },  // left = v
-    {  47,  22,  24, 138, 187, 178,  68,  69,  59 },  // left = h
-    {  56,  25,  33, 105, 112, 187,  95, 177, 129 },  // left = d45
-    {  48,  31,  27, 114,  63, 183,  82, 116,  56 },  // left = d135
-    {  43,  28,  37, 121,  63, 123,  61, 192, 169 },  // left = d117
-    {  42,  17,  24, 109,  97, 177,  56,  76, 122 },  // left = d153
-    {  58,  18,  28, 105, 139, 182,  70,  92,  63 },  // left = d207
-    {  46,  23,  32,  74,  86, 150,  67, 183,  88 },  // left = d63
-    {  36,  38,  48,  92, 122, 165,  88, 137,  91 }   // left = tm
-  }, {  // above = tm
-    {  65,  70,  60, 155, 159, 199,  61,  60,  81 },  // left = dc
-    {  44,  78, 115, 132, 119, 173,  71, 112,  93 },  // left = v
-    {  39,  38,  21, 184, 227, 206,  42,  32,  64 },  // left = h
-    {  58,  47,  36, 124, 137, 193,  80,  82,  78 },  // left = d45
-    {  49,  50,  35, 144,  95, 205,  63,  78,  59 },  // left = d135
-    {  41,  53,  52, 148,  71, 142,  65, 128,  51 },  // left = d117
-    {  40,  36,  28, 143, 143, 202,  40,  55, 137 },  // left = d153
-    {  52,  34,  29, 129, 183, 227,  42,  35,  43 },  // left = d207
-    {  42,  44,  44, 104, 105, 164,  64, 130,  80 },  // left = d63
-    {  43,  81,  53, 140, 169, 204,  68,  84,  72 }   // left = tm
-  }
-};
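+/* A vpx_prob is a probability scaled by 256: a value p means the zero branch
+   at the corresponding node of the intra-mode tree is taken with probability
+   p / 256. Each [above][left] mode pair selects one row of INTRA_MODES - 1
+   node probabilities. */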
+const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
+    { {
+          // above = dc
+          { 137, 30, 42, 148, 151, 207, 70, 52, 91 },   // left = dc
+          { 92, 45, 102, 136, 116, 180, 74, 90, 100 },  // left = v
+          { 73, 32, 19, 187, 222, 215, 46, 34, 100 },   // left = h
+          { 91, 30, 32, 116, 121, 186, 93, 86, 94 },    // left = d45
+          { 72, 35, 36, 149, 68, 206, 68, 63, 105 },    // left = d135
+          { 73, 31, 28, 138, 57, 124, 55, 122, 151 },   // left = d117
+          { 67, 23, 21, 140, 126, 197, 40, 37, 171 },   // left = d153
+          { 86, 27, 28, 128, 154, 212, 45, 43, 53 },    // left = d207
+          { 74, 32, 27, 107, 86, 160, 63, 134, 102 },   // left = d63
+          { 59, 67, 44, 140, 161, 202, 78, 67, 119 }    // left = tm
+      },
+      {
+          // above = v
+          { 63, 36, 126, 146, 123, 158, 60, 90, 96 },   // left = dc
+          { 43, 46, 168, 134, 107, 128, 69, 142, 92 },  // left = v
+          { 44, 29, 68, 159, 201, 177, 50, 57, 77 },    // left = h
+          { 58, 38, 76, 114, 97, 172, 78, 133, 92 },    // left = d45
+          { 46, 41, 76, 140, 63, 184, 69, 112, 57 },    // left = d135
+          { 38, 32, 85, 140, 46, 112, 54, 151, 133 },   // left = d117
+          { 39, 27, 61, 131, 110, 175, 44, 75, 136 },   // left = d153
+          { 52, 30, 74, 113, 130, 175, 51, 64, 58 },    // left = d207
+          { 47, 35, 80, 100, 74, 143, 64, 163, 74 },    // left = d63
+          { 36, 61, 116, 114, 128, 162, 80, 125, 82 }   // left = tm
+      },
+      {
+          // above = h
+          { 82, 26, 26, 171, 208, 204, 44, 32, 105 },  // left = dc
+          { 55, 44, 68, 166, 179, 192, 57, 57, 108 },  // left = v
+          { 42, 26, 11, 199, 241, 228, 23, 15, 85 },   // left = h
+          { 68, 42, 19, 131, 160, 199, 55, 52, 83 },   // left = d45
+          { 58, 50, 25, 139, 115, 232, 39, 52, 118 },  // left = d135
+          { 50, 35, 33, 153, 104, 162, 64, 59, 131 },  // left = d117
+          { 44, 24, 16, 150, 177, 202, 33, 19, 156 },  // left = d153
+          { 55, 27, 12, 153, 203, 218, 26, 27, 49 },   // left = d207
+          { 53, 49, 21, 110, 116, 168, 59, 80, 76 },   // left = d63
+          { 38, 72, 19, 168, 203, 212, 50, 50, 107 }   // left = tm
+      },
+      {
+          // above = d45
+          { 103, 26, 36, 129, 132, 201, 83, 80, 93 },  // left = dc
+          { 59, 38, 83, 112, 103, 162, 98, 136, 90 },  // left = v
+          { 62, 30, 23, 158, 200, 207, 59, 57, 50 },   // left = h
+          { 67, 30, 29, 84, 86, 191, 102, 91, 59 },    // left = d45
+          { 60, 32, 33, 112, 71, 220, 64, 89, 104 },   // left = d135
+          { 53, 26, 34, 130, 56, 149, 84, 120, 103 },  // left = d117
+          { 53, 21, 23, 133, 109, 210, 56, 77, 172 },  // left = d153
+          { 77, 19, 29, 112, 142, 228, 55, 66, 36 },   // left = d207
+          { 61, 29, 29, 93, 97, 165, 83, 175, 162 },   // left = d63
+          { 47, 47, 43, 114, 137, 181, 100, 99, 95 }   // left = tm
+      },
+      {
+          // above = d135
+          { 69, 23, 29, 128, 83, 199, 46, 44, 101 },   // left = dc
+          { 53, 40, 55, 139, 69, 183, 61, 80, 110 },   // left = v
+          { 40, 29, 19, 161, 180, 207, 43, 24, 91 },   // left = h
+          { 60, 34, 19, 105, 61, 198, 53, 64, 89 },    // left = d45
+          { 52, 31, 22, 158, 40, 209, 58, 62, 89 },    // left = d135
+          { 44, 31, 29, 147, 46, 158, 56, 102, 198 },  // left = d117
+          { 35, 19, 12, 135, 87, 209, 41, 45, 167 },   // left = d153
+          { 55, 25, 21, 118, 95, 215, 38, 39, 66 },    // left = d207
+          { 51, 38, 25, 113, 58, 164, 70, 93, 97 },    // left = d63
+          { 47, 54, 34, 146, 108, 203, 72, 103, 151 }  // left = tm
+      },
+      {
+          // above = d117
+          { 64, 19, 37, 156, 66, 138, 49, 95, 133 },   // left = dc
+          { 46, 27, 80, 150, 55, 124, 55, 121, 135 },  // left = v
+          { 36, 23, 27, 165, 149, 166, 54, 64, 118 },  // left = h
+          { 53, 21, 36, 131, 63, 163, 60, 109, 81 },   // left = d45
+          { 40, 26, 35, 154, 40, 185, 51, 97, 123 },   // left = d135
+          { 35, 19, 34, 179, 19, 97, 48, 129, 124 },   // left = d117
+          { 36, 20, 26, 136, 62, 164, 33, 77, 154 },   // left = d153
+          { 45, 18, 32, 130, 90, 157, 40, 79, 91 },    // left = d207
+          { 45, 26, 28, 129, 45, 129, 49, 147, 123 },  // left = d63
+          { 38, 44, 51, 136, 74, 162, 57, 97, 121 }    // left = tm
+      },
+      {
+          // above = d153
+          { 75, 17, 22, 136, 138, 185, 32, 34, 166 },  // left = dc
+          { 56, 39, 58, 133, 117, 173, 48, 53, 187 },  // left = v
+          { 35, 21, 12, 161, 212, 207, 20, 23, 145 },  // left = h
+          { 56, 29, 19, 117, 109, 181, 55, 68, 112 },  // left = d45
+          { 47, 29, 17, 153, 64, 220, 59, 51, 114 },   // left = d135
+          { 46, 16, 24, 136, 76, 147, 41, 64, 172 },   // left = d117
+          { 34, 17, 11, 108, 152, 187, 13, 15, 209 },  // left = d153
+          { 51, 24, 14, 115, 133, 209, 32, 26, 104 },  // left = d207
+          { 55, 30, 18, 122, 79, 179, 44, 88, 116 },   // left = d63
+          { 37, 49, 25, 129, 168, 164, 41, 54, 148 }   // left = tm
+      },
+      {
+          // above = d207
+          { 82, 22, 32, 127, 143, 213, 39, 41, 70 },   // left = dc
+          { 62, 44, 61, 123, 105, 189, 48, 57, 64 },   // left = v
+          { 47, 25, 17, 175, 222, 220, 24, 30, 86 },   // left = h
+          { 68, 36, 17, 106, 102, 206, 59, 74, 74 },   // left = d45
+          { 57, 39, 23, 151, 68, 216, 55, 63, 58 },    // left = d135
+          { 49, 30, 35, 141, 70, 168, 82, 40, 115 },   // left = d117
+          { 51, 25, 15, 136, 129, 202, 38, 35, 139 },  // left = d153
+          { 68, 26, 16, 111, 141, 215, 29, 28, 28 },   // left = d207
+          { 59, 39, 19, 114, 75, 180, 77, 104, 42 },   // left = d63
+          { 40, 61, 26, 126, 152, 206, 61, 59, 93 }    // left = tm
+      },
+      {
+          // above = d63
+          { 78, 23, 39, 111, 117, 170, 74, 124, 94 },   // left = dc
+          { 48, 34, 86, 101, 92, 146, 78, 179, 134 },   // left = v
+          { 47, 22, 24, 138, 187, 178, 68, 69, 59 },    // left = h
+          { 56, 25, 33, 105, 112, 187, 95, 177, 129 },  // left = d45
+          { 48, 31, 27, 114, 63, 183, 82, 116, 56 },    // left = d135
+          { 43, 28, 37, 121, 63, 123, 61, 192, 169 },   // left = d117
+          { 42, 17, 24, 109, 97, 177, 56, 76, 122 },    // left = d153
+          { 58, 18, 28, 105, 139, 182, 70, 92, 63 },    // left = d207
+          { 46, 23, 32, 74, 86, 150, 67, 183, 88 },     // left = d63
+          { 36, 38, 48, 92, 122, 165, 88, 137, 91 }     // left = tm
+      },
+      {
+          // above = tm
+          { 65, 70, 60, 155, 159, 199, 61, 60, 81 },    // left = dc
+          { 44, 78, 115, 132, 119, 173, 71, 112, 93 },  // left = v
+          { 39, 38, 21, 184, 227, 206, 42, 32, 64 },    // left = h
+          { 58, 47, 36, 124, 137, 193, 80, 82, 78 },    // left = d45
+          { 49, 50, 35, 144, 95, 205, 63, 78, 59 },     // left = d135
+          { 41, 53, 52, 148, 71, 142, 65, 128, 51 },    // left = d117
+          { 40, 36, 28, 143, 143, 202, 40, 55, 137 },   // left = d153
+          { 52, 34, 29, 129, 183, 227, 42, 35, 43 },    // left = d207
+          { 42, 44, 44, 104, 105, 164, 64, 130, 80 },   // left = d63
+          { 43, 81, 53, 140, 169, 204, 68, 84, 72 }     // left = tm
+      } };
 
 #if !CONFIG_MISC_FIXES
 const vpx_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = {
-  { 144,  11,  54, 157, 195, 130,  46,  58, 108 },  // y = dc
-  { 118,  15, 123, 148, 131, 101,  44,  93, 131 },  // y = v
-  { 113,  12,  23, 188, 226, 142,  26,  32, 125 },  // y = h
-  { 120,  11,  50, 123, 163, 135,  64,  77, 103 },  // y = d45
-  { 113,   9,  36, 155, 111, 157,  32,  44, 161 },  // y = d135
-  { 116,   9,  55, 176,  76,  96,  37,  61, 149 },  // y = d117
-  { 115,   9,  28, 141, 161, 167,  21,  25, 193 },  // y = d153
-  { 120,  12,  32, 145, 195, 142,  32,  38,  86 },  // y = d207
-  { 116,  12,  64, 120, 140, 125,  49, 115, 121 },  // y = d63
-  { 102,  19,  66, 162, 182, 122,  35,  59, 128 }   // y = tm
+  { 144, 11, 54, 157, 195, 130, 46, 58, 108 },   // y = dc
+  { 118, 15, 123, 148, 131, 101, 44, 93, 131 },  // y = v
+  { 113, 12, 23, 188, 226, 142, 26, 32, 125 },   // y = h
+  { 120, 11, 50, 123, 163, 135, 64, 77, 103 },   // y = d45
+  { 113, 9, 36, 155, 111, 157, 32, 44, 161 },    // y = d135
+  { 116, 9, 55, 176, 76, 96, 37, 61, 149 },      // y = d117
+  { 115, 9, 28, 141, 161, 167, 21, 25, 193 },    // y = d153
+  { 120, 12, 32, 145, 195, 142, 32, 38, 86 },    // y = d207
+  { 116, 12, 64, 120, 140, 125, 49, 115, 121 },  // y = d63
+  { 102, 19, 66, 162, 182, 122, 35, 59, 128 }    // y = tm
 };
 #endif
 
 static const vpx_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
-  {  65,  32,  18, 144, 162, 194,  41,  51,  98 },  // block_size < 8x8
-  { 132,  68,  18, 165, 217, 196,  45,  40,  78 },  // block_size < 16x16
-  { 173,  80,  19, 176, 240, 193,  64,  35,  46 },  // block_size < 32x32
-  { 221, 135,  38, 194, 248, 121,  96,  85,  29 }   // block_size >= 32x32
+  { 65, 32, 18, 144, 162, 194, 41, 51, 98 },   // block_size < 8x8
+  { 132, 68, 18, 165, 217, 196, 45, 40, 78 },  // block_size < 16x16
+  { 173, 80, 19, 176, 240, 193, 64, 35, 46 },  // block_size < 32x32
+  { 221, 135, 38, 194, 248, 121, 96, 85, 29 }  // block_size >= 32x32
 };
 
 static const vpx_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
-  { 120,   7,  76, 176, 208, 126,  28,  54, 103 },  // y = dc
-  {  48,  12, 154, 155, 139,  90,  34, 117, 119 },  // y = v
-  {  67,   6,  25, 204, 243, 158,  13,  21,  96 },  // y = h
-  {  97,   5,  44, 131, 176, 139,  48,  68,  97 },  // y = d45
-  {  83,   5,  42, 156, 111, 152,  26,  49, 152 },  // y = d135
-  {  80,   5,  58, 178,  74,  83,  33,  62, 145 },  // y = d117
-  {  86,   5,  32, 154, 192, 168,  14,  22, 163 },  // y = d153
-  {  85,   5,  32, 156, 216, 148,  19,  29,  73 },  // y = d207
-  {  77,   7,  64, 116, 132, 122,  37, 126, 120 },  // y = d63
-  { 101,  21, 107, 181, 192, 103,  19,  67, 125 }   // y = tm
+  { 120, 7, 76, 176, 208, 126, 28, 54, 103 },   // y = dc
+  { 48, 12, 154, 155, 139, 90, 34, 117, 119 },  // y = v
+  { 67, 6, 25, 204, 243, 158, 13, 21, 96 },     // y = h
+  { 97, 5, 44, 131, 176, 139, 48, 68, 97 },     // y = d45
+  { 83, 5, 42, 156, 111, 152, 26, 49, 152 },    // y = d135
+  { 80, 5, 58, 178, 74, 83, 33, 62, 145 },      // y = d117
+  { 86, 5, 32, 154, 192, 168, 14, 22, 163 },    // y = d153
+  { 85, 5, 32, 156, 216, 148, 19, 29, 73 },     // y = d207
+  { 77, 7, 64, 116, 132, 122, 37, 126, 120 },   // y = d63
+  { 101, 21, 107, 181, 192, 103, 19, 67, 125 }  // y = tm
 };
 
 #if !CONFIG_MISC_FIXES
-const vpx_prob vp10_kf_partition_probs[PARTITION_CONTEXTS]
-                                     [PARTITION_TYPES - 1] = {
-  // 8x8 -> 4x4
-  { 158,  97,  94 },  // a/l both not split
-  {  93,  24,  99 },  // a split, l not split
-  {  85, 119,  44 },  // l split, a not split
-  {  62,  59,  67 },  // a/l both split
-  // 16x16 -> 8x8
-  { 149,  53,  53 },  // a/l both not split
-  {  94,  20,  48 },  // a split, l not split
-  {  83,  53,  24 },  // l split, a not split
-  {  52,  18,  18 },  // a/l both split
-  // 32x32 -> 16x16
-  { 150,  40,  39 },  // a/l both not split
-  {  78,  12,  26 },  // a split, l not split
-  {  67,  33,  11 },  // l split, a not split
-  {  24,   7,   5 },  // a/l both split
-  // 64x64 -> 32x32
-  { 174,  35,  49 },  // a/l both not split
-  {  68,  11,  27 },  // a split, l not split
-  {  57,  15,   9 },  // l split, a not split
-  {  12,   3,   3 },  // a/l both split
-};
+const vpx_prob
+    vp10_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
+      // 8x8 -> 4x4
+      { 158, 97, 94 },  // a/l both not split
+      { 93, 24, 99 },   // a split, l not split
+      { 85, 119, 44 },  // l split, a not split
+      { 62, 59, 67 },   // a/l both split
+      // 16x16 -> 8x8
+      { 149, 53, 53 },  // a/l both not split
+      { 94, 20, 48 },   // a split, l not split
+      { 83, 53, 24 },   // l split, a not split
+      { 52, 18, 18 },   // a/l both split
+      // 32x32 -> 16x16
+      { 150, 40, 39 },  // a/l both not split
+      { 78, 12, 26 },   // a split, l not split
+      { 67, 33, 11 },   // l split, a not split
+      { 24, 7, 5 },     // a/l both split
+      // 64x64 -> 32x32
+      { 174, 35, 49 },  // a/l both not split
+      { 68, 11, 27 },   // a split, l not split
+      { 57, 15, 9 },    // l split, a not split
+      { 12, 3, 3 },     // a/l both split
+    };
 #endif
 
-static const vpx_prob default_partition_probs[PARTITION_CONTEXTS]
-                                             [PARTITION_TYPES - 1] = {
-  // 8x8 -> 4x4
-  { 199, 122, 141 },  // a/l both not split
-  { 147,  63, 159 },  // a split, l not split
-  { 148, 133, 118 },  // l split, a not split
-  { 121, 104, 114 },  // a/l both split
-  // 16x16 -> 8x8
-  { 174,  73,  87 },  // a/l both not split
-  {  92,  41,  83 },  // a split, l not split
-  {  82,  99,  50 },  // l split, a not split
-  {  53,  39,  39 },  // a/l both split
-  // 32x32 -> 16x16
-  { 177,  58,  59 },  // a/l both not split
-  {  68,  26,  63 },  // a split, l not split
-  {  52,  79,  25 },  // l split, a not split
-  {  17,  14,  12 },  // a/l both split
-  // 64x64 -> 32x32
-  { 222,  34,  30 },  // a/l both not split
-  {  72,  16,  44 },  // a split, l not split
-  {  58,  32,  12 },  // l split, a not split
-  {  10,   7,   6 },  // a/l both split
-};
-
-static const vpx_prob default_inter_mode_probs[INTER_MODE_CONTEXTS]
-                                              [INTER_MODES - 1] = {
-  {2,       173,   34},  // 0 = both zero mv
-  {7,       145,   85},  // 1 = one zero mv + one a predicted mv
-  {7,       166,   63},  // 2 = two predicted mvs
-  {7,       94,    66},  // 3 = one predicted/zero and one new mv
-  {8,       64,    46},  // 4 = two new mvs
-  {17,      81,    31},  // 5 = one intra neighbour + x
-  {25,      29,    30},  // 6 = two intra neighbours
-};
+static const vpx_prob
+    default_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
+      // 8x8 -> 4x4
+      { 199, 122, 141 },  // a/l both not split
+      { 147, 63, 159 },   // a split, l not split
+      { 148, 133, 118 },  // l split, a not split
+      { 121, 104, 114 },  // a/l both split
+      // 16x16 -> 8x8
+      { 174, 73, 87 },  // a/l both not split
+      { 92, 41, 83 },   // a split, l not split
+      { 82, 99, 50 },   // l split, a not split
+      { 53, 39, 39 },   // a/l both split
+      // 32x32 -> 16x16
+      { 177, 58, 59 },  // a/l both not split
+      { 68, 26, 63 },   // a split, l not split
+      { 52, 79, 25 },   // l split, a not split
+      { 17, 14, 12 },   // a/l both split
+      // 64x64 -> 32x32
+      { 222, 34, 30 },  // a/l both not split
+      { 72, 16, 44 },   // a split, l not split
+      { 58, 32, 12 },   // l split, a not split
+      { 10, 7, 6 },     // a/l both split
+    };
+
+static const vpx_prob
+    default_inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1] = {
+      { 2, 173, 34 },  // 0 = both zero mv
+      { 7, 145, 85 },  // 1 = one zero mv + one a predicted mv
+      { 7, 166, 63 },  // 2 = two predicted mvs
+      { 7, 94, 66 },   // 3 = one predicted/zero and one new mv
+      { 8, 64, 46 },   // 4 = two new mvs
+      { 17, 81, 31 },  // 5 = one intra neighbour + x
+      { 25, 29, 30 },  // 6 = two intra neighbours
+    };
 
 /* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
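+/* Tree arrays store one pair of entries per internal node: a positive entry
+   is the offset of the next node pair within the array, while an entry <= 0
+   is a leaf holding the negated token value. */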
 const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
-  -DC_PRED, 2,                      /* 0 = DC_NODE */
-  -TM_PRED, 4,                      /* 1 = TM_NODE */
-  -V_PRED, 6,                       /* 2 = V_NODE */
-  8, 12,                            /* 3 = COM_NODE */
-  -H_PRED, 10,                      /* 4 = H_NODE */
-  -D135_PRED, -D117_PRED,           /* 5 = D135_NODE */
-  -D45_PRED, 14,                    /* 6 = D45_NODE */
-  -D63_PRED, 16,                    /* 7 = D63_NODE */
-  -D153_PRED, -D207_PRED             /* 8 = D153_NODE */
+  -DC_PRED,   2,          /* 0 = DC_NODE */
+  -TM_PRED,   4,          /* 1 = TM_NODE */
+  -V_PRED,    6,          /* 2 = V_NODE */
+  8,          12,         /* 3 = COM_NODE */
+  -H_PRED,    10,         /* 4 = H_NODE */
+  -D135_PRED, -D117_PRED, /* 5 = D135_NODE */
+  -D45_PRED,  14,         /* 6 = D45_NODE */
+  -D63_PRED,  16,         /* 7 = D63_NODE */
+  -D153_PRED, -D207_PRED  /* 8 = D153_NODE */
 };
 
 const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
-  -INTER_OFFSET(ZEROMV), 2,
-  -INTER_OFFSET(NEARESTMV), 4,
-  -INTER_OFFSET(NEARMV), -INTER_OFFSET(NEWMV)
+  -INTER_OFFSET(ZEROMV), 2, -INTER_OFFSET(NEARESTMV), 4, -INTER_OFFSET(NEARMV),
+  -INTER_OFFSET(NEWMV)
 };
 
 const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
-  -PARTITION_NONE, 2,
-  -PARTITION_HORZ, 4,
-  -PARTITION_VERT, -PARTITION_SPLIT
+  -PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT
 };
 
 static const vpx_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
@@ -253,47 +268,36 @@ static const vpx_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
 };
 
 static const vpx_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
-  239, 183, 119,  96,  41
+  239, 183, 119, 96, 41
 };
 
-static const vpx_prob default_comp_ref_p[REF_CONTEXTS] = {
-  50, 126, 123, 221, 226
-};
+static const vpx_prob default_comp_ref_p[REF_CONTEXTS] = { 50, 126, 123, 221,
+                                                           226 };
 
 static const vpx_prob default_single_ref_p[REF_CONTEXTS][2] = {
-  {  33,  16 },
-  {  77,  74 },
-  { 142, 142 },
-  { 172, 170 },
-  { 238, 247 }
+  { 33, 16 }, { 77, 74 }, { 142, 142 }, { 172, 170 }, { 238, 247 }
 };
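+/* One row per tx-size context (TX_SIZE_CONTEXTS == 2); the three groups give
+   the node probabilities of the 32x32, 16x16 and 8x8 tx-size trees. */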
 
-static const struct tx_probs default_tx_probs = {
-  { { 3, 136, 37 },
-    { 5, 52,  13 } },
+static const struct tx_probs default_tx_probs = { { { 3, 136, 37 },
+                                                    { 5, 52, 13 } },
 
-  { { 20, 152 },
-    { 15, 101 } },
+                                                  { { 20, 152 }, { 15, 101 } },
 
-  { { 100 },
-    { 66  } }
-};
+                                                  { { 100 }, { 66 } } };
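+/* The helpers below fold raw tx-size selection counts into binary event
+   counts for each tree node: ct[i][0] counts blocks that stopped at size i,
+   ct[i][1] counts blocks coded with a larger transform. */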
 
 void vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
-                                      unsigned int (*ct_32x32p)[2]) {
+                                           unsigned int (*ct_32x32p)[2]) {
   ct_32x32p[0][0] = tx_count_32x32p[TX_4X4];
-  ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] +
-                    tx_count_32x32p[TX_16X16] +
+  ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] + tx_count_32x32p[TX_16X16] +
                     tx_count_32x32p[TX_32X32];
   ct_32x32p[1][0] = tx_count_32x32p[TX_8X8];
-  ct_32x32p[1][1] = tx_count_32x32p[TX_16X16] +
-                    tx_count_32x32p[TX_32X32];
+  ct_32x32p[1][1] = tx_count_32x32p[TX_16X16] + tx_count_32x32p[TX_32X32];
   ct_32x32p[2][0] = tx_count_32x32p[TX_16X16];
   ct_32x32p[2][1] = tx_count_32x32p[TX_32X32];
 }
 
 void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
-                                      unsigned int (*ct_16x16p)[2]) {
+                                           unsigned int (*ct_16x16p)[2]) {
   ct_16x16p[0][0] = tx_count_16x16p[TX_4X4];
   ct_16x16p[0][1] = tx_count_16x16p[TX_8X8] + tx_count_16x16p[TX_16X16];
   ct_16x16p[1][0] = tx_count_16x16p[TX_8X8];
@@ -301,49 +305,38 @@ void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
 }
 
 void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
-                                    unsigned int (*ct_8x8p)[2]) {
+                                         unsigned int (*ct_8x8p)[2]) {
   ct_8x8p[0][0] = tx_count_8x8p[TX_4X4];
   ct_8x8p[0][1] = tx_count_8x8p[TX_8X8];
 }
 
-static const vpx_prob default_skip_probs[SKIP_CONTEXTS] = {
-  192, 128, 64
-};
+static const vpx_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 };
 
-static const vpx_prob default_switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS]
-                                                    [SWITCHABLE_FILTERS - 1] = {
-  { 235, 162, },
-  { 36, 255, },
-  { 34, 3, },
-  { 149, 144, },
-};
+static const vpx_prob default_switchable_interp_prob
+    [SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1] = {
+      { 235, 162 }, { 36, 255 }, { 34, 3 }, { 149, 144 },
+    };
 
 #if CONFIG_MISC_FIXES
 // FIXME(someone) need real defaults here
 static const struct segmentation_probs default_seg_probs = {
-  { 128, 128, 128, 128, 128, 128, 128 },
-  { 128, 128, 128 },
+  { 128, 128, 128, 128, 128, 128, 128 }, { 128, 128, 128 },
 };
 #endif
 
 const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
-  -DCT_DCT, 2,
-  -ADST_ADST, 4,
-  -ADST_DCT, -DCT_ADST
+  -DCT_DCT, 2, -ADST_ADST, 4, -ADST_DCT, -DCT_ADST
 };
 
-static const vpx_prob default_intra_ext_tx_prob[EXT_TX_SIZES]
-                                               [TX_TYPES][TX_TYPES - 1] = {
-  {{240, 85, 128}, {4, 1, 248}, {4, 1, 8}, {4, 248, 128}},
-  {{244, 85, 128}, {8, 2, 248}, {8, 2, 8}, {8, 248, 128}},
-  {{248, 85, 128}, {16, 4, 248}, {16, 4, 8}, {16, 248, 128}},
-};
+static const vpx_prob
+    default_intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1] = {
+      { { 240, 85, 128 }, { 4, 1, 248 }, { 4, 1, 8 }, { 4, 248, 128 } },
+      { { 244, 85, 128 }, { 8, 2, 248 }, { 8, 2, 8 }, { 8, 248, 128 } },
+      { { 248, 85, 128 }, { 16, 4, 248 }, { 16, 4, 8 }, { 16, 248, 128 } },
+    };
 
-static const vpx_prob default_inter_ext_tx_prob[EXT_TX_SIZES]
-                                               [TX_TYPES - 1] = {
-  {160, 85, 128},
-  {176, 85, 128},
-  {192, 85, 128},
+static const vpx_prob default_inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
+  { 160, 85, 128 }, { 176, 85, 128 }, { 192, 85, 128 },
 };
 
 static void init_mode_probs(FRAME_CONTEXT *fc) {
@@ -366,11 +359,8 @@ static void init_mode_probs(FRAME_CONTEXT *fc) {
   vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
 }
 
-const vpx_tree_index vp10_switchable_interp_tree
-                         [TREE_SIZE(SWITCHABLE_FILTERS)] = {
-  -EIGHTTAP, 2,
-  -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP
-};
+const vpx_tree_index vp10_switchable_interp_tree[TREE_SIZE(
+    SWITCHABLE_FILTERS)] = { -EIGHTTAP, 2, -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP };
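+/* Backward adaptation: each probability below is re-estimated by merging the
+   previous frame context with the symbol counts accumulated over the frame
+   (mode_mv_merge_probs / vpx_tree_merge_probs). */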
 
 void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
   int i, j;
@@ -382,11 +372,11 @@ void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
     fc->intra_inter_prob[i] = mode_mv_merge_probs(pre_fc->intra_inter_prob[i],
                                                   counts->intra_inter[i]);
   for (i = 0; i < COMP_INTER_CONTEXTS; i++)
-    fc->comp_inter_prob[i] = mode_mv_merge_probs(pre_fc->comp_inter_prob[i],
-                                                 counts->comp_inter[i]);
+    fc->comp_inter_prob[i] =
+        mode_mv_merge_probs(pre_fc->comp_inter_prob[i], counts->comp_inter[i]);
   for (i = 0; i < REF_CONTEXTS; i++)
-    fc->comp_ref_prob[i] = mode_mv_merge_probs(pre_fc->comp_ref_prob[i],
-                                               counts->comp_ref[i]);
+    fc->comp_ref_prob[i] =
+        mode_mv_merge_probs(pre_fc->comp_ref_prob[i], counts->comp_ref[i]);
   for (i = 0; i < REF_CONTEXTS; i++)
     for (j = 0; j < 2; j++)
       fc->single_ref_prob[i][j] = mode_mv_merge_probs(
@@ -394,11 +384,11 @@ void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
 
   for (i = 0; i < INTER_MODE_CONTEXTS; i++)
     vpx_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i],
-                counts->inter_mode[i], fc->inter_mode_probs[i]);
+                         counts->inter_mode[i], fc->inter_mode_probs[i]);
 
   for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
     vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i],
-                counts->y_mode[i], fc->y_mode_prob[i]);
+                         counts->y_mode[i], fc->y_mode_prob[i]);
 
 #if !CONFIG_MISC_FIXES
   for (i = 0; i < INTRA_MODES; ++i)
@@ -412,10 +402,9 @@ void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
 
   if (cm->interp_filter == SWITCHABLE) {
     for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
-      vpx_tree_merge_probs(vp10_switchable_interp_tree,
-                           pre_fc->switchable_interp_prob[i],
-                           counts->switchable_interp[i],
-                           fc->switchable_interp_prob[i]);
+      vpx_tree_merge_probs(
+          vp10_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
+          counts->switchable_interp[i], fc->switchable_interp_prob[i]);
   }
 }
 
@@ -434,15 +423,17 @@ void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
     for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
       vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p);
       for (j = 0; j < TX_SIZES - 3; ++j)
-        fc->tx_probs.p8x8[i][j] = mode_mv_merge_probs(
-            pre_fc->tx_probs.p8x8[i][j], branch_ct_8x8p[j]);
+        fc->tx_probs.p8x8[i][j] =
+            mode_mv_merge_probs(pre_fc->tx_probs.p8x8[i][j], branch_ct_8x8p[j]);
 
-      vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], branch_ct_16x16p);
+      vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i],
+                                            branch_ct_16x16p);
       for (j = 0; j < TX_SIZES - 2; ++j)
         fc->tx_probs.p16x16[i][j] = mode_mv_merge_probs(
             pre_fc->tx_probs.p16x16[i][j], branch_ct_16x16p[j]);
 
-      vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], branch_ct_32x32p);
+      vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i],
+                                            branch_ct_32x32p);
       for (j = 0; j < TX_SIZES - 1; ++j)
         fc->tx_probs.p32x32[i][j] = mode_mv_merge_probs(
             pre_fc->tx_probs.p32x32[i][j], branch_ct_32x32p[j]);
@@ -450,29 +441,26 @@ void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
   }
 
   for (i = 0; i < SKIP_CONTEXTS; ++i)
-    fc->skip_probs[i] = mode_mv_merge_probs(
-        pre_fc->skip_probs[i], counts->skip[i]);
+    fc->skip_probs[i] =
+        mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]);
 
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     int j;
     for (j = 0; j < TX_TYPES; ++j)
-      vpx_tree_merge_probs(vp10_ext_tx_tree,
-                           pre_fc->intra_ext_tx_prob[i][j],
+      vpx_tree_merge_probs(vp10_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
                            counts->intra_ext_tx[i][j],
                            fc->intra_ext_tx_prob[i][j]);
   }
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-    vpx_tree_merge_probs(vp10_ext_tx_tree,
-                         pre_fc->inter_ext_tx_prob[i],
-                         counts->inter_ext_tx[i],
-                         fc->inter_ext_tx_prob[i]);
+    vpx_tree_merge_probs(vp10_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
+                         counts->inter_ext_tx[i], fc->inter_ext_tx_prob[i]);
   }
 
 #if CONFIG_MISC_FIXES
   if (cm->seg.temporal_update) {
     for (i = 0; i < PREDICTION_PROBS; i++)
-      fc->seg.pred_probs[i] = mode_mv_merge_probs(pre_fc->seg.pred_probs[i],
-                                                  counts->seg.pred[i]);
+      fc->seg.pred_probs[i] =
+          mode_mv_merge_probs(pre_fc->seg.pred_probs[i], counts->seg.pred[i]);
 
     vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
                          counts->seg.tree_mispred, fc->seg.tree_probs);
@@ -535,8 +523,7 @@ void vp10_setup_past_independence(VP10_COMMON *cm) {
   if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
       cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
     // Reset all frame contexts.
-    for (i = 0; i < FRAME_CONTEXTS; ++i)
-      cm->frame_contexts[i] = *cm->fc;
+    for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
   } else if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT) {
     // Reset only the frame context specified in the frame header.
     cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
diff --git a/vp10/common/entropymode.h b/vp10/common/entropymode.h
index 611d3ad132f43323295c5eea30b1c9365c712e34..377762ab6356be4471522dac79c28ef0334457b9 100644
--- a/vp10/common/entropymode.h
+++ b/vp10/common/entropymode.h
@@ -25,7 +25,7 @@ extern "C" {
 
 #define TX_SIZE_CONTEXTS 2
 
-#define INTER_OFFSET(mode) ((mode) - NEARESTMV)
+#define INTER_OFFSET(mode) ((mode)-NEARESTMV)
 
 struct VP10Common;
 
@@ -53,8 +53,9 @@ typedef struct frame_contexts {
   vpx_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
   vpx_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
   vp10_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
-  vpx_prob switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS]
-                                 [SWITCHABLE_FILTERS - 1];
+  vpx_prob
+      switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS -
+                                                         1];
   vpx_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
   vpx_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
   vpx_prob comp_inter_prob[COMP_INTER_CONTEXTS];
@@ -77,10 +78,10 @@ typedef struct FRAME_COUNTS {
   unsigned int uv_mode[INTRA_MODES][INTRA_MODES];
   unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES];
   vp10_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
-  unsigned int eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES]
-                         [COEF_BANDS][COEFF_CONTEXTS];
-  unsigned int switchable_interp[SWITCHABLE_FILTER_CONTEXTS]
-                                [SWITCHABLE_FILTERS];
+  unsigned int
+      eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
+  unsigned int
+      switchable_interp[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];
   unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES];
   unsigned int intra_inter[INTRA_INTER_CONTEXTS][2];
   unsigned int comp_inter[COMP_INTER_CONTEXTS][2];
@@ -96,20 +97,19 @@ typedef struct FRAME_COUNTS {
   unsigned int inter_ext_tx[EXT_TX_SIZES][TX_TYPES];
 } FRAME_COUNTS;
 
-extern const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES]
-                                        [INTRA_MODES - 1];
+extern const vpx_prob
+    vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
 #if !CONFIG_MISC_FIXES
 extern const vpx_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
-extern const vpx_prob vp10_kf_partition_probs[PARTITION_CONTEXTS]
-                                            [PARTITION_TYPES - 1];
+extern const vpx_prob
+    vp10_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
 #endif
 
 extern const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
 extern const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)];
 extern const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)];
-extern const vpx_tree_index vp10_switchable_interp_tree
-                                [TREE_SIZE(SWITCHABLE_FILTERS)];
-
+extern const vpx_tree_index
+    vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
 
 void vp10_setup_past_independence(struct VP10Common *cm);
 
@@ -117,14 +117,13 @@ void vp10_adapt_intra_frame_probs(struct VP10Common *cm);
 void vp10_adapt_inter_frame_probs(struct VP10Common *cm);
 
 void vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
-                                      unsigned int (*ct_32x32p)[2]);
+                                           unsigned int (*ct_32x32p)[2]);
 void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
-                                      unsigned int (*ct_16x16p)[2]);
+                                           unsigned int (*ct_16x16p)[2]);
 void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
-                                    unsigned int (*ct_8x8p)[2]);
+                                         unsigned int (*ct_8x8p)[2]);
 
-extern const vpx_tree_index
-    vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)];
+extern const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)];
 
 static INLINE int vp10_ceil_log2(int n) {
   int i = 1, p = 2;
diff --git a/vp10/common/entropymv.c b/vp10/common/entropymv.c
index a9946ee15228924d89d384c75deb79cb628f8834..a984241a931d1c841f91022651c002ee176aaf0b 100644
--- a/vp10/common/entropymv.c
+++ b/vp10/common/entropymv.c
@@ -15,11 +15,10 @@
 #define COMPANDED_MVREF_THRESH 8
 
 const vpx_tree_index vp10_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
-  -MV_JOINT_ZERO, 2,
-  -MV_JOINT_HNZVZ, 4,
-  -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
+  -MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
 };
 
+/* clang-format off */
 const vpx_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
   -MV_CLASS_0, 2,
   -MV_CLASS_1, 4,
@@ -32,87 +31,82 @@ const vpx_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
   -MV_CLASS_7, -MV_CLASS_8,
   -MV_CLASS_9, -MV_CLASS_10,
 };
+/* clang-format on */
 
 const vpx_tree_index vp10_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
   -0, -1,
 };
 
-const vpx_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = {
-  -0, 2,
-  -1, 4,
-  -2, -3
-};
+const vpx_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2,  -1,
+                                                                4,  -2, -3 };
 
 static const nmv_context default_nmv_context = {
-  {32, 64, 96},
-  {
-    { // Vertical component
-      128,                                                  // sign
-      {224, 144, 192, 168, 192, 176, 192, 198, 198, 245},   // class
-      {216},                                                // class0
-      {136, 140, 148, 160, 176, 192, 224, 234, 234, 240},   // bits
-      {{128, 128, 64}, {96, 112, 64}},                      // class0_fp
-      {64, 96, 64},                                         // fp
-      160,                                                  // class0_hp bit
-      128,                                                  // hp
+  { 32, 64, 96 },
+  { {
+        // Vertical component
+        128,                                                   // sign
+        { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 },  // class
+        { 216 },                                               // class0
+        { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 },  // bits
+        { { 128, 128, 64 }, { 96, 112, 64 } },                 // class0_fp
+        { 64, 96, 64 },                                        // fp
+        160,                                                   // class0_hp bit
+        128,                                                   // hp
     },
-    { // Horizontal component
-      128,                                                  // sign
-      {216, 128, 176, 160, 176, 176, 192, 198, 198, 208},   // class
-      {208},                                                // class0
-      {136, 140, 148, 160, 176, 192, 224, 234, 234, 240},   // bits
-      {{128, 128, 64}, {96, 112, 64}},                      // class0_fp
-      {64, 96, 64},                                         // fp
-      160,                                                  // class0_hp bit
-      128,                                                  // hp
-    }
-  },
+    {
+        // Horizontal component
+        128,                                                   // sign
+        { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 },  // class
+        { 208 },                                               // class0
+        { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 },  // bits
+        { { 128, 128, 64 }, { 96, 112, 64 } },                 // class0_fp
+        { 64, 96, 64 },                                        // fp
+        160,                                                   // class0_hp bit
+        128,                                                   // hp
+    } },
 };
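+/* log_in_base_2[i] == floor(log2(i)) for i >= 1; vp10_get_mv_class() indexes
+   it with a down-shifted magnitude to find the MV class without a loop. */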
 
 static const uint8_t log_in_base_2[] = {
-  0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
-  4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6,
-  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-  6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10
+  0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+  4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7,
+  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10
 };
 
 static INLINE int mv_class_base(MV_CLASS_TYPE c) {
@@ -120,16 +114,16 @@ static INLINE int mv_class_base(MV_CLASS_TYPE c) {
 }
 
 MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) {
-  const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096) ?
-      MV_CLASS_10 : (MV_CLASS_TYPE)log_in_base_2[z >> 3];
-  if (offset)
-    *offset = z - mv_class_base(c);
+  const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096)
+                              ? MV_CLASS_10
+                              : (MV_CLASS_TYPE)log_in_base_2[z >> 3];
+  if (offset) *offset = z - mv_class_base(c);
   return c;
 }
 
 int vp10_use_mv_hp(const MV *ref) {
 #if CONFIG_MISC_FIXES
-  (void) ref;
+  (void)ref;
   return 1;
 #else
   return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
@@ -137,20 +131,20 @@ int vp10_use_mv_hp(const MV *ref) {
 #endif
 }
 
-static void inc_mv_component(int v, nmv_component_counts *comp_counts,
-                             int incr, int usehp) {
+static void inc_mv_component(int v, nmv_component_counts *comp_counts, int incr,
+                             int usehp) {
   int s, z, c, o, d, e, f;
-  assert(v != 0);            /* should not be zero */
+  assert(v != 0); /* should not be zero */
   s = v < 0;
   comp_counts->sign[s] += incr;
-  z = (s ? -v : v) - 1;       /* magnitude - 1 */
+  z = (s ? -v : v) - 1; /* magnitude - 1 */
 
   c = vp10_get_mv_class(z, &o);
   comp_counts->classes[c] += incr;
 
-  d = (o >> 3);               /* int mv data */
-  f = (o >> 1) & 3;           /* fractional pel mv data */
-  e = (o & 1);                /* high precision mv data */
+  d = (o >> 3);     /* int mv data */
+  f = (o >> 1) & 3; /* fractional pel mv data */
+  e = (o & 1);      /* high precision mv data */
 
   if (c == MV_CLASS_0) {
     comp_counts->class0[d] += incr;
@@ -159,8 +153,7 @@ static void inc_mv_component(int v, nmv_component_counts *comp_counts,
   } else {
     int i;
     int b = c + CLASS0_BITS - 1;  // number of bits
-    for (i = 0; i < b; ++i)
-      comp_counts->bits[i][((d >> i) & 1)] += incr;
+    for (i = 0; i < b; ++i) comp_counts->bits[i][((d >> i) & 1)] += incr;
     comp_counts->fp[f] += incr;
     comp_counts->hp[e] += usehp * incr;
   }
@@ -220,6 +213,4 @@ void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
   }
 }
 
-void vp10_init_mv_probs(VP10_COMMON *cm) {
-  cm->fc->nmvc = default_nmv_context;
-}
+void vp10_init_mv_probs(VP10_COMMON *cm) { cm->fc->nmvc = default_nmv_context; }
diff --git a/vp10/common/entropymv.h b/vp10/common/entropymv.h
index d1eb95c5798ba41cf4cb16aadf3f44319287a84d..737a88c24c394bd2726daac1cf73c0f8ac245874 100644
--- a/vp10/common/entropymv.h
+++ b/vp10/common/entropymv.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_COMMON_ENTROPYMV_H_
 #define VP10_COMMON_ENTROPYMV_H_
 
@@ -32,12 +31,12 @@ int vp10_use_mv_hp(const MV *ref);
 #define MV_UPDATE_PROB 252
 
 /* Symbols for coding which components are zero jointly */
-#define MV_JOINTS     4
+#define MV_JOINTS 4
 typedef enum {
-  MV_JOINT_ZERO = 0,             /* Zero vector */
-  MV_JOINT_HNZVZ = 1,            /* Vert zero, hor nonzero */
-  MV_JOINT_HZVNZ = 2,            /* Hor zero, vert nonzero */
-  MV_JOINT_HNZVNZ = 3,           /* Both components nonzero */
+  MV_JOINT_ZERO = 0,   /* Zero vector */
+  MV_JOINT_HNZVZ = 1,  /* Vert zero, hor nonzero */
+  MV_JOINT_HZVNZ = 2,  /* Hor zero, vert nonzero */
+  MV_JOINT_HNZVNZ = 3, /* Both components nonzero */
 } MV_JOINT_TYPE;
 
 static INLINE int mv_joint_vertical(MV_JOINT_TYPE type) {
@@ -49,33 +48,33 @@ static INLINE int mv_joint_horizontal(MV_JOINT_TYPE type) {
 }
 
 /* Symbols for coding magnitude class of nonzero components */
-#define MV_CLASSES     11
+#define MV_CLASSES 11
 typedef enum {
-  MV_CLASS_0 = 0,      /* (0, 2]     integer pel */
-  MV_CLASS_1 = 1,      /* (2, 4]     integer pel */
-  MV_CLASS_2 = 2,      /* (4, 8]     integer pel */
-  MV_CLASS_3 = 3,      /* (8, 16]    integer pel */
-  MV_CLASS_4 = 4,      /* (16, 32]   integer pel */
-  MV_CLASS_5 = 5,      /* (32, 64]   integer pel */
-  MV_CLASS_6 = 6,      /* (64, 128]  integer pel */
-  MV_CLASS_7 = 7,      /* (128, 256] integer pel */
-  MV_CLASS_8 = 8,      /* (256, 512] integer pel */
-  MV_CLASS_9 = 9,      /* (512, 1024] integer pel */
-  MV_CLASS_10 = 10,    /* (1024,2048] integer pel */
+  MV_CLASS_0 = 0,   /* (0, 2]     integer pel */
+  MV_CLASS_1 = 1,   /* (2, 4]     integer pel */
+  MV_CLASS_2 = 2,   /* (4, 8]     integer pel */
+  MV_CLASS_3 = 3,   /* (8, 16]    integer pel */
+  MV_CLASS_4 = 4,   /* (16, 32]   integer pel */
+  MV_CLASS_5 = 5,   /* (32, 64]   integer pel */
+  MV_CLASS_6 = 6,   /* (64, 128]  integer pel */
+  MV_CLASS_7 = 7,   /* (128, 256] integer pel */
+  MV_CLASS_8 = 8,   /* (256, 512] integer pel */
+  MV_CLASS_9 = 9,   /* (512, 1024] integer pel */
+  MV_CLASS_10 = 10, /* (1024,2048] integer pel */
 } MV_CLASS_TYPE;
 
-#define CLASS0_BITS    1  /* bits at integer precision for class 0 */
-#define CLASS0_SIZE    (1 << CLASS0_BITS)
+#define CLASS0_BITS 1 /* bits at integer precision for class 0 */
+#define CLASS0_SIZE (1 << CLASS0_BITS)
 #define MV_OFFSET_BITS (MV_CLASSES + CLASS0_BITS - 2)
 #define MV_FP_SIZE 4
 
-#define MV_MAX_BITS    (MV_CLASSES + CLASS0_BITS + 2)
-#define MV_MAX         ((1 << MV_MAX_BITS) - 1)
-#define MV_VALS        ((MV_MAX << 1) + 1)
+#define MV_MAX_BITS (MV_CLASSES + CLASS0_BITS + 2)
+#define MV_MAX ((1 << MV_MAX_BITS) - 1)
+#define MV_VALS ((MV_MAX << 1) + 1)
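+/* With MV_CLASSES == 11 and CLASS0_BITS == 1: MV_MAX_BITS == 14, so
+   MV_MAX == 16383 and MV_VALS == 32767. */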
 
 #define MV_IN_USE_BITS 14
-#define MV_UPP   ((1 << MV_IN_USE_BITS) - 1)
-#define MV_LOW   (-(1 << MV_IN_USE_BITS))
+#define MV_UPP ((1 << MV_IN_USE_BITS) - 1)
+#define MV_LOW (-(1 << MV_IN_USE_BITS))
 
 extern const vpx_tree_index vp10_mv_joint_tree[];
 extern const vpx_tree_index vp10_mv_class_tree[];
diff --git a/vp10/common/enums.h b/vp10/common/enums.h
index 9b10379dbaffeaaadb7ae618c90548ee9b822cc5..c08b6b9a651ab2f6d84bd7778c75cff7cc38fe8e 100644
--- a/vp10/common/enums.h
+++ b/vp10/common/enums.h
@@ -21,7 +21,7 @@ extern "C" {
 #define MI_SIZE_LOG2 3
 #define MI_BLOCK_SIZE_LOG2 (6 - MI_SIZE_LOG2)  // 64 = 2^6
 
-#define MI_SIZE (1 << MI_SIZE_LOG2)  // pixels per mi-unit
+#define MI_SIZE (1 << MI_SIZE_LOG2)              // pixels per mi-unit
 #define MI_BLOCK_SIZE (1 << MI_BLOCK_SIZE_LOG2)  // mi-units per max block
 
 #define MI_MASK (MI_BLOCK_SIZE - 1)
@@ -41,20 +41,20 @@ typedef enum BITSTREAM_PROFILE {
   MAX_PROFILES
 } BITSTREAM_PROFILE;
 
-#define BLOCK_4X4     0
-#define BLOCK_4X8     1
-#define BLOCK_8X4     2
-#define BLOCK_8X8     3
-#define BLOCK_8X16    4
-#define BLOCK_16X8    5
-#define BLOCK_16X16   6
-#define BLOCK_16X32   7
-#define BLOCK_32X16   8
-#define BLOCK_32X32   9
-#define BLOCK_32X64  10
-#define BLOCK_64X32  11
-#define BLOCK_64X64  12
-#define BLOCK_SIZES  13
+#define BLOCK_4X4 0
+#define BLOCK_4X8 1
+#define BLOCK_8X4 2
+#define BLOCK_8X8 3
+#define BLOCK_8X16 4
+#define BLOCK_16X8 5
+#define BLOCK_16X16 6
+#define BLOCK_16X32 7
+#define BLOCK_32X16 8
+#define BLOCK_32X32 9
+#define BLOCK_32X64 10
+#define BLOCK_64X32 11
+#define BLOCK_64X64 12
+#define BLOCK_SIZES 13
 #define BLOCK_INVALID BLOCK_SIZES
 typedef uint8_t BLOCK_SIZE;
 
@@ -68,36 +68,36 @@ typedef enum PARTITION_TYPE {
 } PARTITION_TYPE;
 
 typedef char PARTITION_CONTEXT;
-#define PARTITION_PLOFFSET   4  // number of probability models per block size
+#define PARTITION_PLOFFSET 4  // number of probability models per block size
 #define PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET)
 
 // block transform size
 typedef uint8_t TX_SIZE;
-#define TX_4X4   ((TX_SIZE)0)   // 4x4 transform
-#define TX_8X8   ((TX_SIZE)1)   // 8x8 transform
-#define TX_16X16 ((TX_SIZE)2)   // 16x16 transform
-#define TX_32X32 ((TX_SIZE)3)   // 32x32 transform
+#define TX_4X4 ((TX_SIZE)0)    // 4x4 transform
+#define TX_8X8 ((TX_SIZE)1)    // 8x8 transform
+#define TX_16X16 ((TX_SIZE)2)  // 16x16 transform
+#define TX_32X32 ((TX_SIZE)3)  // 32x32 transform
 #define TX_SIZES ((TX_SIZE)4)
 
 // frame transform mode
 typedef enum {
-  ONLY_4X4            = 0,        // only 4x4 transform used
-  ALLOW_8X8           = 1,        // allow block transform size up to 8x8
-  ALLOW_16X16         = 2,        // allow block transform size up to 16x16
-  ALLOW_32X32         = 3,        // allow block transform size up to 32x32
-  TX_MODE_SELECT      = 4,        // transform specified for each block
-  TX_MODES            = 5,
+  ONLY_4X4 = 0,        // only 4x4 transform used
+  ALLOW_8X8 = 1,       // allow block transform size up to 8x8
+  ALLOW_16X16 = 2,     // allow block transform size up to 16x16
+  ALLOW_32X32 = 3,     // allow block transform size up to 32x32
+  TX_MODE_SELECT = 4,  // transform specified for each block
+  TX_MODES = 5,
 } TX_MODE;
 
 typedef enum {
-  DCT_DCT   = 0,                      // DCT  in both horizontal and vertical
-  ADST_DCT  = 1,                      // ADST in vertical, DCT in horizontal
-  DCT_ADST  = 2,                      // DCT  in vertical, ADST in horizontal
-  ADST_ADST = 3,                      // ADST in both directions
+  DCT_DCT = 0,    // DCT  in both horizontal and vertical
+  ADST_DCT = 1,   // ADST in vertical, DCT in horizontal
+  DCT_ADST = 2,   // DCT  in vertical, ADST in horizontal
+  ADST_ADST = 3,  // ADST in both directions
   TX_TYPES = 4
 } TX_TYPE;
 
-#define EXT_TX_SIZES       3  // number of sizes that use extended transforms
+#define EXT_TX_SIZES 3  // number of sizes that use extended transforms
 
 typedef enum {
   VPX_LAST_FLAG = 1 << 0,
@@ -105,26 +105,22 @@ typedef enum {
   VPX_ALT_FLAG = 1 << 2,
 } VPX_REFFRAME;
 
-typedef enum {
-  PLANE_TYPE_Y  = 0,
-  PLANE_TYPE_UV = 1,
-  PLANE_TYPES
-} PLANE_TYPE;
-
-#define DC_PRED    0       // Average of above and left pixels
-#define V_PRED     1       // Vertical
-#define H_PRED     2       // Horizontal
-#define D45_PRED   3       // Directional 45  deg = round(arctan(1/1) * 180/pi)
-#define D135_PRED  4       // Directional 135 deg = 180 - 45
-#define D117_PRED  5       // Directional 117 deg = 180 - 63
-#define D153_PRED  6       // Directional 153 deg = 180 - 27
-#define D207_PRED  7       // Directional 207 deg = 180 + 27
-#define D63_PRED   8       // Directional 63  deg = round(arctan(2/1) * 180/pi)
-#define TM_PRED    9       // True-motion
+typedef enum { PLANE_TYPE_Y = 0, PLANE_TYPE_UV = 1, PLANE_TYPES } PLANE_TYPE;
+
+#define DC_PRED 0    // Average of above and left pixels
+#define V_PRED 1     // Vertical
+#define H_PRED 2     // Horizontal
+#define D45_PRED 3   // Directional 45  deg = round(arctan(1/1) * 180/pi)
+#define D135_PRED 4  // Directional 135 deg = 180 - 45
+#define D117_PRED 5  // Directional 117 deg = 180 - 63
+#define D153_PRED 6  // Directional 153 deg = 180 - 27
+#define D207_PRED 7  // Directional 207 deg = 180 + 27
+#define D63_PRED 8   // Directional 63  deg = round(arctan(2/1) * 180/pi)
+#define TM_PRED 9    // True-motion
 #define NEARESTMV 10
-#define NEARMV    11
-#define ZEROMV    12
-#define NEWMV     13
+#define NEARMV 11
+#define ZEROMV 12
+#define NEWMV 13
 #define MB_MODE_COUNT 14
 typedef uint8_t PREDICTION_MODE;
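
As a sanity check on the angle comments above: the directional modes are named by their angle in degrees, and each value follows from the documented arctangent expressions. A minimal check, assuming standard <math.h>; check_directional_angles is a hypothetical helper, not part of this change:

#include <assert.h>
#include <math.h>

/* Verify the documented angle derivations for the directional modes. */
static void check_directional_angles(void) {
  assert((int)lround(atan(1.0) * 180.0 / M_PI) == 45); /* D45_PRED  */
  assert((int)lround(atan(2.0) * 180.0 / M_PI) == 63); /* D63_PRED  */
  assert(180 - 45 == 135);                             /* D135_PRED */
  assert(180 - 63 == 117);                             /* D117_PRED */
  assert(180 - 27 == 153);                             /* D153_PRED */
  assert(180 + 27 == 207);                             /* D207_PRED */
}
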
 
diff --git a/vp10/common/filter.c b/vp10/common/filter.c
index dda279f132ec4d8b340b0eed2d894e76206180a8..8597efc49774492e8160dd628c635d1439174914 100644
--- a/vp10/common/filter.c
+++ b/vp10/common/filter.c
@@ -14,91 +14,55 @@
 
 DECLARE_ALIGNED(256, static const InterpKernel,
                 bilinear_filters[SUBPEL_SHIFTS]) = {
-  { 0, 0, 0, 128,   0, 0, 0, 0 },
-  { 0, 0, 0, 120,   8, 0, 0, 0 },
-  { 0, 0, 0, 112,  16, 0, 0, 0 },
-  { 0, 0, 0, 104,  24, 0, 0, 0 },
-  { 0, 0, 0,  96,  32, 0, 0, 0 },
-  { 0, 0, 0,  88,  40, 0, 0, 0 },
-  { 0, 0, 0,  80,  48, 0, 0, 0 },
-  { 0, 0, 0,  72,  56, 0, 0, 0 },
-  { 0, 0, 0,  64,  64, 0, 0, 0 },
-  { 0, 0, 0,  56,  72, 0, 0, 0 },
-  { 0, 0, 0,  48,  80, 0, 0, 0 },
-  { 0, 0, 0,  40,  88, 0, 0, 0 },
-  { 0, 0, 0,  32,  96, 0, 0, 0 },
-  { 0, 0, 0,  24, 104, 0, 0, 0 },
-  { 0, 0, 0,  16, 112, 0, 0, 0 },
-  { 0, 0, 0,   8, 120, 0, 0, 0 }
+  { 0, 0, 0, 128, 0, 0, 0, 0 },  { 0, 0, 0, 120, 8, 0, 0, 0 },
+  { 0, 0, 0, 112, 16, 0, 0, 0 }, { 0, 0, 0, 104, 24, 0, 0, 0 },
+  { 0, 0, 0, 96, 32, 0, 0, 0 },  { 0, 0, 0, 88, 40, 0, 0, 0 },
+  { 0, 0, 0, 80, 48, 0, 0, 0 },  { 0, 0, 0, 72, 56, 0, 0, 0 },
+  { 0, 0, 0, 64, 64, 0, 0, 0 },  { 0, 0, 0, 56, 72, 0, 0, 0 },
+  { 0, 0, 0, 48, 80, 0, 0, 0 },  { 0, 0, 0, 40, 88, 0, 0, 0 },
+  { 0, 0, 0, 32, 96, 0, 0, 0 },  { 0, 0, 0, 24, 104, 0, 0, 0 },
+  { 0, 0, 0, 16, 112, 0, 0, 0 }, { 0, 0, 0, 8, 120, 0, 0, 0 }
 };
 
 // Lagrangian interpolation filter
 DECLARE_ALIGNED(256, static const InterpKernel,
                 sub_pel_filters_8[SUBPEL_SHIFTS]) = {
-  { 0,   0,   0, 128,   0,   0,   0,  0},
-  { 0,   1,  -5, 126,   8,  -3,   1,  0},
-  { -1,   3, -10, 122,  18,  -6,   2,  0},
-  { -1,   4, -13, 118,  27,  -9,   3, -1},
-  { -1,   4, -16, 112,  37, -11,   4, -1},
-  { -1,   5, -18, 105,  48, -14,   4, -1},
-  { -1,   5, -19,  97,  58, -16,   5, -1},
-  { -1,   6, -19,  88,  68, -18,   5, -1},
-  { -1,   6, -19,  78,  78, -19,   6, -1},
-  { -1,   5, -18,  68,  88, -19,   6, -1},
-  { -1,   5, -16,  58,  97, -19,   5, -1},
-  { -1,   4, -14,  48, 105, -18,   5, -1},
-  { -1,   4, -11,  37, 112, -16,   4, -1},
-  { -1,   3,  -9,  27, 118, -13,   4, -1},
-  { 0,   2,  -6,  18, 122, -10,   3, -1},
-  { 0,   1,  -3,   8, 126,  -5,   1,  0}
+  { 0, 0, 0, 128, 0, 0, 0, 0 },        { 0, 1, -5, 126, 8, -3, 1, 0 },
+  { -1, 3, -10, 122, 18, -6, 2, 0 },   { -1, 4, -13, 118, 27, -9, 3, -1 },
+  { -1, 4, -16, 112, 37, -11, 4, -1 }, { -1, 5, -18, 105, 48, -14, 4, -1 },
+  { -1, 5, -19, 97, 58, -16, 5, -1 },  { -1, 6, -19, 88, 68, -18, 5, -1 },
+  { -1, 6, -19, 78, 78, -19, 6, -1 },  { -1, 5, -18, 68, 88, -19, 6, -1 },
+  { -1, 5, -16, 58, 97, -19, 5, -1 },  { -1, 4, -14, 48, 105, -18, 5, -1 },
+  { -1, 4, -11, 37, 112, -16, 4, -1 }, { -1, 3, -9, 27, 118, -13, 4, -1 },
+  { 0, 2, -6, 18, 122, -10, 3, -1 },   { 0, 1, -3, 8, 126, -5, 1, 0 }
 };
 
 // DCT based filter
 DECLARE_ALIGNED(256, static const InterpKernel,
                 sub_pel_filters_8s[SUBPEL_SHIFTS]) = {
-  {0,   0,   0, 128,   0,   0,   0, 0},
-  {-1,   3,  -7, 127,   8,  -3,   1, 0},
-  {-2,   5, -13, 125,  17,  -6,   3, -1},
-  {-3,   7, -17, 121,  27, -10,   5, -2},
-  {-4,   9, -20, 115,  37, -13,   6, -2},
-  {-4,  10, -23, 108,  48, -16,   8, -3},
-  {-4,  10, -24, 100,  59, -19,   9, -3},
-  {-4,  11, -24,  90,  70, -21,  10, -4},
-  {-4,  11, -23,  80,  80, -23,  11, -4},
-  {-4,  10, -21,  70,  90, -24,  11, -4},
-  {-3,   9, -19,  59, 100, -24,  10, -4},
-  {-3,   8, -16,  48, 108, -23,  10, -4},
-  {-2,   6, -13,  37, 115, -20,   9, -4},
-  {-2,   5, -10,  27, 121, -17,   7, -3},
-  {-1,   3,  -6,  17, 125, -13,   5, -2},
-  {0,   1,  -3,   8, 127,  -7,   3, -1}
+  { 0, 0, 0, 128, 0, 0, 0, 0 },         { -1, 3, -7, 127, 8, -3, 1, 0 },
+  { -2, 5, -13, 125, 17, -6, 3, -1 },   { -3, 7, -17, 121, 27, -10, 5, -2 },
+  { -4, 9, -20, 115, 37, -13, 6, -2 },  { -4, 10, -23, 108, 48, -16, 8, -3 },
+  { -4, 10, -24, 100, 59, -19, 9, -3 }, { -4, 11, -24, 90, 70, -21, 10, -4 },
+  { -4, 11, -23, 80, 80, -23, 11, -4 }, { -4, 10, -21, 70, 90, -24, 11, -4 },
+  { -3, 9, -19, 59, 100, -24, 10, -4 }, { -3, 8, -16, 48, 108, -23, 10, -4 },
+  { -2, 6, -13, 37, 115, -20, 9, -4 },  { -2, 5, -10, 27, 121, -17, 7, -3 },
+  { -1, 3, -6, 17, 125, -13, 5, -2 },   { 0, 1, -3, 8, 127, -7, 3, -1 }
 };
 
 // freqmultiplier = 0.5
 DECLARE_ALIGNED(256, static const InterpKernel,
                 sub_pel_filters_8lp[SUBPEL_SHIFTS]) = {
-  { 0,  0,  0, 128,  0,  0,  0,  0},
-  {-3, -1, 32,  64, 38,  1, -3,  0},
-  {-2, -2, 29,  63, 41,  2, -3,  0},
-  {-2, -2, 26,  63, 43,  4, -4,  0},
-  {-2, -3, 24,  62, 46,  5, -4,  0},
-  {-2, -3, 21,  60, 49,  7, -4,  0},
-  {-1, -4, 18,  59, 51,  9, -4,  0},
-  {-1, -4, 16,  57, 53, 12, -4, -1},
-  {-1, -4, 14,  55, 55, 14, -4, -1},
-  {-1, -4, 12,  53, 57, 16, -4, -1},
-  { 0, -4,  9,  51, 59, 18, -4, -1},
-  { 0, -4,  7,  49, 60, 21, -3, -2},
-  { 0, -4,  5,  46, 62, 24, -3, -2},
-  { 0, -4,  4,  43, 63, 26, -2, -2},
-  { 0, -3,  2,  41, 63, 29, -2, -2},
-  { 0, -3,  1,  38, 64, 32, -1, -3}
+  { 0, 0, 0, 128, 0, 0, 0, 0 },       { -3, -1, 32, 64, 38, 1, -3, 0 },
+  { -2, -2, 29, 63, 41, 2, -3, 0 },   { -2, -2, 26, 63, 43, 4, -4, 0 },
+  { -2, -3, 24, 62, 46, 5, -4, 0 },   { -2, -3, 21, 60, 49, 7, -4, 0 },
+  { -1, -4, 18, 59, 51, 9, -4, 0 },   { -1, -4, 16, 57, 53, 12, -4, -1 },
+  { -1, -4, 14, 55, 55, 14, -4, -1 }, { -1, -4, 12, 53, 57, 16, -4, -1 },
+  { 0, -4, 9, 51, 59, 18, -4, -1 },   { 0, -4, 7, 49, 60, 21, -3, -2 },
+  { 0, -4, 5, 46, 62, 24, -3, -2 },   { 0, -4, 4, 43, 63, 26, -2, -2 },
+  { 0, -3, 2, 41, 63, 29, -2, -2 },   { 0, -3, 1, 38, 64, 32, -1, -3 }
 };
 
-
 const InterpKernel *vp10_filter_kernels[4] = {
-  sub_pel_filters_8,
-  sub_pel_filters_8lp,
-  sub_pel_filters_8s,
-  bilinear_filters
+  sub_pel_filters_8, sub_pel_filters_8lp, sub_pel_filters_8s, bilinear_filters
 };
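
Each kernel row above sums to 128, i.e. 1 << FILTER_BITS, so applying one is an 8-tap convolution followed by a rounding right-shift by 7. A minimal sketch of that convention, assuming the usual libvpx window placement with the heaviest weights on taps 3 and 4; subpel_filter and clip_u8 are hypothetical names, not part of this change:

#include <stdint.h>

#define SUBPEL_TAPS 8 /* taps per kernel row */
#define FILTER_BITS 7 /* each row sums to 1 << FILTER_BITS == 128 */

static uint8_t clip_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

/* Filter one pixel; src points at the left of the two centre taps, so the
 * window spans src[-3] .. src[4]. */
static uint8_t subpel_filter(const uint8_t *src,
                             const int16_t kernel[SUBPEL_TAPS]) {
  int k, sum = 0;
  for (k = 0; k < SUBPEL_TAPS; ++k) sum += kernel[k] * src[k - 3];
  /* Round (+64) and normalise (>> 7). */
  return clip_u8((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
}

With the half-pel bilinear row { 0, 0, 0, 64, 64, 0, 0, 0 } this reduces to the rounded average of the two centre pixels, as expected.
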
diff --git a/vp10/common/filter.h b/vp10/common/filter.h
index 826cd0386e70453bd1053f914dec4550bd8c2034..c0a25d35f702c41083d2b42e82ca4d5f148d175a 100644
--- a/vp10/common/filter.h
+++ b/vp10/common/filter.h
@@ -16,16 +16,15 @@
 #include "vpx_dsp/vpx_filter.h"
 #include "vpx_ports/mem.h"
 
-
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#define EIGHTTAP            0
-#define EIGHTTAP_SMOOTH     1
-#define EIGHTTAP_SHARP      2
-#define SWITCHABLE_FILTERS  3 /* Number of switchable filters */
-#define BILINEAR            3
+#define EIGHTTAP 0
+#define EIGHTTAP_SMOOTH 1
+#define EIGHTTAP_SHARP 2
+#define SWITCHABLE_FILTERS 3 /* Number of switchable filters */
+#define BILINEAR 3
 // The codec can operate in four possible inter prediction filter modes:
 // 8-tap, 8-tap-smooth, 8-tap-sharp, and switching between the three.
 #define SWITCHABLE_FILTER_CONTEXTS (SWITCHABLE_FILTERS + 1)
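
These constants double as indices into the vp10_filter_kernels table in filter.c (BILINEAR reuses the value 3, which is why the bilinear kernels sit last, outside the switchable set). A hedged lookup sketch; pick_kernel is a hypothetical helper:

#include "vp10/common/filter.h"

/* Select the 8-tap kernel row for a filter type and 1/16-pel phase. */
static const int16_t *pick_kernel(int filter_type, int phase) {
  return vp10_filter_kernels[filter_type][phase & (SUBPEL_SHIFTS - 1)];
}
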
diff --git a/vp10/common/frame_buffers.c b/vp10/common/frame_buffers.c
index 31ff9602a60ee19ff98b59ffa44901759234deaf..564fa9c2acc7b9d0a5c3101192168be694745d1c 100644
--- a/vp10/common/frame_buffers.c
+++ b/vp10/common/frame_buffers.c
@@ -19,9 +19,8 @@ int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
 
   list->num_internal_frame_buffers =
       VPX_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS;
-  list->int_fb =
-      (InternalFrameBuffer *)vpx_calloc(list->num_internal_frame_buffers,
-                                        sizeof(*list->int_fb));
+  list->int_fb = (InternalFrameBuffer *)vpx_calloc(
+      list->num_internal_frame_buffers, sizeof(*list->int_fb));
   return (list->int_fb == NULL);
 }
 
@@ -39,27 +38,23 @@ void vp10_free_internal_frame_buffers(InternalFrameBufferList *list) {
 }
 
 int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
-                         vpx_codec_frame_buffer_t *fb) {
+                          vpx_codec_frame_buffer_t *fb) {
   int i;
   InternalFrameBufferList *const int_fb_list =
       (InternalFrameBufferList *)cb_priv;
-  if (int_fb_list == NULL)
-    return -1;
+  if (int_fb_list == NULL) return -1;
 
   // Find a free frame buffer.
   for (i = 0; i < int_fb_list->num_internal_frame_buffers; ++i) {
-    if (!int_fb_list->int_fb[i].in_use)
-      break;
+    if (!int_fb_list->int_fb[i].in_use) break;
   }
 
-  if (i == int_fb_list->num_internal_frame_buffers)
-    return -1;
+  if (i == int_fb_list->num_internal_frame_buffers) return -1;
 
   if (int_fb_list->int_fb[i].size < min_size) {
     int_fb_list->int_fb[i].data =
         (uint8_t *)vpx_realloc(int_fb_list->int_fb[i].data, min_size);
-    if (!int_fb_list->int_fb[i].data)
-      return -1;
+    if (!int_fb_list->int_fb[i].data) return -1;
 
     // This memset is needed to fix a valgrind error from the C loop filter
     // caused by accessing uninitialized memory in the frame border. It could be
@@ -80,7 +75,6 @@ int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
 int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb) {
   InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv;
   (void)cb_priv;
-  if (int_fb)
-    int_fb->in_use = 0;
+  if (int_fb) int_fb->in_use = 0;
   return 0;
 }
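
For context, the two callbacks above implement libvpx's external frame-buffer interface. A hedged sketch of wiring the pool into a decoder instance; attach_frame_buffer_pool is a hypothetical helper and error handling is reduced to return codes:

#include "vpx/vpx_decoder.h"
#include "vp10/common/frame_buffers.h"

/* Register the internal pool so libvpx allocates decoded frames through
 * vp10_get_frame_buffer / vp10_release_frame_buffer. */
static int attach_frame_buffer_pool(vpx_codec_ctx_t *decoder,
                                    InternalFrameBufferList *list) {
  if (vp10_alloc_internal_frame_buffers(list)) return -1; /* nonzero: failure */
  if (vpx_codec_set_frame_buffer_functions(decoder, vp10_get_frame_buffer,
                                           vp10_release_frame_buffer,
                                           list) != VPX_CODEC_OK)
    return -1;
  return 0;
}
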
diff --git a/vp10/common/frame_buffers.h b/vp10/common/frame_buffers.h
index 729ebafb023f900bf0fc17c10ce4e82ef831594e..fa6a2e2592157d709ef6e1cb9b968e702485fe39 100644
--- a/vp10/common/frame_buffers.h
+++ b/vp10/common/frame_buffers.h
@@ -40,7 +40,7 @@ void vp10_free_internal_frame_buffers(InternalFrameBufferList *list);
 // |min_size| is the minimum size in bytes needed to decode the next frame.
 // |fb| is a pointer to the frame buffer.
 int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
-                         vpx_codec_frame_buffer_t *fb);
+                          vpx_codec_frame_buffer_t *fb);
 
 // Callback used by libvpx when there are no references to the frame buffer.
 // |cb_priv| is not used. |fb| is a pointer to the frame buffer.
diff --git a/vp10/common/idct.c b/vp10/common/idct.c
index 20a120e7a1eae4bffda97bad3dc5fc29bdb5bcaa..987092554deda7bdc21d60acc75fd95de51b8ff7 100644
--- a/vp10/common/idct.c
+++ b/vp10/common/idct.c
@@ -18,12 +18,12 @@
 #include "vpx_ports/mem.h"
 
 void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                         int tx_type) {
+                          int tx_type) {
   const transform_2d IHT_4[] = {
-    { idct4_c, idct4_c  },  // DCT_DCT  = 0
-    { iadst4_c, idct4_c  },   // ADST_DCT = 1
-    { idct4_c, iadst4_c },    // DCT_ADST = 2
-    { iadst4_c, iadst4_c }      // ADST_ADST = 3
+    { idct4_c, idct4_c },   // DCT_DCT  = 0
+    { iadst4_c, idct4_c },  // ADST_DCT = 1
+    { idct4_c, iadst4_c },  // DCT_ADST = 2
+    { iadst4_c, iadst4_c }  // ADST_ADST = 3
   };
 
   int i, j;
@@ -34,14 +34,13 @@ void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
   // inverse transform row vectors
   for (i = 0; i < 4; ++i) {
     IHT_4[tx_type].rows(input, outptr);
-    input  += 4;
+    input += 4;
     outptr += 4;
   }
 
   // inverse transform column vectors
   for (i = 0; i < 4; ++i) {
-    for (j = 0; j < 4; ++j)
-      temp_in[j] = out[j * 4 + i];
+    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
     IHT_4[tx_type].cols(temp_in, temp_out);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -51,14 +50,14 @@ void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 }
 
 static const transform_2d IHT_8[] = {
-  { idct8_c,  idct8_c  },  // DCT_DCT  = 0
-  { iadst8_c, idct8_c  },  // ADST_DCT = 1
-  { idct8_c,  iadst8_c },  // DCT_ADST = 2
-  { iadst8_c, iadst8_c }   // ADST_ADST = 3
+  { idct8_c, idct8_c },   // DCT_DCT  = 0
+  { iadst8_c, idct8_c },  // ADST_DCT = 1
+  { idct8_c, iadst8_c },  // DCT_ADST = 2
+  { iadst8_c, iadst8_c }  // ADST_ADST = 3
 };
 
 void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                         int tx_type) {
+                          int tx_type) {
   int i, j;
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
@@ -74,8 +73,7 @@ void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 
   // inverse transform column vectors
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     ht.cols(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -85,14 +83,14 @@ void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 }
 
 static const transform_2d IHT_16[] = {
-  { idct16_c,  idct16_c  },  // DCT_DCT  = 0
-  { iadst16_c, idct16_c  },  // ADST_DCT = 1
-  { idct16_c,  iadst16_c },  // DCT_ADST = 2
-  { iadst16_c, iadst16_c }   // ADST_ADST = 3
+  { idct16_c, idct16_c },   // DCT_DCT  = 0
+  { iadst16_c, idct16_c },  // ADST_DCT = 1
+  { idct16_c, iadst16_c },  // DCT_ADST = 2
+  { iadst16_c, iadst16_c }  // ADST_ADST = 3
 };
 
 void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                            int tx_type) {
+                             int tx_type) {
   int i, j;
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
@@ -108,8 +106,7 @@ void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 
   // Columns
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j * 16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     ht.cols(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -120,16 +117,15 @@ void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 
 // idct
 void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                     int eob) {
+                      int eob) {
   if (eob > 1)
     vpx_idct4x4_16_add(input, dest, stride);
   else
     vpx_idct4x4_1_add(input, dest, stride);
 }
 
-
 void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                     int eob) {
+                      int eob) {
   if (eob > 1)
     vpx_iwht4x4_16_add(input, dest, stride);
   else
@@ -137,7 +133,7 @@ void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
 }
 
 void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                     int eob) {
+                      int eob) {
   // If dc is 1, input[0] is already the reconstructed value and needs no
   // dequantization. Also, when dc is 1 it is counted in eobs, so eobs >= 1.
 
@@ -155,11 +151,10 @@ void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
 }
 
 void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
-                       int eob) {
+                        int eob) {
   /* The calculation can be simplified if there are not many non-zero dct
    * coefficients. Use eobs to separate different cases. */
-  if (eob == 1)
-    /* DC only DCT coefficient. */
+  if (eob == 1) /* DC only DCT coefficient. */
     vpx_idct16x16_1_add(input, dest, stride);
   else if (eob <= 10)
     vpx_idct16x16_10_add(input, dest, stride);
@@ -168,7 +163,7 @@ void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
 }
 
 void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
-                       int eob) {
+                        int eob) {
   if (eob == 1)
     vpx_idct32x32_1_add(input, dest, stride);
   else if (eob <= 34)
@@ -178,87 +173,63 @@ void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
     vpx_idct32x32_1024_add(input, dest, stride);
 }
 
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
-                           int stride, int eob, TX_TYPE tx_type, int lossless) {
+void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+                           int eob, TX_TYPE tx_type, int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
     vp10_iwht4x4_add(input, dest, stride, eob);
   } else {
     switch (tx_type) {
-      case DCT_DCT:
-        vp10_idct4x4_add(input, dest, stride, eob);
-        break;
+      case DCT_DCT: vp10_idct4x4_add(input, dest, stride, eob); break;
       case ADST_DCT:
       case DCT_ADST:
-      case ADST_ADST:
-        vp10_iht4x4_16_add(input, dest, stride, tx_type);
-        break;
-      default:
-        assert(0);
-        break;
+      case ADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+      default: assert(0); break;
     }
   }
 }
 
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
-                           int stride, int eob, TX_TYPE tx_type) {
+void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+                           int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT:
-      vp10_idct8x8_add(input, dest, stride, eob);
-      break;
+    case DCT_DCT: vp10_idct8x8_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST:
-      vp10_iht8x8_64_add(input, dest, stride, tx_type);
-      break;
-    default:
-      assert(0);
-      break;
+    case ADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+    default: assert(0); break;
   }
 }
 
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
-                             int stride, int eob, TX_TYPE tx_type) {
+void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+                             int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT:
-      vp10_idct16x16_add(input, dest, stride, eob);
-      break;
+    case DCT_DCT: vp10_idct16x16_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST:
-      vp10_iht16x16_256_add(input, dest, stride, tx_type);
-      break;
-    default:
-      assert(0);
-      break;
+    case ADST_ADST: vp10_iht16x16_256_add(input, dest, stride, tx_type); break;
+    default: assert(0); break;
   }
 }
 
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
-                             int stride, int eob, TX_TYPE tx_type) {
+void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+                             int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT:
-      vp10_idct32x32_add(input, dest, stride, eob);
-      break;
+    case DCT_DCT: vp10_idct32x32_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST:
-      assert(0);
-      break;
-    default:
-      assert(0);
-      break;
+    case ADST_ADST: assert(0); break;
+    default: assert(0); break;
   }
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
 void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                int stride, int tx_type, int bd) {
+                                 int stride, int tx_type, int bd) {
   const highbd_transform_2d IHT_4[] = {
-    { vpx_highbd_idct4_c, vpx_highbd_idct4_c  },    // DCT_DCT  = 0
-    { vpx_highbd_iadst4_c, vpx_highbd_idct4_c },    // ADST_DCT = 1
-    { vpx_highbd_idct4_c, vpx_highbd_iadst4_c },    // DCT_ADST = 2
-    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c }    // ADST_ADST = 3
+    { vpx_highbd_idct4_c, vpx_highbd_idct4_c },   // DCT_DCT  = 0
+    { vpx_highbd_iadst4_c, vpx_highbd_idct4_c },  // ADST_DCT = 1
+    { vpx_highbd_idct4_c, vpx_highbd_iadst4_c },  // DCT_ADST = 2
+    { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c }  // ADST_ADST = 3
   };
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
 
@@ -270,14 +241,13 @@ void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
   // Inverse transform row vectors.
   for (i = 0; i < 4; ++i) {
     IHT_4[tx_type].rows(input, outptr, bd);
-    input  += 4;
+    input += 4;
     outptr += 4;
   }
 
   // Inverse transform column vectors.
   for (i = 0; i < 4; ++i) {
-    for (j = 0; j < 4; ++j)
-      temp_in[j] = out[j * 4 + i];
+    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
     IHT_4[tx_type].cols(temp_in, temp_out, bd);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -287,14 +257,14 @@ void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 static const highbd_transform_2d HIGH_IHT_8[] = {
-  { vpx_highbd_idct8_c,  vpx_highbd_idct8_c  },  // DCT_DCT  = 0
-  { vpx_highbd_iadst8_c, vpx_highbd_idct8_c  },  // ADST_DCT = 1
-  { vpx_highbd_idct8_c,  vpx_highbd_iadst8_c },  // DCT_ADST = 2
-  { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c }   // ADST_ADST = 3
+  { vpx_highbd_idct8_c, vpx_highbd_idct8_c },   // DCT_DCT  = 0
+  { vpx_highbd_iadst8_c, vpx_highbd_idct8_c },  // ADST_DCT = 1
+  { vpx_highbd_idct8_c, vpx_highbd_iadst8_c },  // DCT_ADST = 2
+  { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c }  // ADST_ADST = 3
 };
 
 void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
-                                int stride, int tx_type, int bd) {
+                                 int stride, int tx_type, int bd) {
   int i, j;
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
@@ -311,8 +281,7 @@ void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Inverse transform column vectors.
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     ht.cols(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -322,14 +291,14 @@ void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 static const highbd_transform_2d HIGH_IHT_16[] = {
-  { vpx_highbd_idct16_c,  vpx_highbd_idct16_c  },  // DCT_DCT  = 0
-  { vpx_highbd_iadst16_c, vpx_highbd_idct16_c  },  // ADST_DCT = 1
-  { vpx_highbd_idct16_c,  vpx_highbd_iadst16_c },  // DCT_ADST = 2
-  { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c }   // ADST_ADST = 3
+  { vpx_highbd_idct16_c, vpx_highbd_idct16_c },   // DCT_DCT  = 0
+  { vpx_highbd_iadst16_c, vpx_highbd_idct16_c },  // ADST_DCT = 1
+  { vpx_highbd_idct16_c, vpx_highbd_iadst16_c },  // DCT_ADST = 2
+  { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c }  // ADST_ADST = 3
 };
 
 void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int tx_type, int bd) {
+                                    int stride, int tx_type, int bd) {
   int i, j;
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
@@ -346,8 +315,7 @@ void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Columns
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j * 16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     ht.cols(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -358,16 +326,15 @@ void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
 
 // idct
 void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                            int eob, int bd) {
+                             int eob, int bd) {
   if (eob > 1)
     vpx_highbd_idct4x4_16_add(input, dest, stride, bd);
   else
     vpx_highbd_idct4x4_1_add(input, dest, stride, bd);
 }
 
-
 void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                            int eob, int bd) {
+                             int eob, int bd) {
   if (eob > 1)
     vpx_highbd_iwht4x4_16_add(input, dest, stride, bd);
   else
@@ -375,7 +342,7 @@ void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
 }
 
 void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                            int eob, int bd) {
+                             int eob, int bd) {
   // If dc is 1, input[0] is already the reconstructed value and needs no
   // dequantization. Also, when dc is 1 it is counted in eobs, so eobs >= 1.
 
@@ -394,7 +361,7 @@ void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
 }
 
 void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
-                              int stride, int eob, int bd) {
+                               int stride, int eob, int bd) {
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to separate different cases.
   // DC only DCT coefficient.
@@ -408,7 +375,7 @@ void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
 }
 
 void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
-                              int stride, int eob, int bd) {
+                               int stride, int eob, int bd) {
   // Non-zero coeff only in upper-left 8x8
   if (eob == 1) {
     vpx_highbd_idct32x32_1_add(input, dest, stride, bd);
@@ -433,11 +400,9 @@ void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
       case ADST_DCT:
       case DCT_ADST:
       case ADST_ADST:
-         vp10_highbd_iht4x4_16_add(input, dest, stride, tx_type, bd);
-         break;
-      default:
-        assert(0);
+        vp10_highbd_iht4x4_16_add(input, dest, stride, tx_type, bd);
         break;
+      default: assert(0); break;
     }
   }
 }
@@ -446,17 +411,13 @@ void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd,
                                   TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT:
-      vp10_highbd_idct8x8_add(input, dest, stride, eob, bd);
-      break;
+    case DCT_DCT: vp10_highbd_idct8x8_add(input, dest, stride, eob, bd); break;
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
       vp10_highbd_iht8x8_64_add(input, dest, stride, tx_type, bd);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
 
@@ -472,9 +433,7 @@ void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
     case ADST_ADST:
       vp10_highbd_iht16x16_256_add(input, dest, stride, tx_type, bd);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
 
@@ -487,12 +446,8 @@ void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
       break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST:
-      assert(0);
-      break;
-    default:
-      assert(0);
-      break;
+    case ADST_ADST: assert(0); break;
+    default: assert(0); break;
   }
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
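
Every hybrid transform in this file follows the same separable pattern: a 1-D transform over each row into a scratch buffer, then a 1-D transform down each column, with the rounded result added into the reconstruction. A compact sketch of that skeleton for the 4x4 case, reusing the transform_2d, clip_pixel_add and ROUND_POWER_OF_TWO helpers already visible above; iht4x4_sketch is a hypothetical name:

/* Skeleton of the row/column decomposition used by vp10_iht4x4_16_add_c. */
static void iht4x4_sketch(const tran_low_t *input, uint8_t *dest, int stride,
                          const transform_2d *ht) {
  tran_low_t out[4 * 4], temp_in[4], temp_out[4];
  int i, j;
  for (i = 0; i < 4; ++i) /* rows: consume 4 coefficients at a time */
    ht->rows(input + 4 * i, out + 4 * i);
  for (i = 0; i < 4; ++i) { /* columns: gather, transform, scatter-add */
    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
    ht->cols(temp_in, temp_out);
    for (j = 0; j < 4; ++j)
      dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                            ROUND_POWER_OF_TWO(temp_out[j], 4));
  }
}
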
diff --git a/vp10/common/idct.h b/vp10/common/idct.h
index 410fa14fedfe8cf10493b1936d9a26eae4fde74d..a73e61f2806423bb665a64c8a59c60bbeeb2b8c9 100644
--- a/vp10/common/idct.h
+++ b/vp10/common/idct.h
@@ -24,14 +24,14 @@
 extern "C" {
 #endif
 
-typedef void (*transform_1d)(const tran_low_t*, tran_low_t*);
+typedef void (*transform_1d)(const tran_low_t *, tran_low_t *);
 
 typedef struct {
   transform_1d cols, rows;  // vertical and horizontal
 } transform_2d;
 
 #if CONFIG_VPX_HIGHBITDEPTH
-typedef void (*highbd_transform_1d)(const tran_low_t*, tran_low_t*, int bd);
+typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
 
 typedef struct {
   highbd_transform_1d cols, rows;  // vertical and horizontal
@@ -39,30 +39,30 @@ typedef struct {
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                     int eob);
+                      int eob);
 void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                     int eob);
+                      int eob);
 
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
-                           int stride, int eob, TX_TYPE tx_type, int lossless);
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
-                           int stride, int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
-                             int stride, int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
-                             int stride, int eob, TX_TYPE tx_type);
+void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+                           int eob, TX_TYPE tx_type, int lossless);
+void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+                           int eob, TX_TYPE tx_type);
+void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+                             int eob, TX_TYPE tx_type);
+void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+                             int eob, TX_TYPE tx_type);
 
 #if CONFIG_VPX_HIGHBITDEPTH
 void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                            int eob, int bd);
+                             int eob, int bd);
 void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
-                            int eob, int bd);
+                             int eob, int bd);
 void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
-                            int eob, int bd);
+                             int eob, int bd);
 void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
-                              int stride, int eob, int bd);
+                               int stride, int eob, int bd);
 void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
-                              int stride, int eob, int bd);
+                               int stride, int eob, int bd);
 void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd, TX_TYPE tx_type,
                                   int lossless);
diff --git a/vp10/common/loopfilter.c b/vp10/common/loopfilter.c
index 4c1716ecd7cb66b79f8b542afd875a485a1d82f4..a17939deb89d9332a215c7e544ee14f97bf5ddf7 100644
--- a/vp10/common/loopfilter.c
+++ b/vp10/common/loopfilter.c
@@ -36,7 +36,7 @@
 //    10101010
 //
 // A loopfilter should be applied to every other 8x8 horizontally.
-static const uint64_t left_64x64_txform_mask[TX_SIZES]= {
+static const uint64_t left_64x64_txform_mask[TX_SIZES] = {
   0xffffffffffffffffULL,  // TX_4X4
   0xffffffffffffffffULL,  // TX_8x8
   0x5555555555555555ULL,  // TX_16x16
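
To make the mask layout concrete: each 64-bit mask covers the 8x8 grid of 8x8-pixel units inside a 64x64 superblock, row-major with 8 bits per row, so the 0x55 byte pattern above marks every other column. A hedged accessor; left_edge_needs_filter is a hypothetical name:

/* Bit (row * 8 + col) says whether the left edge of the 8x8 unit at
 * (row, col) needs filtering for the given transform size. */
static int left_edge_needs_filter(uint64_t mask, int row, int col) {
  return (int)((mask >> (row * 8 + col)) & 1);
}
/* e.g. with the TX_16X16 mask, column 0 filters and column 1 does not. */
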
@@ -60,7 +60,7 @@ static const uint64_t left_64x64_txform_mask[TX_SIZES]= {
 //    00000000
 //
 // A loopfilter should be applied to every other 4-row stripe vertically.
-static const uint64_t above_64x64_txform_mask[TX_SIZES]= {
+static const uint64_t above_64x64_txform_mask[TX_SIZES] = {
   0xffffffffffffffffULL,  // TX_4X4
   0xffffffffffffffffULL,  // TX_8x8
   0x00ff00ff00ff00ffULL,  // TX_16x16
@@ -134,18 +134,18 @@ static const uint64_t size_mask[BLOCK_SIZES] = {
 };
 
 // These are used for masking the left and above borders.
-static const uint64_t left_border =  0x1111111111111111ULL;
+static const uint64_t left_border = 0x1111111111111111ULL;
 static const uint64_t above_border = 0x000000ff000000ffULL;
 
 // 16 bit masks for uv transform sizes.
-static const uint16_t left_64x64_txform_mask_uv[TX_SIZES]= {
+static const uint16_t left_64x64_txform_mask_uv[TX_SIZES] = {
   0xffff,  // TX_4X4
   0xffff,  // TX_8x8
   0x5555,  // TX_16x16
   0x1111,  // TX_32x32
 };
 
-static const uint16_t above_64x64_txform_mask_uv[TX_SIZES]= {
+static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = {
   0xffff,  // TX_4X4
   0xffff,  // TX_8x8
   0x0f0f,  // TX_16x16
@@ -201,7 +201,7 @@ static const uint16_t size_mask_uv[BLOCK_SIZES] = {
   0x00ff,  // BLOCK_64X32,
   0xffff,  // BLOCK_64X64
 };
-static const uint16_t left_border_uv =  0x1111;
+static const uint16_t left_border_uv = 0x1111;
 static const uint16_t above_border_uv = 0x000f;
 
 static const int mode_lf_lut[MB_MODE_COUNT] = {
@@ -222,8 +222,7 @@ static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
         block_inside_limit = (9 - sharpness_lvl);
     }
 
-    if (block_inside_limit < 1)
-      block_inside_limit = 1;
+    if (block_inside_limit < 1) block_inside_limit = 1;
 
     memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH);
     memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit),
@@ -233,8 +232,8 @@ static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
 
 static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
                                 const MB_MODE_INFO *mbmi) {
-  return lfi_n->lvl[mbmi->segment_id][mbmi->ref_frame[0]]
-                   [mode_lf_lut[mbmi->mode]];
+  return lfi_n
+      ->lvl[mbmi->segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
 }
 
 void vp10_loop_filter_init(VP10_COMMON *cm) {
@@ -271,9 +270,9 @@ void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
     int lvl_seg = default_filt_lvl;
     if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
       const int data = get_segdata(seg, seg_id, SEG_LVL_ALT_LF);
-      lvl_seg = clamp(seg->abs_delta == SEGMENT_ABSDATA ?
-                      data : default_filt_lvl + data,
-                      0, MAX_LOOP_FILTER);
+      lvl_seg = clamp(
+          seg->abs_delta == SEGMENT_ABSDATA ? data : default_filt_lvl + data, 0,
+          MAX_LOOP_FILTER);
     }
 
     if (!lf->mode_ref_delta_enabled) {
@@ -287,8 +286,8 @@ void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
 
       for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) {
         for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) {
-          const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale
-                                        + lf->mode_deltas[mode] * scale;
+          const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale +
+                                lf->mode_deltas[mode] * scale;
           lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER);
         }
       }
@@ -296,9 +295,8 @@ void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
   }
 }
 
-static void filter_selectively_vert_row2(int subsampling_factor,
-                                         uint8_t *s, int pitch,
-                                         unsigned int mask_16x16_l,
+static void filter_selectively_vert_row2(int subsampling_factor, uint8_t *s,
+                                         int pitch, unsigned int mask_16x16_l,
                                          unsigned int mask_8x8_l,
                                          unsigned int mask_4x4_l,
                                          unsigned int mask_4x4_int_l,
@@ -331,11 +329,10 @@ static void filter_selectively_vert_row2(int subsampling_factor,
           vpx_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr);
         } else if (mask_16x16_0 & 1) {
-          vpx_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
-                              lfi0->hev_thr);
+          vpx_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
         } else {
-          vpx_lpf_vertical_16(s + 8 *pitch, pitch, lfi1->mblim,
-                              lfi1->lim, lfi1->hev_thr);
+          vpx_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
+                              lfi1->hev_thr);
         }
       }
 
@@ -396,14 +393,11 @@ static void filter_selectively_vert_row2(int subsampling_factor,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static void highbd_filter_selectively_vert_row2(int subsampling_factor,
-                                                uint16_t *s, int pitch,
-                                                unsigned int mask_16x16_l,
-                                                unsigned int mask_8x8_l,
-                                                unsigned int mask_4x4_l,
-                                                unsigned int mask_4x4_int_l,
-                                                const loop_filter_info_n *lfi_n,
-                                                const uint8_t *lfl, int bd) {
+static void highbd_filter_selectively_vert_row2(
+    int subsampling_factor, uint16_t *s, int pitch, unsigned int mask_16x16_l,
+    unsigned int mask_8x8_l, unsigned int mask_4x4_l,
+    unsigned int mask_4x4_int_l, const loop_filter_info_n *lfi_n,
+    const uint8_t *lfl, int bd) {
   const int mask_shift = subsampling_factor ? 4 : 8;
   const int mask_cutoff = subsampling_factor ? 0xf : 0xff;
   const int lfl_forward = subsampling_factor ? 4 : 8;
@@ -419,7 +413,7 @@ static void highbd_filter_selectively_vert_row2(int subsampling_factor,
   unsigned int mask;
 
   for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 |
-       mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
+              mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
        mask; mask >>= 1) {
     const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl;
     const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward);
@@ -434,7 +428,7 @@ static void highbd_filter_selectively_vert_row2(int subsampling_factor,
           vpx_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
                                      lfi0->hev_thr, bd);
         } else {
-          vpx_highbd_lpf_vertical_16(s + 8 *pitch, pitch, lfi1->mblim,
+          vpx_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
                                      lfi1->lim, lfi1->hev_thr, bd);
         }
       }
@@ -496,30 +490,27 @@ static void highbd_filter_selectively_vert_row2(int subsampling_factor,
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-static void filter_selectively_horiz(uint8_t *s, int pitch,
-                                     unsigned int mask_16x16,
-                                     unsigned int mask_8x8,
-                                     unsigned int mask_4x4,
-                                     unsigned int mask_4x4_int,
-                                     const loop_filter_info_n *lfi_n,
-                                     const uint8_t *lfl) {
+static void filter_selectively_horiz(
+    uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
+    unsigned int mask_4x4, unsigned int mask_4x4_int,
+    const loop_filter_info_n *lfi_n, const uint8_t *lfl) {
   unsigned int mask;
   int count;
 
-  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
-       mask; mask >>= count) {
+  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask;
+       mask >>= count) {
     const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;
 
     count = 1;
     if (mask & 1) {
       if (mask_16x16 & 1) {
         if ((mask_16x16 & 3) == 3) {
-          vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
-                                lfi->hev_thr, 2);
+          vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+                                2);
           count = 2;
         } else {
-          vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
-                                lfi->hev_thr, 1);
+          vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+                                1);
         }
       } else if (mask_8x8 & 1) {
         if ((mask_8x8 & 3) == 3) {
@@ -593,18 +584,15 @@ static void filter_selectively_horiz(uint8_t *s, int pitch,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static void highbd_filter_selectively_horiz(uint16_t *s, int pitch,
-                                            unsigned int mask_16x16,
-                                            unsigned int mask_8x8,
-                                            unsigned int mask_4x4,
-                                            unsigned int mask_4x4_int,
-                                            const loop_filter_info_n *lfi_n,
-                                            const uint8_t *lfl, int bd) {
+static void highbd_filter_selectively_horiz(
+    uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
+    unsigned int mask_4x4, unsigned int mask_4x4_int,
+    const loop_filter_info_n *lfi_n, const uint8_t *lfl, int bd) {
   unsigned int mask;
   int count;
 
-  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
-       mask; mask >>= count) {
+  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask;
+       mask >>= count) {
     const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;
 
     count = 1;
@@ -628,10 +616,9 @@ static void highbd_filter_selectively_horiz(uint16_t *s, int pitch,
                                            lfin->hev_thr, bd);
 
           if ((mask_4x4_int & 3) == 3) {
-            vpx_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
-                                             lfi->lim, lfi->hev_thr,
-                                             lfin->mblim, lfin->lim,
-                                             lfin->hev_thr, bd);
+            vpx_highbd_lpf_horizontal_4_dual(
+                s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+                lfin->mblim, lfin->lim, lfin->hev_thr, bd);
           } else {
             if (mask_4x4_int & 1) {
               vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
@@ -660,10 +647,9 @@ static void highbd_filter_selectively_horiz(uint16_t *s, int pitch,
                                            lfi->hev_thr, lfin->mblim, lfin->lim,
                                            lfin->hev_thr, bd);
           if ((mask_4x4_int & 3) == 3) {
-            vpx_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
-                                             lfi->lim, lfi->hev_thr,
-                                             lfin->mblim, lfin->lim,
-                                             lfin->hev_thr, bd);
+            vpx_highbd_lpf_horizontal_4_dual(
+                s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+                lfin->mblim, lfin->lim, lfin->hev_thr, bd);
           } else {
             if (mask_4x4_int & 1) {
               vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
@@ -707,8 +693,7 @@ static void highbd_filter_selectively_horiz(uint16_t *s, int pitch,
 // TODO(JBB): Need another function for different-resolution color planes.
 static void build_masks(const loop_filter_info_n *const lfi_n,
                         const MODE_INFO *mi, const int shift_y,
-                        const int shift_uv,
-                        LOOP_FILTER_MASK *lfm) {
+                        const int shift_uv, LOOP_FILTER_MASK *lfm) {
   const MB_MODE_INFO *mbmi = &mi->mbmi;
   const BLOCK_SIZE block_size = mbmi->sb_type;
   const TX_SIZE tx_size_y = mbmi->tx_size;
@@ -756,29 +741,28 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
   *left_y |= left_prediction_mask[block_size] << shift_y;
   *left_uv |= left_prediction_mask_uv[block_size] << shift_uv;
 
-  // If the block has no coefficients and is not intra we skip applying
-  // the loop filter on block edges.
+// If the block has no coefficients and is not intra we skip applying
+// the loop filter on block edges.
 #if CONFIG_MISC_FIXES
-  if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi))
-    return;
+  if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi)) return;
 #else
-  if (mbmi->skip && is_inter_block(mbmi))
-    return;
+  if (mbmi->skip && is_inter_block(mbmi)) return;
 #endif
 
   // Here we are adding a mask for the transform size. The transform
   // size mask is set to be correct for a 64x64 prediction block size. We
   // mask to match the size of the block we are working on and then shift it
   // into place.
-  *above_y |= (size_mask[block_size] &
-               above_64x64_txform_mask[tx_size_y]) << shift_y;
-  *above_uv |= (size_mask_uv[block_size] &
-                above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;
+  *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y])
+              << shift_y;
+  *above_uv |=
+      (size_mask_uv[block_size] & above_64x64_txform_mask_uv[tx_size_uv])
+      << shift_uv;
 
-  *left_y |= (size_mask[block_size] &
-              left_64x64_txform_mask[tx_size_y]) << shift_y;
-  *left_uv |= (size_mask_uv[block_size] &
-               left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;
+  *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y])
+             << shift_y;
+  *left_uv |= (size_mask_uv[block_size] & left_64x64_txform_mask_uv[tx_size_uv])
+              << shift_uv;
 
   // Here we are trying to determine what to do with the internal 4x4 block
   // boundaries.  These differ from the 4x4 boundaries on the outside edge of
@@ -822,18 +806,16 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
   *left_y |= left_prediction_mask[block_size] << shift_y;
 
 #if CONFIG_MISC_FIXES
-  if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi))
-    return;
+  if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi)) return;
 #else
-  if (mbmi->skip && is_inter_block(mbmi))
-    return;
+  if (mbmi->skip && is_inter_block(mbmi)) return;
 #endif
 
-  *above_y |= (size_mask[block_size] &
-               above_64x64_txform_mask[tx_size_y]) << shift_y;
+  *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y])
+              << shift_y;
 
-  *left_y |= (size_mask[block_size] &
-              left_64x64_txform_mask[tx_size_y]) << shift_y;
+  *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y])
+             << shift_y;
 
   if (tx_size_y == TX_4X4)
     *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y;
@@ -843,8 +825,8 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
 // by mi_row, mi_col.
 // TODO(JBB): This function only works for yv12.
 void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
-                    MODE_INFO **mi, const int mode_info_stride,
-                    LOOP_FILTER_MASK *lfm) {
+                     MODE_INFO **mi, const int mode_info_stride,
+                     LOOP_FILTER_MASK *lfm) {
   int idx_32, idx_16, idx_8;
   const loop_filter_info_n *const lfi_n = &cm->lf_info;
   MODE_INFO **mip = mi;
@@ -854,26 +836,28 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
   // added to the mi ptr as we go through each loop. It helps us to avoid
   // setting up special row and column counters for each index. The last step
   // brings us back to the starting position.
-  const int offset_32[] = {4, (mode_info_stride << 2) - 4, 4,
-                           -(mode_info_stride << 2) - 4};
-  const int offset_16[] = {2, (mode_info_stride << 1) - 2, 2,
-                           -(mode_info_stride << 1) - 2};
-  const int offset[] = {1, mode_info_stride - 1, 1, -mode_info_stride - 1};
+  const int offset_32[] = { 4, (mode_info_stride << 2) - 4, 4,
+                            -(mode_info_stride << 2) - 4 };
+  const int offset_16[] = { 2, (mode_info_stride << 1) - 2, 2,
+                            -(mode_info_stride << 1) - 2 };
+  const int offset[] = { 1, mode_info_stride - 1, 1, -mode_info_stride - 1 };
 
   // Following variables represent shifts to position the current block
   // mask over the appropriate block. A shift of 36 to the left will move
   // the bits for the final 32 by 32 block in the 64x64 up 4 rows and left
   // 4 columns to the appropriate spot.
-  const int shift_32_y[] = {0, 4, 32, 36};
-  const int shift_16_y[] = {0, 2, 16, 18};
-  const int shift_8_y[] = {0, 1, 8, 9};
-  const int shift_32_uv[] = {0, 2, 8, 10};
-  const int shift_16_uv[] = {0, 1, 4, 5};
+  const int shift_32_y[] = { 0, 4, 32, 36 };
+  const int shift_16_y[] = { 0, 2, 16, 18 };
+  const int shift_8_y[] = { 0, 1, 8, 9 };
+  const int shift_32_uv[] = { 0, 2, 8, 10 };
+  const int shift_16_uv[] = { 0, 1, 4, 5 };
   int i;
-  const int max_rows = (mi_row + MI_BLOCK_SIZE > cm->mi_rows ?
-                        cm->mi_rows - mi_row : MI_BLOCK_SIZE);
-  const int max_cols = (mi_col + MI_BLOCK_SIZE > cm->mi_cols ?
-                        cm->mi_cols - mi_col : MI_BLOCK_SIZE);
+  const int max_rows =
+      (mi_row + MI_BLOCK_SIZE > cm->mi_rows ? cm->mi_rows - mi_row
+                                            : MI_BLOCK_SIZE);
+  const int max_cols =
+      (mi_col + MI_BLOCK_SIZE > cm->mi_cols ? cm->mi_cols - mi_col
+                                            : MI_BLOCK_SIZE);
 
   vp10_zero(*lfm);
   assert(mip[0] != NULL);
@@ -882,21 +866,17 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
   // loop and storing lfm in the mbmi structure so that we don't have to go
   // through the recursive loop structure multiple times.
   switch (mip[0]->mbmi.sb_type) {
-    case BLOCK_64X64:
-      build_masks(lfi_n, mip[0] , 0, 0, lfm);
-      break;
+    case BLOCK_64X64: build_masks(lfi_n, mip[0], 0, 0, lfm); break;
     case BLOCK_64X32:
       build_masks(lfi_n, mip[0], 0, 0, lfm);
       mip2 = mip + mode_info_stride * 4;
-      if (4 >= max_rows)
-        break;
+      if (4 >= max_rows) break;
       build_masks(lfi_n, mip2[0], 32, 8, lfm);
       break;
     case BLOCK_32X64:
       build_masks(lfi_n, mip[0], 0, 0, lfm);
       mip2 = mip + 4;
-      if (4 >= max_cols)
-        break;
+      if (4 >= max_cols) break;
       build_masks(lfi_n, mip2[0], 4, 2, lfm);
       break;
     default:
@@ -913,15 +893,13 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
             break;
           case BLOCK_32X16:
             build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
-            if (mi_32_row_offset + 2 >= max_rows)
-              continue;
+            if (mi_32_row_offset + 2 >= max_rows) continue;
             mip2 = mip + mode_info_stride * 2;
             build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm);
             break;
           case BLOCK_16X32:
             build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
-            if (mi_32_col_offset + 2 >= max_cols)
-              continue;
+            if (mi_32_col_offset + 2 >= max_cols) continue;
             mip2 = mip + 2;
             build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm);
             break;
@@ -929,10 +907,10 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
             for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
               const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16];
               const int shift_uv = shift_32_uv[idx_32] + shift_16_uv[idx_16];
-              const int mi_16_col_offset = mi_32_col_offset +
-                  ((idx_16 & 1) << 1);
-              const int mi_16_row_offset = mi_32_row_offset +
-                  ((idx_16 >> 1) << 1);
+              const int mi_16_col_offset =
+                  mi_32_col_offset + ((idx_16 & 1) << 1);
+              const int mi_16_row_offset =
+                  mi_32_row_offset + ((idx_16 >> 1) << 1);
 
               if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
                 continue;
@@ -943,32 +921,28 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
                   break;
                 case BLOCK_16X8:
                   build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
-                  if (mi_16_row_offset + 1 >= max_rows)
-                    continue;
+                  if (mi_16_row_offset + 1 >= max_rows) continue;
                   mip2 = mip + mode_info_stride;
-                  build_y_mask(lfi_n, mip2[0], shift_y+8, lfm);
+                  build_y_mask(lfi_n, mip2[0], shift_y + 8, lfm);
                   break;
                 case BLOCK_8X16:
                   build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
-                  if (mi_16_col_offset +1 >= max_cols)
-                    continue;
+                  if (mi_16_col_offset + 1 >= max_cols) continue;
                   mip2 = mip + 1;
-                  build_y_mask(lfi_n, mip2[0], shift_y+1, lfm);
+                  build_y_mask(lfi_n, mip2[0], shift_y + 1, lfm);
                   break;
                 default: {
-                  const int shift_y = shift_32_y[idx_32] +
-                                      shift_16_y[idx_16] +
-                                      shift_8_y[0];
+                  const int shift_y =
+                      shift_32_y[idx_32] + shift_16_y[idx_16] + shift_8_y[0];
                   build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                   mip += offset[0];
                   for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
                     const int shift_y = shift_32_y[idx_32] +
-                                        shift_16_y[idx_16] +
-                                        shift_8_y[idx_8];
-                    const int mi_8_col_offset = mi_16_col_offset +
-                        ((idx_8 & 1));
-                    const int mi_8_row_offset = mi_16_row_offset +
-                        ((idx_8 >> 1));
+                                        shift_16_y[idx_16] + shift_8_y[idx_8];
+                    const int mi_8_col_offset =
+                        mi_16_col_offset + ((idx_8 & 1));
+                    const int mi_8_row_offset =
+                        mi_16_row_offset + ((idx_8 >> 1));
 
                     if (mi_8_col_offset >= max_cols ||
                         mi_8_row_offset >= max_rows)
@@ -1008,8 +982,8 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
     const uint64_t rows = cm->mi_rows - mi_row;
 
     // Each pixel inside the border gets a 1.
-    const uint64_t mask_y = (((uint64_t) 1 << (rows << 3)) - 1);
-    const uint16_t mask_uv = (((uint16_t) 1 << (((rows + 1) >> 1) << 2)) - 1);
+    const uint64_t mask_y = (((uint64_t)1 << (rows << 3)) - 1);
+    const uint16_t mask_uv = (((uint16_t)1 << (((rows + 1) >> 1) << 2)) - 1);
 
     // Remove values completely outside our border.
     for (i = 0; i < TX_32X32; i++) {
@@ -1042,7 +1016,7 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
 
     // Each pixel inside the border gets a 1; the multiply copies the border
     // to where we need it.
-    const uint64_t mask_y  = (((1 << columns) - 1)) * 0x0101010101010101ULL;
+    const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101ULL;
     const uint16_t mask_uv = ((1 << ((columns + 1) >> 1)) - 1) * 0x1111;
 
     // Internal edges are not applied on the last column of the image so
@@ -1088,7 +1062,7 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
   assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4]));
   assert(!(lfm->left_y[TX_8X8] & lfm->left_y[TX_4X4]));
   assert(!(lfm->int_4x4_y & lfm->left_y[TX_16X16]));
-  assert(!(lfm->left_uv[TX_16X16]&lfm->left_uv[TX_8X8]));
+  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_8X8]));
   assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4]));
   assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4]));
 #if CONFIG_MISC_FIXES
@@ -1110,17 +1084,14 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
 #endif
 }
 
-static void filter_selectively_vert(uint8_t *s, int pitch,
-                                    unsigned int mask_16x16,
-                                    unsigned int mask_8x8,
-                                    unsigned int mask_4x4,
-                                    unsigned int mask_4x4_int,
-                                    const loop_filter_info_n *lfi_n,
-                                    const uint8_t *lfl) {
+static void filter_selectively_vert(
+    uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
+    unsigned int mask_4x4, unsigned int mask_4x4_int,
+    const loop_filter_info_n *lfi_n, const uint8_t *lfl) {
   unsigned int mask;
 
-  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
-       mask; mask >>= 1) {
+  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask;
+       mask >>= 1) {
     const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;
 
     if (mask & 1) {
@@ -1144,29 +1115,26 @@ static void filter_selectively_vert(uint8_t *s, int pitch,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static void highbd_filter_selectively_vert(uint16_t *s, int pitch,
-                                           unsigned int mask_16x16,
-                                           unsigned int mask_8x8,
-                                           unsigned int mask_4x4,
-                                           unsigned int mask_4x4_int,
-                                           const loop_filter_info_n *lfi_n,
-                                           const uint8_t *lfl, int bd) {
+static void highbd_filter_selectively_vert(
+    uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
+    unsigned int mask_4x4, unsigned int mask_4x4_int,
+    const loop_filter_info_n *lfi_n, const uint8_t *lfl, int bd) {
   unsigned int mask;
 
-  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
-       mask; mask >>= 1) {
+  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask;
+       mask >>= 1) {
     const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;
 
     if (mask & 1) {
       if (mask_16x16 & 1) {
-        vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim,
-                                   lfi->hev_thr, bd);
+        vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+                                   bd);
       } else if (mask_8x8 & 1) {
-        vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim,
-                                  lfi->hev_thr, 1, bd);
+        vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+                                  1, bd);
       } else if (mask_4x4 & 1) {
-        vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim,
-                                lfi->hev_thr, 1, bd);
+        vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
+                                  1, bd);
       }
     }
     if (mask_4x4_int & 1)
@@ -1183,20 +1151,20 @@ static void highbd_filter_selectively_vert(uint16_t *s, int pitch,
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 void vp10_filter_block_plane_non420(VP10_COMMON *cm,
-                                   struct macroblockd_plane *plane,
-                                   MODE_INFO **mi_8x8,
-                                   int mi_row, int mi_col) {
+                                    struct macroblockd_plane *plane,
+                                    MODE_INFO **mi_8x8, int mi_row,
+                                    int mi_col) {
   const int ss_x = plane->subsampling_x;
   const int ss_y = plane->subsampling_y;
   const int row_step = 1 << ss_y;
   const int col_step = 1 << ss_x;
   const int row_step_stride = cm->mi_stride * row_step;
   struct buf_2d *const dst = &plane->dst;
-  uint8_t* const dst0 = dst->buf;
-  unsigned int mask_16x16[MI_BLOCK_SIZE] = {0};
-  unsigned int mask_8x8[MI_BLOCK_SIZE] = {0};
-  unsigned int mask_4x4[MI_BLOCK_SIZE] = {0};
-  unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0};
+  uint8_t *const dst0 = dst->buf;
+  unsigned int mask_16x16[MI_BLOCK_SIZE] = { 0 };
+  unsigned int mask_8x8[MI_BLOCK_SIZE] = { 0 };
+  unsigned int mask_4x4[MI_BLOCK_SIZE] = { 0 };
+  unsigned int mask_4x4_int[MI_BLOCK_SIZE] = { 0 };
   uint8_t lfl[MI_BLOCK_SIZE * MI_BLOCK_SIZE];
   int r, c;
 
@@ -1212,22 +1180,26 @@ void vp10_filter_block_plane_non420(VP10_COMMON *cm,
       const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
       const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi);
       // left edge of current unit is block/partition edge -> no skip
-      const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ?
-          !(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1;
+      const int block_edge_left =
+          (num_4x4_blocks_wide_lookup[sb_type] > 1)
+              ? !(c & (num_8x8_blocks_wide_lookup[sb_type] - 1))
+              : 1;
       const int skip_this_c = skip_this && !block_edge_left;
       // top edge of current unit is block/partition edge -> no skip
-      const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ?
-          !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1;
+      const int block_edge_above =
+          (num_4x4_blocks_high_lookup[sb_type] > 1)
+              ? !(r & (num_8x8_blocks_high_lookup[sb_type] - 1))
+              : 1;
       const int skip_this_r = skip_this && !block_edge_above;
       const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
-                            ? get_uv_tx_size(&mi[0].mbmi, plane)
-                            : mi[0].mbmi.tx_size;
+                                  ? get_uv_tx_size(&mi[0].mbmi, plane)
+                                  : mi[0].mbmi.tx_size;
       const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
       const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
 
       // Filter level can vary per MI
       if (!(lfl[(r << 3) + (c >> ss_x)] =
-            get_filter_level(&cm->lf_info, &mi[0].mbmi)))
+                get_filter_level(&cm->lf_info, &mi[0].mbmi)))
         continue;
 
       // Build masks based on the transform size of each block
@@ -1282,29 +1254,21 @@ void vp10_filter_block_plane_non420(VP10_COMMON *cm,
     border_mask = ~(mi_col == 0);
 #if CONFIG_VPX_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
-      highbd_filter_selectively_vert(CONVERT_TO_SHORTPTR(dst->buf),
-                                     dst->stride,
-                                     mask_16x16_c & border_mask,
-                                     mask_8x8_c & border_mask,
-                                     mask_4x4_c & border_mask,
-                                     mask_4x4_int[r],
-                                     &cm->lf_info, &lfl[r << 3],
-                                     (int)cm->bit_depth);
+      highbd_filter_selectively_vert(
+          CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
+          mask_16x16_c & border_mask, mask_8x8_c & border_mask,
+          mask_4x4_c & border_mask, mask_4x4_int[r], &cm->lf_info, &lfl[r << 3],
+          (int)cm->bit_depth);
     } else {
-      filter_selectively_vert(dst->buf, dst->stride,
-                              mask_16x16_c & border_mask,
+      filter_selectively_vert(dst->buf, dst->stride, mask_16x16_c & border_mask,
                               mask_8x8_c & border_mask,
-                              mask_4x4_c & border_mask,
-                              mask_4x4_int[r],
+                              mask_4x4_c & border_mask, mask_4x4_int[r],
                               &cm->lf_info, &lfl[r << 3]);
     }
 #else
-    filter_selectively_vert(dst->buf, dst->stride,
-                            mask_16x16_c & border_mask,
-                            mask_8x8_c & border_mask,
-                            mask_4x4_c & border_mask,
-                            mask_4x4_int[r],
-                            &cm->lf_info, &lfl[r << 3]);
+    filter_selectively_vert(dst->buf, dst->stride, mask_16x16_c & border_mask,
+                            mask_8x8_c & border_mask, mask_4x4_c & border_mask,
+                            mask_4x4_int[r], &cm->lf_info, &lfl[r << 3]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
     dst->buf += 8 * dst->stride;
     mi_8x8 += row_step_stride;
@@ -1332,37 +1296,26 @@ void vp10_filter_block_plane_non420(VP10_COMMON *cm,
 #if CONFIG_VPX_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
-                                      dst->stride,
-                                      mask_16x16_r,
-                                      mask_8x8_r,
-                                      mask_4x4_r,
-                                      mask_4x4_int_r,
-                                      &cm->lf_info, &lfl[r << 3],
-                                      (int)cm->bit_depth);
+                                      dst->stride, mask_16x16_r, mask_8x8_r,
+                                      mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
+                                      &lfl[r << 3], (int)cm->bit_depth);
     } else {
-      filter_selectively_horiz(dst->buf, dst->stride,
-                               mask_16x16_r,
-                               mask_8x8_r,
-                               mask_4x4_r,
-                               mask_4x4_int_r,
-                               &cm->lf_info, &lfl[r << 3]);
+      filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
+                               mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
+                               &lfl[r << 3]);
     }
 #else
-    filter_selectively_horiz(dst->buf, dst->stride,
-                             mask_16x16_r,
-                             mask_8x8_r,
-                             mask_4x4_r,
-                             mask_4x4_int_r,
-                             &cm->lf_info, &lfl[r << 3]);
+    filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
+                             mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
+                             &lfl[r << 3]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
     dst->buf += 8 * dst->stride;
   }
 }
 
 void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
-                                 struct macroblockd_plane *const plane,
-                                 int mi_row,
-                                 LOOP_FILTER_MASK *lfm) {
+                                  struct macroblockd_plane *const plane,
+                                  int mi_row, LOOP_FILTER_MASK *lfm) {
   struct buf_2d *const dst = &plane->dst;
   uint8_t *const dst0 = dst->buf;
   int r;
@@ -1452,9 +1405,8 @@ void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
 }
 
 void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
-                                 struct macroblockd_plane *const plane,
-                                 int mi_row,
-                                 LOOP_FILTER_MASK *lfm) {
+                                  struct macroblockd_plane *const plane,
+                                  int mi_row, LOOP_FILTER_MASK *lfm) {
   struct buf_2d *const dst = &plane->dst;
   uint8_t *const dst0 = dst->buf;
   int r, c;
@@ -1493,16 +1445,15 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
             mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info,
             &lfm->lfl_uv[r << 1], (int)cm->bit_depth);
       } else {
-        filter_selectively_vert_row2(
-            plane->subsampling_x, dst->buf, dst->stride,
-            mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info,
-            &lfm->lfl_uv[r << 1]);
+        filter_selectively_vert_row2(plane->subsampling_x, dst->buf,
+                                     dst->stride, mask_16x16_l, mask_8x8_l,
+                                     mask_4x4_l, mask_4x4_int_l, &cm->lf_info,
+                                     &lfm->lfl_uv[r << 1]);
       }
 #else
       filter_selectively_vert_row2(
-          plane->subsampling_x, dst->buf, dst->stride,
-          mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info,
-          &lfm->lfl_uv[r << 1]);
+          plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
+          mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_uv[r << 1]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
       dst->buf += 16 * dst->stride;
@@ -1567,10 +1518,9 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
   }
 }
 
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
-                          VP10_COMMON *cm,
-                          struct macroblockd_plane planes[MAX_MB_PLANE],
-                          int start, int stop, int y_only) {
+void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP10_COMMON *cm,
+                           struct macroblockd_plane planes[MAX_MB_PLANE],
+                           int start, int stop, int y_only) {
   const int num_planes = y_only ? 1 : MAX_MB_PLANE;
   enum lf_path path;
   LOOP_FILTER_MASK lfm;
@@ -1594,8 +1544,7 @@ void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
       vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
       // TODO(JBB): Make setup_mask work for non 420.
-      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
-                     &lfm);
+      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
 
       vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
       for (plane = 1; plane < num_planes; ++plane) {
@@ -1608,7 +1557,7 @@ void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
             break;
           case LF_PATH_SLOW:
             vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
-                                          mi_row, mi_col);
+                                           mi_row, mi_col);
             break;
         }
       }
@@ -1616,10 +1565,9 @@ void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
   }
 }
 
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame,
-                           VP10_COMMON *cm, MACROBLOCKD *xd,
-                           int frame_filter_level,
-                           int y_only, int partial_frame) {
+void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+                            MACROBLOCKD *xd, int frame_filter_level, int y_only,
+                            int partial_frame) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
   if (!frame_filter_level) return;
   start_mi_row = 0;
@@ -1631,9 +1579,7 @@ void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame,
   }
   end_mi_row = start_mi_row + mi_rows_to_filter;
   vp10_loop_filter_frame_init(cm, frame_filter_level);
-  vp10_loop_filter_rows(frame, cm, xd->plane,
-                       start_mi_row, end_mi_row,
-                       y_only);
+  vp10_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
 }
 
 void vp10_loop_filter_data_reset(
@@ -1651,6 +1597,6 @@ void vp10_loop_filter_data_reset(
 int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
   (void)unused;
   vp10_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
-                       lf_data->start, lf_data->stop, lf_data->y_only);
+                        lf_data->start, lf_data->stop, lf_data->y_only);
   return 1;
 }
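// [editor's sketch, not part of the patch] All of the filter_selectively_*
// helpers in this file share one dispatch pattern: walk the union of the
// per-size edge masks one bit (one 8x8 edge) at a time, at each set bit apply
// the strongest filter whose own mask has that bit set, and shift every mask
// down in lockstep. The filter_16/filter_8/filter_4 calls below are
// placeholders for the vpx_lpf_* kernels; only the control flow is the point.
static void dispatch_masks_sketch(unsigned int mask_16x16,
                                  unsigned int mask_8x8,
                                  unsigned int mask_4x4) {
  unsigned int mask;
  for (mask = mask_16x16 | mask_8x8 | mask_4x4; mask; mask >>= 1) {
    if (mask & 1) {
      // Test in strength order so each edge gets exactly one filter.
      if (mask_16x16 & 1) {
        /* filter_16(s, pitch, ...); */
      } else if (mask_8x8 & 1) {
        /* filter_8(s, pitch, ...); */
      } else if (mask_4x4 & 1) {
        /* filter_4(s, pitch, ...); */
      }
    }
    // Advance all masks together; the real code also advances the pixel
    // pointer and the per-block filter-level pointer here.
    mask_16x16 >>= 1;
    mask_8x8 >>= 1;
    mask_4x4 >>= 1;
  }
}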
diff --git a/vp10/common/loopfilter.h b/vp10/common/loopfilter.h
index 51aae0734ac2dfc74311c1a536ef731e8079d6b3..18aa7a11996738c4e4229a8dda9cd407e39c1492 100644
--- a/vp10/common/loopfilter.h
+++ b/vp10/common/loopfilter.h
@@ -26,7 +26,7 @@ extern "C" {
 
 #define SIMD_WIDTH 16
 
-#define MAX_MODE_LF_DELTAS      2
+#define MAX_MODE_LF_DELTAS 2
 
 enum lf_path {
   LF_PATH_420,
@@ -97,44 +97,39 @@ struct VP10LfSyncData;
 
 // This function sets up the bit masks for the entire 64x64 region represented
 // by mi_row, mi_col.
-void vp10_setup_mask(struct VP10Common *const cm,
-                    const int mi_row, const int mi_col,
-                    MODE_INFO **mi_8x8, const int mode_info_stride,
-                    LOOP_FILTER_MASK *lfm);
+void vp10_setup_mask(struct VP10Common *const cm, const int mi_row,
+                     const int mi_col, MODE_INFO **mi_8x8,
+                     const int mode_info_stride, LOOP_FILTER_MASK *lfm);
 
 void vp10_filter_block_plane_ss00(struct VP10Common *const cm,
-                                 struct macroblockd_plane *const plane,
-                                 int mi_row,
-                                 LOOP_FILTER_MASK *lfm);
+                                  struct macroblockd_plane *const plane,
+                                  int mi_row, LOOP_FILTER_MASK *lfm);
 
 void vp10_filter_block_plane_ss11(struct VP10Common *const cm,
-                                 struct macroblockd_plane *const plane,
-                                 int mi_row,
-                                 LOOP_FILTER_MASK *lfm);
+                                  struct macroblockd_plane *const plane,
+                                  int mi_row, LOOP_FILTER_MASK *lfm);
 
 void vp10_filter_block_plane_non420(struct VP10Common *cm,
-                                   struct macroblockd_plane *plane,
-                                   MODE_INFO **mi_8x8,
-                                   int mi_row, int mi_col);
+                                    struct macroblockd_plane *plane,
+                                    MODE_INFO **mi_8x8, int mi_row, int mi_col);
 
 void vp10_loop_filter_init(struct VP10Common *cm);
 
 // Update the loop filter for the current frame.
-// This should be called before vp10_loop_filter_rows(), vp10_loop_filter_frame()
+// This should be called before vp10_loop_filter_rows();
+// vp10_loop_filter_frame()
 // calls this function directly.
 void vp10_loop_filter_frame_init(struct VP10Common *cm, int default_filt_lvl);
 
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame,
-                           struct VP10Common *cm,
-                           struct macroblockd *mbd,
-                           int filter_level,
-                           int y_only, int partial_frame);
+void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
+                            struct macroblockd *mbd, int filter_level,
+                            int y_only, int partial_frame);
 
 // Apply the loop filter to [start, stop) macro block rows in frame_buffer.
 void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
-                          struct VP10Common *cm,
-                          struct macroblockd_plane planes[MAX_MB_PLANE],
-                          int start, int stop, int y_only);
+                           struct VP10Common *cm,
+                           struct macroblockd_plane planes[MAX_MB_PLANE],
+                           int start, int stop, int y_only);
 
 typedef struct LoopFilterWorkerData {
   YV12_BUFFER_CONFIG *frame_buffer;
diff --git a/vp10/common/mips/dspr2/itrans16_dspr2.c b/vp10/common/mips/dspr2/itrans16_dspr2.c
index 3d1bd3d906d60328475ed0821a0ea30dea4fd4d9..0fcae870ed5ad4b532db111cf7b50458e036235f 100644
--- a/vp10/common/mips/dspr2/itrans16_dspr2.c
+++ b/vp10/common/mips/dspr2/itrans16_dspr2.c
@@ -21,27 +21,23 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
-                                int pitch, int tx_type) {
+void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
+                                 int tx_type) {
   int i, j;
-  DECLARE_ALIGNED(32, int16_t,  out[16 * 16]);
+  DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *outptr = out;
   int16_t temp_out[16];
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp    %[pos],    1    \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp    %[pos],    1    \n\t" : : [pos] "r"(pos));
 
   switch (tx_type) {
-    case DCT_DCT:     // DCT in both horizontal and vertical
+    case DCT_DCT:  // DCT in both horizontal and vertical
       idct16_rows_dspr2(input, outptr, 16);
       idct16_cols_add_blk_dspr2(out, dest, pitch);
       break;
-    case ADST_DCT:    // ADST in vertical, DCT in horizontal
+    case ADST_DCT:  // ADST in vertical, DCT in horizontal
       idct16_rows_dspr2(input, outptr, 16);
 
       outptr = out;
@@ -50,13 +46,12 @@ void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
         iadst16_dspr2(outptr, temp_out);
 
         for (j = 0; j < 16; ++j)
-          dest[j * pitch + i] =
-                    clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
-                                      + dest[j * pitch + i]);
+          dest[j * pitch + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) +
+                                           dest[j * pitch + i]);
         outptr += 16;
       }
       break;
-    case DCT_ADST:    // DCT in vertical, ADST in horizontal
+    case DCT_ADST:  // DCT in vertical, ADST in horizontal
     {
       int16_t temp_in[16 * 16];
 
@@ -70,13 +65,11 @@ void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
       }
 
       for (i = 0; i < 16; ++i)
-        for (j = 0; j < 16; ++j)
-            temp_in[j * 16 + i] = out[i * 16 + j];
+        for (j = 0; j < 16; ++j) temp_in[j * 16 + i] = out[i * 16 + j];
 
       idct16_cols_add_blk_dspr2(temp_in, dest, pitch);
-    }
-    break;
-    case ADST_ADST:   // ADST in both directions
+    } break;
+    case ADST_ADST:  // ADST in both directions
     {
       int16_t temp_in[16];
 
@@ -90,19 +83,14 @@ void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
       }
 
       for (i = 0; i < 16; ++i) {
-        for (j = 0; j < 16; ++j)
-          temp_in[j] = out[j * 16 + i];
+        for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
         iadst16_dspr2(temp_in, temp_out);
         for (j = 0; j < 16; ++j)
-          dest[j * pitch + i] =
-                    clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
-                                      + dest[j * pitch + i]);
+          dest[j * pitch + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) +
+                                           dest[j * pitch + i]);
       }
-    }
-    break;
-    default:
-      printf("vp10_short_iht16x16_add_dspr2 : Invalid tx_type\n");
-      break;
+    } break;
+    default: printf("vp10_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
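// [editor's sketch, not part of the patch] Each vp10_ihtNxN_*_add function in
// these dspr2/msa files is an instance of the same separable 2-D inverse
// transform: pick a 1-D kernel per direction from tx_type (DCT or ADST), run
// it over the rows, gather columns, run it again, then round, clip, and add
// the residual into the destination. dummy_1d stands in for the real
// idct4/iadst4 kernels; the shift of 4 matches the 4x4 case above.
#include <stdint.h>
#include <string.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

typedef void (*transform_1d)(const int16_t *in, int16_t *out);

static void dummy_1d(const int16_t *in, int16_t *out) {
  memcpy(out, in, 4 * sizeof(*in));  // placeholder for idct4/iadst4
}

static uint8_t clip_pixel_sketch(int val) {
  return (uint8_t)(val < 0 ? 0 : (val > 255 ? 255 : val));
}

static void iht4x4_add_sketch(const int16_t *input, uint8_t *dest, int stride,
                              transform_1d row_txfm, transform_1d col_txfm) {
  int16_t out[4 * 4], temp_in[4], temp_out[4];
  int i, j;
  // First pass: 1-D transform on each row of coefficients.
  for (i = 0; i < 4; ++i) row_txfm(input + 4 * i, out + 4 * i);
  // Second pass: gather each column, transform it, then reconstruct.
  for (i = 0; i < 4; ++i) {
    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
    col_txfm(temp_in, temp_out);
    for (j = 0; j < 4; ++j)
      dest[j * stride + i] = clip_pixel_sketch(
          ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * stride + i]);
  }
}

int main(void) {
  int16_t coeffs[16] = { 16 };  // a single DC-ish coefficient
  uint8_t dest[16] = { 0 };
  iht4x4_add_sketch(coeffs, dest, 4, dummy_1d, dummy_1d);
  return 0;
}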
diff --git a/vp10/common/mips/dspr2/itrans4_dspr2.c b/vp10/common/mips/dspr2/itrans4_dspr2.c
index 5249287b85ecdd3238ec49a9cfd0e2499670724c..9d10d5e83b722ae0aa8a1f541fefa78e8b346189 100644
--- a/vp10/common/mips/dspr2/itrans4_dspr2.c
+++ b/vp10/common/mips/dspr2/itrans4_dspr2.c
@@ -22,7 +22,7 @@
 
 #if HAVE_DSPR2
 void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
-                             int dest_stride, int tx_type) {
+                              int dest_stride, int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
   int16_t *outptr = out;
@@ -30,14 +30,12 @@ void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
   switch (tx_type) {
-    case DCT_DCT:   // DCT in both horizontal and vertical
+    case DCT_DCT:  // DCT in both horizontal and vertical
       vpx_idct4_rows_dspr2(input, outptr);
       vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride);
       break;
@@ -50,9 +48,8 @@ void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
         iadst4_dspr2(outptr, temp_out);
 
         for (j = 0; j < 4; ++j)
-          dest[j * dest_stride + i] =
-                    clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4)
-                                      + dest[j * dest_stride + i]);
+          dest[j * dest_stride + i] = clip_pixel(
+              ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]);
 
         outptr += 4;
       }
@@ -60,7 +57,7 @@ void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
     case DCT_ADST:  // DCT in vertical, ADST in horizontal
       for (i = 0; i < 4; ++i) {
         iadst4_dspr2(input, outptr);
-        input  += 4;
+        input += 4;
         outptr += 4;
       }
 
@@ -74,24 +71,20 @@ void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
     case ADST_ADST:  // ADST in both directions
       for (i = 0; i < 4; ++i) {
         iadst4_dspr2(input, outptr);
-        input  += 4;
+        input += 4;
         outptr += 4;
       }
 
       for (i = 0; i < 4; ++i) {
-        for (j = 0; j < 4; ++j)
-          temp_in[j] = out[j * 4 + i];
+        for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
         iadst4_dspr2(temp_in, temp_out);
 
         for (j = 0; j < 4; ++j)
-          dest[j * dest_stride + i] =
-                  clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4)
-                                      + dest[j * dest_stride + i]);
+          dest[j * dest_stride + i] = clip_pixel(
+              ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]);
       }
       break;
-    default:
-      printf("vp10_short_iht4x4_add_dspr2 : Invalid tx_type\n");
-      break;
+    default: printf("vp10_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/vp10/common/mips/dspr2/itrans8_dspr2.c b/vp10/common/mips/dspr2/itrans8_dspr2.c
index b25b93aee0552fe86e62c88775e4645766f9d7d7..3ebf8ce90cbcec09b2b0b893fb29de4eb8f5e4a9 100644
--- a/vp10/common/mips/dspr2/itrans8_dspr2.c
+++ b/vp10/common/mips/dspr2/itrans8_dspr2.c
@@ -21,7 +21,7 @@
 
 #if HAVE_DSPR2
 void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
-                             int dest_stride, int tx_type) {
+                              int dest_stride, int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
   int16_t *outptr = out;
@@ -29,30 +29,25 @@ void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp    %[pos],    1    \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp    %[pos],    1    \n\t" : : [pos] "r"(pos));
 
   switch (tx_type) {
-    case DCT_DCT:     // DCT in both horizontal and vertical
+    case DCT_DCT:  // DCT in both horizontal and vertical
       idct8_rows_dspr2(input, outptr, 8);
       idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride);
       break;
-    case ADST_DCT:    // ADST in vertical, DCT in horizontal
+    case ADST_DCT:  // ADST in vertical, DCT in horizontal
       idct8_rows_dspr2(input, outptr, 8);
 
       for (i = 0; i < 8; ++i) {
         iadst8_dspr2(&out[i * 8], temp_out);
 
         for (j = 0; j < 8; ++j)
-          dest[j * dest_stride + i] =
-                    clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5)
-                                      + dest[j * dest_stride + i]);
+          dest[j * dest_stride + i] = clip_pixel(
+              ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]);
       }
       break;
-    case DCT_ADST:    // DCT in vertical, ADST in horizontal
+    case DCT_ADST:  // DCT in vertical, ADST in horizontal
       for (i = 0; i < 8; ++i) {
         iadst8_dspr2(input, outptr);
         input += 8;
@@ -66,7 +61,7 @@ void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
       }
       idct8_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride);
       break;
-    case ADST_ADST:   // ADST in both directions
+    case ADST_ADST:  // ADST in both directions
       for (i = 0; i < 8; ++i) {
         iadst8_dspr2(input, outptr);
         input += 8;
@@ -74,20 +69,16 @@ void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
       }
 
       for (i = 0; i < 8; ++i) {
-        for (j = 0; j < 8; ++j)
-          temp_in[j] = out[j * 8 + i];
+        for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
 
         iadst8_dspr2(temp_in, temp_out);
 
         for (j = 0; j < 8; ++j)
-          dest[j * dest_stride + i] =
-                clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5)
-                                      + dest[j * dest_stride + i]);
+          dest[j * dest_stride + i] = clip_pixel(
+              ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]);
       }
       break;
-    default:
-      printf("vp10_short_iht8x8_add_dspr2 : Invalid tx_type\n");
-      break;
+    default: printf("vp10_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/vp10/common/mips/msa/idct16x16_msa.c b/vp10/common/mips/msa/idct16x16_msa.c
index a89e41b3dd93af0d83ca5ba590049e43719008b8..c73ef3764b5158e3ac7201416a0257773bb684e5 100644
--- a/vp10/common/mips/msa/idct16x16_msa.c
+++ b/vp10/common/mips/msa/idct16x16_msa.c
@@ -14,7 +14,7 @@
 #include "vpx_dsp/mips/inv_txfm_msa.h"
 
 void vp10_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
-                              int32_t dst_stride, int32_t tx_type) {
+                               int32_t dst_stride, int32_t tx_type) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *out_ptr = &out[0];
@@ -74,8 +74,6 @@ void vp10_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
                                           (dst + (i << 3)), dst_stride);
       }
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
diff --git a/vp10/common/mips/msa/idct4x4_msa.c b/vp10/common/mips/msa/idct4x4_msa.c
index e38889f27539c67c17c99cae6369beb3bf3b46ca..ea4091b6a68b2ce9123ef9c328c5fa85569ce084 100644
--- a/vp10/common/mips/msa/idct4x4_msa.c
+++ b/vp10/common/mips/msa/idct4x4_msa.c
@@ -14,7 +14,7 @@
 #include "vpx_dsp/mips/inv_txfm_msa.h"
 
 void vp10_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
-                           int32_t dst_stride, int32_t tx_type) {
+                            int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3;
 
   /* load vector elements of 4x4 block */
@@ -50,9 +50,7 @@ void vp10_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
       VPX_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 
   /* final rounding (add 2^3, divide by 2^4) and shift */
diff --git a/vp10/common/mips/msa/idct8x8_msa.c b/vp10/common/mips/msa/idct8x8_msa.c
index ede6751c4ee359d0880a69b9787aa5877ecbc230..c62e82d59343cf64926fed3a59eabacdc5de58e5 100644
--- a/vp10/common/mips/msa/idct8x8_msa.c
+++ b/vp10/common/mips/msa/idct8x8_msa.c
@@ -14,59 +14,57 @@
 #include "vpx_dsp/mips/inv_txfm_msa.h"
 
 void vp10_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
-                           int32_t dst_stride, int32_t tx_type) {
+                            int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
   /* load vector elements of 8x8 block */
   LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
 
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
 
   switch (tx_type) {
     case DCT_DCT:
       /* DCT in horizontal */
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
       /* DCT in vertical */
-      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                         in0, in1, in2, in3, in4, in5, in6, in7);
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
       break;
     case ADST_DCT:
       /* DCT in horizontal */
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
       /* ADST in vertical */
-      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                         in0, in1, in2, in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
       break;
     case DCT_ADST:
       /* ADST in horizontal */
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
       /* DCT in vertical */
-      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                         in0, in1, in2, in3, in4, in5, in6, in7);
-      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
       break;
     case ADST_ADST:
       /* ADST in horizontal */
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
       /* ADST in vertical */
-      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                         in0, in1, in2, in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
-      break;
-    default:
-      assert(0);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
       break;
+    default: assert(0); break;
   }
 
   /* final rounding (add 2^4, divide by 2^5) and shift */
diff --git a/vp10/common/mv.h b/vp10/common/mv.h
index b4971a567ef35a46fa953a5a4307e8fbb900b606..96656aad1fd38a40c7e7b1af14fcdeeaaf95d390 100644
--- a/vp10/common/mv.h
+++ b/vp10/common/mv.h
@@ -39,11 +39,11 @@ static INLINE int is_zero_mv(const MV *mv) {
 }
 
 static INLINE int is_equal_mv(const MV *a, const MV *b) {
-  return  *((const uint32_t *)a) == *((const uint32_t *)b);
+  return *((const uint32_t *)a) == *((const uint32_t *)b);
 }
 
-static INLINE void clamp_mv(MV *mv, int min_col, int max_col,
-                            int min_row, int max_row) {
+static INLINE void clamp_mv(MV *mv, int min_col, int max_col, int min_row,
+                            int max_row) {
   mv->col = clamp(mv->col, min_col, max_col);
   mv->row = clamp(mv->row, min_row, max_row);
 }
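// [editor's sketch, not part of the patch] is_equal_mv above compares two
// MVs (a pair of int16_t row/col) in one shot by reading each struct as a
// uint32_t. That relies on MV being exactly 32 bits with no padding, the same
// assumption the library's int_mv union makes. A self-contained rendering,
// using memcpy to sidestep the strict-aliasing concerns the cast raises:
#include <stdint.h>
#include <string.h>

typedef struct mv { int16_t row, col; } MV;

static int is_equal_mv_sketch(const MV *a, const MV *b) {
  uint32_t ua, ub;
  memcpy(&ua, a, sizeof(ua));  // well-defined even where the cast is not
  memcpy(&ub, b, sizeof(ub));
  return ua == ub;
}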
diff --git a/vp10/common/mvref_common.c b/vp10/common/mvref_common.c
index 1ef80c21aae3dd782778f903955e646510bcb00b..198bbf5bc0eeb9e2b2e38380c726f8c67c8b9158 100644
--- a/vp10/common/mvref_common.c
+++ b/vp10/common/mvref_common.c
@@ -15,17 +15,18 @@
 // to try and find candidate reference vectors.
 static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
-                             int_mv *mv_ref_list,
-                             int block, int mi_row, int mi_col,
-                             find_mv_refs_sync sync, void *const data,
-                             uint8_t *mode_context) {
+                             int_mv *mv_ref_list, int block, int mi_row,
+                             int mi_col, find_mv_refs_sync sync,
+                             void *const data, uint8_t *mode_context) {
   const int *ref_sign_bias = cm->ref_frame_sign_bias;
   int i, refmv_count = 0;
   const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
   int different_ref_found = 0;
   int context_counter = 0;
-  const MV_REF *const  prev_frame_mvs = cm->use_prev_frame_mvs ?
-      cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL;
+  const MV_REF *const prev_frame_mvs =
+      cm->use_prev_frame_mvs
+          ? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col
+          : NULL;
   const TileInfo *const tile = &xd->tile;
   const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type] << 3;
   const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type] << 3;
@@ -41,8 +42,8 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
   for (i = 0; i < 2; ++i) {
     const POSITION *const mv_ref = &mv_ref_search[i];
     if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
-      const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
-                                                   xd->mi_stride];
+      const MODE_INFO *const candidate_mi =
+          xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
       const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
       // Keep counts for entropy encoding.
       context_counter += mode_2_counter[candidate->mode];
@@ -63,27 +64,27 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
   for (; i < MVREF_NEIGHBOURS; ++i) {
     const POSITION *const mv_ref = &mv_ref_search[i];
     if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
-      const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *
-                                                    xd->mi_stride]->mbmi;
+      const MB_MODE_INFO *const candidate =
+          &xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
       different_ref_found = 1;
 
       if (candidate->ref_frame[0] == ref_frame)
-        ADD_MV_REF_LIST(candidate->mv[0], refmv_count, mv_ref_list,
-                        bw, bh, xd, Done);
+        ADD_MV_REF_LIST(candidate->mv[0], refmv_count, mv_ref_list, bw, bh, xd,
+                        Done);
       else if (candidate->ref_frame[1] == ref_frame)
-        ADD_MV_REF_LIST(candidate->mv[1], refmv_count, mv_ref_list,
-                        bw, bh, xd, Done);
+        ADD_MV_REF_LIST(candidate->mv[1], refmv_count, mv_ref_list, bw, bh, xd,
+                        Done);
     }
   }
 
-  // TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast
-  // on windows platform. The sync here is unncessary if use_perv_frame_mvs
-  // is 0. But after removing it, there will be hang in the unit test on windows
-  // due to several threads waiting for a thread's signal.
+// TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast
+// on Windows platform. The sync here is unnecessary if use_prev_frame_mvs
+// is 0. But after removing it, there will be a hang in the unit test on
+// Windows due to several threads waiting for a thread's signal.
 #if defined(_WIN32) && !HAVE_PTHREAD_H
-    if (cm->frame_parallel_decode && sync != NULL) {
-      sync(data, mi_row);
-    }
+  if (cm->frame_parallel_decode && sync != NULL) {
+    sync(data, mi_row);
+  }
 #endif
 
   // Check the last frame's mode and mv info.
@@ -94,11 +95,11 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
     }
 
     if (prev_frame_mvs->ref_frame[0] == ref_frame) {
-      ADD_MV_REF_LIST(prev_frame_mvs->mv[0], refmv_count, mv_ref_list,
-                      bw, bh, xd, Done);
+      ADD_MV_REF_LIST(prev_frame_mvs->mv[0], refmv_count, mv_ref_list, bw, bh,
+                      xd, Done);
     } else if (prev_frame_mvs->ref_frame[1] == ref_frame) {
-      ADD_MV_REF_LIST(prev_frame_mvs->mv[1], refmv_count, mv_ref_list,
-                      bw, bh, xd, Done);
+      ADD_MV_REF_LIST(prev_frame_mvs->mv[1], refmv_count, mv_ref_list, bw, bh,
+                      xd, Done);
     }
   }
 
@@ -109,8 +110,8 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
     for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
       const POSITION *mv_ref = &mv_ref_search[i];
       if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
-        const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row
-                                              * xd->mi_stride]->mbmi;
+        const MB_MODE_INFO *const candidate =
+            &xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
 
         // If the candidate is INTRA we don't want to consider its mv.
         IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
@@ -147,13 +148,13 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
     }
   }
 
- Done:
+Done:
 
   mode_context[ref_frame] = counter_to_context[context_counter];
 
 #if CONFIG_MISC_FIXES
   for (i = refmv_count; i < MAX_MV_REF_CANDIDATES; ++i)
-      mv_ref_list[i].as_int = 0;
+    mv_ref_list[i].as_int = 0;
 #else
   // Clamp vectors
   for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
@@ -162,28 +163,24 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
 }
 
 void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
-                      int_mv *mv_ref_list,
-                      int mi_row, int mi_col,
-                      find_mv_refs_sync sync, void *const data,
-                      uint8_t *mode_context) {
-  find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1,
-                   mi_row, mi_col, sync, data, mode_context);
+                       MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+                       int_mv *mv_ref_list, int mi_row, int mi_col,
+                       find_mv_refs_sync sync, void *const data,
+                       uint8_t *mode_context) {
+  find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col, sync,
+                   data, mode_context);
 }
 
 static void lower_mv_precision(MV *mv, int allow_hp) {
   const int use_hp = allow_hp && vp10_use_mv_hp(mv);
   if (!use_hp) {
-    if (mv->row & 1)
-      mv->row += (mv->row > 0 ? -1 : 1);
-    if (mv->col & 1)
-      mv->col += (mv->col > 0 ? -1 : 1);
+    if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
+    if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
   }
 }
 
-void vp10_find_best_ref_mvs(int allow_hp,
-                           int_mv *mvlist, int_mv *nearest_mv,
-                           int_mv *near_mv) {
+void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+                            int_mv *near_mv) {
   int i;
   // Make sure all the candidates are properly clamped etc
   for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
@@ -193,10 +190,10 @@ void vp10_find_best_ref_mvs(int allow_hp,
   *near_mv = mvlist[1];
 }
 
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                   int block, int ref, int mi_row, int mi_col,
-                                   int_mv *nearest_mv, int_mv *near_mv,
-                                   uint8_t *mode_context) {
+void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
+                                    int ref, int mi_row, int mi_col,
+                                    int_mv *nearest_mv, int_mv *near_mv,
+                                    uint8_t *mode_context) {
   int_mv mv_list[MAX_MV_REF_CANDIDATES];
   MODE_INFO *const mi = xd->mi[0];
   b_mode_info *bmi = mi->bmi;
@@ -204,8 +201,8 @@ void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd,
 
   assert(MAX_MV_REF_CANDIDATES == 2);
 
-  find_mv_refs_idx(cm, xd, mi, mi->mbmi.ref_frame[ref], mv_list, block,
-                   mi_row, mi_col, NULL, NULL, mode_context);
+  find_mv_refs_idx(cm, xd, mi, mi->mbmi.ref_frame[ref], mv_list, block, mi_row,
+                   mi_col, NULL, NULL, mode_context);
 
   near_mv->as_int = 0;
   switch (block) {
@@ -237,7 +234,6 @@ void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd,
         }
       break;
     }
-    default:
-      assert(0 && "Invalid block index.");
+    default: assert(0 && "Invalid block index.");
   }
 }
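// [editor's sketch, not part of the patch] The mode context computed in
// find_mv_refs_idx above comes from summing a per-mode weight for the two
// closest neighbours and using the sum as an index into counter_to_context[].
// Assuming the mode_2_counter weights this file uses, 0 for predicted
// (NEAREST/NEAR), 1 for NEWMV, 3 for ZEROMV, and 9 for intra, every unordered
// pair of neighbour classes produces a distinct sum, which is why
// counter_to_context in mvref_common.h needs exactly 19 entries (0..18).
#include <stdio.h>

int main(void) {
  const int weights[] = { 0, 1, 3, 9 };
  const char *names[] = { "PREDICTED", "NEW", "ZERO", "INTRA" };
  int a, b;
  for (a = 0; a < 4; ++a)
    for (b = a; b < 4; ++b)
      printf("%-9s + %-9s -> counter %2d\n", names[a], names[b],
             weights[a] + weights[b]);
  return 0;
}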
diff --git a/vp10/common/mvref_common.h b/vp10/common/mvref_common.h
index 0a98866149f37492dab1d4e1fd2b8535545e006b..44e4bb5020c440817a2de2d37fd45ec5dc0859ec 100644
--- a/vp10/common/mvref_common.h
+++ b/vp10/common/mvref_common.h
@@ -61,61 +61,149 @@ static const int mode_2_counter[MB_MODE_COUNT] = {
 // 2. However the actual count can never be greater than 2 so the highest
 // counter we need is 18. 9 is an invalid counter that's never used.
 static const int counter_to_context[19] = {
-  BOTH_PREDICTED,  // 0
-  NEW_PLUS_NON_INTRA,  // 1
-  BOTH_NEW,  // 2
-  ZERO_PLUS_PREDICTED,  // 3
-  NEW_PLUS_NON_INTRA,  // 4
-  INVALID_CASE,  // 5
-  BOTH_ZERO,  // 6
-  INVALID_CASE,  // 7
-  INVALID_CASE,  // 8
+  BOTH_PREDICTED,        // 0
+  NEW_PLUS_NON_INTRA,    // 1
+  BOTH_NEW,              // 2
+  ZERO_PLUS_PREDICTED,   // 3
+  NEW_PLUS_NON_INTRA,    // 4
+  INVALID_CASE,          // 5
+  BOTH_ZERO,             // 6
+  INVALID_CASE,          // 7
+  INVALID_CASE,          // 8
   INTRA_PLUS_NON_INTRA,  // 9
   INTRA_PLUS_NON_INTRA,  // 10
-  INVALID_CASE,  // 11
+  INVALID_CASE,          // 11
   INTRA_PLUS_NON_INTRA,  // 12
-  INVALID_CASE,  // 13
-  INVALID_CASE,  // 14
-  INVALID_CASE,  // 15
-  INVALID_CASE,  // 16
-  INVALID_CASE,  // 17
-  BOTH_INTRA  // 18
+  INVALID_CASE,          // 13
+  INVALID_CASE,          // 14
+  INVALID_CASE,          // 15
+  INVALID_CASE,          // 16
+  INVALID_CASE,          // 17
+  BOTH_INTRA             // 18
 };
 
 static const POSITION mv_ref_blocks[BLOCK_SIZES][MVREF_NEIGHBOURS] = {
   // 4X4
-  {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
+  { { -1, 0 },
+    { 0, -1 },
+    { -1, -1 },
+    { -2, 0 },
+    { 0, -2 },
+    { -2, -1 },
+    { -1, -2 },
+    { -2, -2 } },
   // 4X8
-  {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
+  { { -1, 0 },
+    { 0, -1 },
+    { -1, -1 },
+    { -2, 0 },
+    { 0, -2 },
+    { -2, -1 },
+    { -1, -2 },
+    { -2, -2 } },
   // 8X4
-  {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
+  { { -1, 0 },
+    { 0, -1 },
+    { -1, -1 },
+    { -2, 0 },
+    { 0, -2 },
+    { -2, -1 },
+    { -1, -2 },
+    { -2, -2 } },
   // 8X8
-  {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
+  { { -1, 0 },
+    { 0, -1 },
+    { -1, -1 },
+    { -2, 0 },
+    { 0, -2 },
+    { -2, -1 },
+    { -1, -2 },
+    { -2, -2 } },
   // 8X16
-  {{0, -1}, {-1, 0}, {1, -1}, {-1, -1}, {0, -2}, {-2, 0}, {-2, -1}, {-1, -2}},
+  { { 0, -1 },
+    { -1, 0 },
+    { 1, -1 },
+    { -1, -1 },
+    { 0, -2 },
+    { -2, 0 },
+    { -2, -1 },
+    { -1, -2 } },
   // 16X8
-  {{-1, 0}, {0, -1}, {-1, 1}, {-1, -1}, {-2, 0}, {0, -2}, {-1, -2}, {-2, -1}},
+  { { -1, 0 },
+    { 0, -1 },
+    { -1, 1 },
+    { -1, -1 },
+    { -2, 0 },
+    { 0, -2 },
+    { -1, -2 },
+    { -2, -1 } },
   // 16X16
-  {{-1, 0}, {0, -1}, {-1, 1}, {1, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-3, -3}},
+  { { -1, 0 },
+    { 0, -1 },
+    { -1, 1 },
+    { 1, -1 },
+    { -1, -1 },
+    { -3, 0 },
+    { 0, -3 },
+    { -3, -3 } },
   // 16X32
-  {{0, -1}, {-1, 0}, {2, -1}, {-1, -1}, {-1, 1}, {0, -3}, {-3, 0}, {-3, -3}},
+  { { 0, -1 },
+    { -1, 0 },
+    { 2, -1 },
+    { -1, -1 },
+    { -1, 1 },
+    { 0, -3 },
+    { -3, 0 },
+    { -3, -3 } },
   // 32X16
-  {{-1, 0}, {0, -1}, {-1, 2}, {-1, -1}, {1, -1}, {-3, 0}, {0, -3}, {-3, -3}},
+  { { -1, 0 },
+    { 0, -1 },
+    { -1, 2 },
+    { -1, -1 },
+    { 1, -1 },
+    { -3, 0 },
+    { 0, -3 },
+    { -3, -3 } },
   // 32X32
-  {{-1, 1}, {1, -1}, {-1, 2}, {2, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-3, -3}},
+  { { -1, 1 },
+    { 1, -1 },
+    { -1, 2 },
+    { 2, -1 },
+    { -1, -1 },
+    { -3, 0 },
+    { 0, -3 },
+    { -3, -3 } },
   // 32X64
-  {{0, -1}, {-1, 0}, {4, -1}, {-1, 2}, {-1, -1}, {0, -3}, {-3, 0}, {2, -1}},
+  { { 0, -1 },
+    { -1, 0 },
+    { 4, -1 },
+    { -1, 2 },
+    { -1, -1 },
+    { 0, -3 },
+    { -3, 0 },
+    { 2, -1 } },
   // 64X32
-  {{-1, 0}, {0, -1}, {-1, 4}, {2, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-1, 2}},
+  { { -1, 0 },
+    { 0, -1 },
+    { -1, 4 },
+    { 2, -1 },
+    { -1, -1 },
+    { -3, 0 },
+    { 0, -3 },
+    { -1, 2 } },
   // 64X64
-  {{-1, 3}, {3, -1}, {-1, 4}, {4, -1}, {-1, -1}, {-1, 0}, {0, -1}, {-1, 6}}
+  { { -1, 3 },
+    { 3, -1 },
+    { -1, 4 },
+    { 4, -1 },
+    { -1, -1 },
+    { -1, 0 },
+    { 0, -1 },
+    { -1, 6 } }
 };
 
 static const int idx_n_column_to_subblock[4][2] = {
-  {1, 2},
-  {1, 3},
-  {3, 2},
-  {3, 3}
+  { 1, 2 }, { 1, 3 }, { 3, 2 }, { 3, 3 }
 };
 
 // clamp_mv_ref
@@ -128,16 +216,15 @@ static const int idx_n_column_to_subblock[4][2] = {
 static INLINE void clamp_mv_ref(MV *mv, int bw, int bh, const MACROBLOCKD *xd) {
 #if CONFIG_MISC_FIXES
   clamp_mv(mv, xd->mb_to_left_edge - bw * 8 - MV_BORDER,
-               xd->mb_to_right_edge + bw * 8 + MV_BORDER,
-               xd->mb_to_top_edge - bh * 8 - MV_BORDER,
-               xd->mb_to_bottom_edge + bh * 8 + MV_BORDER);
+           xd->mb_to_right_edge + bw * 8 + MV_BORDER,
+           xd->mb_to_top_edge - bh * 8 - MV_BORDER,
+           xd->mb_to_bottom_edge + bh * 8 + MV_BORDER);
 #else
-  (void) bw;
-  (void) bh;
+  (void)bw;
+  (void)bh;
   clamp_mv(mv, xd->mb_to_left_edge - MV_BORDER,
-               xd->mb_to_right_edge + MV_BORDER,
-               xd->mb_to_top_edge - MV_BORDER,
-               xd->mb_to_bottom_edge + MV_BORDER);
+           xd->mb_to_right_edge + MV_BORDER, xd->mb_to_top_edge - MV_BORDER,
+           xd->mb_to_bottom_edge + MV_BORDER);
 #endif
 }
 
@@ -146,12 +233,12 @@ static INLINE void clamp_mv_ref(MV *mv, int bw, int bh, const MACROBLOCKD *xd) {
 static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate, int which_mv,
                                       int search_col, int block_idx) {
   return block_idx >= 0 && candidate->mbmi.sb_type < BLOCK_8X8
-          ? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
-              .as_mv[which_mv]
-          : candidate->mbmi.mv[which_mv];
+             ? candidate
+                   ->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
+                   .as_mv[which_mv]
+             : candidate->mbmi.mv[which_mv];
 }
 
-
 // Performs mv sign inversion if indicated by the reference frame combination.
 static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref,
                               const MV_REFERENCE_FRAME this_ref_frame,
@@ -167,47 +254,47 @@ static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref,
 #if CONFIG_MISC_FIXES
 #define CLIP_IN_ADD(mv, bw, bh, xd) clamp_mv_ref(mv, bw, bh, xd)
 #else
-#define CLIP_IN_ADD(mv, bw, bh, xd) do {} while (0)
+#define CLIP_IN_ADD(mv, bw, bh, xd) \
+  do {                              \
+  } while (0)
 #endif
 
 // This macro is used to add a motion vector to the mv_ref list if it isn't
 // already in the list.  If it's the second motion vector it will also
 // skip all additional processing and jump to done!
-#define ADD_MV_REF_LIST(mv, refmv_count, mv_ref_list, bw, bh, xd, Done) \
-  do { \
-    (mv_ref_list)[(refmv_count)] = (mv); \
-    CLIP_IN_ADD(&(mv_ref_list)[(refmv_count)].as_mv, (bw), (bh), (xd)); \
+#define ADD_MV_REF_LIST(mv, refmv_count, mv_ref_list, bw, bh, xd, Done)      \
+  do {                                                                       \
+    (mv_ref_list)[(refmv_count)] = (mv);                                     \
+    CLIP_IN_ADD(&(mv_ref_list)[(refmv_count)].as_mv, (bw), (bh), (xd));      \
     if (refmv_count && (mv_ref_list)[1].as_int != (mv_ref_list)[0].as_int) { \
-        (refmv_count) = 2; \
-        goto Done; \
-    } \
-    (refmv_count) = 1; \
+      (refmv_count) = 2;                                                     \
+      goto Done;                                                             \
+    }                                                                        \
+    (refmv_count) = 1;                                                       \
   } while (0)
 
 // If either reference frame is different, not INTRA, and they
 // are different from each other scale and add the mv to our list.
 #define IF_DIFF_REF_FRAME_ADD_MV(mbmi, ref_frame, ref_sign_bias, refmv_count, \
-                                 mv_ref_list, bw, bh, xd, Done) \
-  do { \
-    if (is_inter_block(mbmi)) { \
-      if ((mbmi)->ref_frame[0] != ref_frame) \
-        ADD_MV_REF_LIST(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \
-                        refmv_count, mv_ref_list, bw, bh, xd, Done); \
-      if (has_second_ref(mbmi) && \
-          (CONFIG_MISC_FIXES || \
-           (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) && \
-          (mbmi)->ref_frame[1] != ref_frame) \
-        ADD_MV_REF_LIST(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \
-                        refmv_count, mv_ref_list, bw, bh, xd, Done); \
-    } \
+                                 mv_ref_list, bw, bh, xd, Done)               \
+  do {                                                                        \
+    if (is_inter_block(mbmi)) {                                               \
+      if ((mbmi)->ref_frame[0] != ref_frame)                                  \
+        ADD_MV_REF_LIST(scale_mv((mbmi), 0, ref_frame, ref_sign_bias),        \
+                        refmv_count, mv_ref_list, bw, bh, xd, Done);          \
+      if (has_second_ref(mbmi) &&                                             \
+          (CONFIG_MISC_FIXES ||                                               \
+           (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) &&                   \
+          (mbmi)->ref_frame[1] != ref_frame)                                  \
+        ADD_MV_REF_LIST(scale_mv((mbmi), 1, ref_frame, ref_sign_bias),        \
+                        refmv_count, mv_ref_list, bw, bh, xd, Done);          \
+    }                                                                         \
   } while (0)
 
-
 // Checks that the given mi_row, mi_col and search point
 // are inside the borders of the tile.
-static INLINE int is_inside(const TileInfo *const tile,
-                            int mi_col, int mi_row, int mi_rows,
-                            const POSITION *mi_pos) {
+static INLINE int is_inside(const TileInfo *const tile, int mi_col, int mi_row,
+                            int mi_rows, const POSITION *mi_pos) {
   return !(mi_row + mi_pos->row < 0 ||
            mi_col + mi_pos->col < tile->mi_col_start ||
            mi_row + mi_pos->row >= mi_rows ||
@@ -216,21 +303,21 @@ static INLINE int is_inside(const TileInfo *const tile,
 
 typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
 void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
-                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
-                      int_mv *mv_ref_list, int mi_row, int mi_col,
-                      find_mv_refs_sync sync, void *const data,
-                      uint8_t *mode_context);
+                       MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+                       int_mv *mv_ref_list, int mi_row, int mi_col,
+                       find_mv_refs_sync sync, void *const data,
+                       uint8_t *mode_context);
 
 // Check a list of motion vectors by SAD score, using a number of rows of
 // pixels above and a number of cols of pixels to the left, to select the
 // one with the best score to use as the ref motion vector.
-void vp10_find_best_ref_mvs(int allow_hp,
-                           int_mv *mvlist, int_mv *nearest_mv, int_mv *near_mv);
+void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+                            int_mv *near_mv);
 
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                   int block, int ref, int mi_row, int mi_col,
-                                   int_mv *nearest_mv, int_mv *near_mv,
-                                   uint8_t *mode_context);
+void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
+                                    int ref, int mi_row, int mi_col,
+                                    int_mv *nearest_mv, int_mv *near_mv,
+                                    uint8_t *mode_context);
 
 #ifdef __cplusplus
 }  // extern "C"
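
is_inside() above rejects candidate positions that fall above the frame, below
it, or outside the tile's column range. A standalone sketch of the same bounds
test; TileInfo and POSITION are reduced to the fields used here, and the final
check against tile->mi_col_end is assumed from the pattern since the hunk is
truncated:

#include <stdio.h>

typedef struct { int mi_col_start, mi_col_end; } TileInfoSketch;
typedef struct { int row, col; } PositionSketch;

static int is_inside_sketch(const TileInfoSketch *tile, int mi_col, int mi_row,
                            int mi_rows, const PositionSketch *pos) {
  return !(mi_row + pos->row < 0 ||                  /* above the frame */
           mi_col + pos->col < tile->mi_col_start || /* left of the tile */
           mi_row + pos->row >= mi_rows ||           /* below the frame */
           mi_col + pos->col >= tile->mi_col_end);   /* right of the tile */
}

int main(void) {
  const TileInfoSketch tile = { 4, 16 };
  const PositionSketch above = { -1, 0 }; /* candidate one MI row up */
  printf("%d\n", is_inside_sketch(&tile, 4, 0, 32, &above)); /* 0: off the top */
  printf("%d\n", is_inside_sketch(&tile, 4, 1, 32, &above)); /* 1: in bounds */
  return 0;
}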
diff --git a/vp10/common/onyxc_int.h b/vp10/common/onyxc_int.h
index 067b261b174b75540da61def0c7528d31f21163b..df316967c133f744689407c8df12033d54b70e91 100644
--- a/vp10/common/onyxc_int.h
+++ b/vp10/common/onyxc_int.h
@@ -47,10 +47,10 @@ extern "C" {
 #define NUM_PING_PONG_BUFFERS 2
 
 typedef enum {
-  SINGLE_REFERENCE      = 0,
-  COMPOUND_REFERENCE    = 1,
+  SINGLE_REFERENCE = 0,
+  COMPOUND_REFERENCE = 1,
   REFERENCE_MODE_SELECT = 2,
-  REFERENCE_MODES       = 3,
+  REFERENCE_MODES = 3,
 } REFERENCE_MODE;
 
 typedef enum {
@@ -103,9 +103,9 @@ typedef struct {
 } RefCntBuffer;
 
 typedef struct BufferPool {
-  // Protect BufferPool from being accessed by several FrameWorkers at
-  // the same time during frame parallel decode.
-  // TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
+// Protect BufferPool from being accessed by several FrameWorkers at
+// the same time during frame parallel decode.
+// TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
 #if CONFIG_MULTITHREAD
   pthread_mutex_t pool_mutex;
 #endif
@@ -123,7 +123,7 @@ typedef struct BufferPool {
 } BufferPool;
 
 typedef struct VP10Common {
-  struct vpx_internal_error_info  error;
+  struct vpx_internal_error_info error;
   vpx_color_space_t color_space;
   int color_range;
   int width;
@@ -163,7 +163,7 @@ typedef struct VP10Common {
 
   int new_fb_idx;
 
-  FRAME_TYPE last_frame_type;  /* last frame's frame type for motion search.*/
+  FRAME_TYPE last_frame_type; /* last frame's frame type for motion search.*/
   FRAME_TYPE frame_type;
 
   int show_frame;
@@ -239,7 +239,7 @@ typedef struct VP10Common {
   // a frame decode
   REFRESH_FRAME_CONTEXT_MODE refresh_frame_context;
 
-  int ref_frame_sign_bias[MAX_REF_FRAMES];    /* Two state 0, 1 */
+  int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */
 
   struct loopfilter lf;
   struct segmentation seg;
@@ -254,9 +254,9 @@ typedef struct VP10Common {
   MV_REFERENCE_FRAME comp_var_ref[2];
   REFERENCE_MODE reference_mode;
 
-  FRAME_CONTEXT *fc;  /* this frame entropy */
-  FRAME_CONTEXT *frame_contexts;   // FRAME_CONTEXTS
-  unsigned int  frame_context_idx; /* Context to use/update */
+  FRAME_CONTEXT *fc;              /* this frame entropy */
+  FRAME_CONTEXT *frame_contexts;  // FRAME_CONTEXTS
+  unsigned int frame_context_idx; /* Context to use/update */
   FRAME_COUNTS counts;
 
   unsigned int current_video_frame;
@@ -313,10 +313,8 @@ static void unlock_buffer_pool(BufferPool *const pool) {
 }
 
 static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP10_COMMON *cm, int index) {
-  if (index < 0 || index >= REF_FRAMES)
-    return NULL;
-  if (cm->ref_frame_map[index] < 0)
-    return NULL;
+  if (index < 0 || index >= REF_FRAMES) return NULL;
+  if (cm->ref_frame_map[index] < 0) return NULL;
   assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
   return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
 }
@@ -331,8 +329,7 @@ static INLINE int get_free_fb(VP10_COMMON *cm) {
 
   lock_buffer_pool(cm->buffer_pool);
   for (i = 0; i < FRAME_BUFFERS; ++i)
-    if (frame_bufs[i].ref_count == 0)
-      break;
+    if (frame_bufs[i].ref_count == 0) break;
 
   if (i != FRAME_BUFFERS) {
     frame_bufs[i].ref_count = 1;
@@ -365,12 +362,13 @@ static INLINE int frame_is_intra_only(const VP10_COMMON *const cm) {
 }
 
 static INLINE void vp10_init_macroblockd(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                        tran_low_t *dqcoeff) {
+                                         tran_low_t *dqcoeff) {
   int i;
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
     xd->plane[i].dqcoeff = dqcoeff;
-    xd->above_context[i] = cm->above_context +
+    xd->above_context[i] =
+        cm->above_context +
         i * sizeof(*cm->above_context) * 2 * mi_cols_aligned_to_sb(cm->mi_cols);
 
     if (xd->plane[i].plane_type == PLANE_TYPE_Y) {
@@ -403,17 +401,16 @@ static INLINE int calc_mi_size(int len) {
 }
 
 static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
-                                  int mi_row, int bh,
-                                  int mi_col, int bw,
+                                  int mi_row, int bh, int mi_col, int bw,
                                   int mi_rows, int mi_cols) {
-  xd->mb_to_top_edge    = -((mi_row * MI_SIZE) * 8);
+  xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
   xd->mb_to_bottom_edge = ((mi_rows - bh - mi_row) * MI_SIZE) * 8;
-  xd->mb_to_left_edge   = -((mi_col * MI_SIZE) * 8);
-  xd->mb_to_right_edge  = ((mi_cols - bw - mi_col) * MI_SIZE) * 8;
+  xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
+  xd->mb_to_right_edge = ((mi_cols - bw - mi_col) * MI_SIZE) * 8;
 
   // Are edges available for intra prediction?
-  xd->up_available    = (mi_row != 0);
-  xd->left_available  = (mi_col > tile->mi_col_start);
+  xd->up_available = (mi_row != 0);
+  xd->left_available = (mi_col > tile->mi_col_start);
   if (xd->up_available) {
     xd->above_mi = xd->mi[-xd->mi_stride];
     // above_mi may be NULL in encoder's first pass.
@@ -443,9 +440,8 @@ static INLINE const vpx_prob *get_y_mode_probs(const VP10_COMMON *cm,
   return cm->kf_y_prob[above][left];
 }
 
-static INLINE void update_partition_context(MACROBLOCKD *xd,
-                                            int mi_row, int mi_col,
-                                            BLOCK_SIZE subsize,
+static INLINE void update_partition_context(MACROBLOCKD *xd, int mi_row,
+                                            int mi_col, BLOCK_SIZE subsize,
                                             BLOCK_SIZE bsize) {
   PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col;
   PARTITION_CONTEXT *const left_ctx = xd->left_seg_context + (mi_row & MI_MASK);
@@ -460,13 +456,12 @@ static INLINE void update_partition_context(MACROBLOCKD *xd,
   memset(left_ctx, partition_context_lookup[subsize].left, bs);
 }
 
-static INLINE int partition_plane_context(const MACROBLOCKD *xd,
-                                          int mi_row, int mi_col,
-                                          BLOCK_SIZE bsize) {
+static INLINE int partition_plane_context(const MACROBLOCKD *xd, int mi_row,
+                                          int mi_col, BLOCK_SIZE bsize) {
   const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
   const PARTITION_CONTEXT *left_ctx = xd->left_seg_context + (mi_row & MI_MASK);
   const int bsl = mi_width_log2_lookup[bsize];
-  int above = (*above_ctx >> bsl) & 1 , left = (*left_ctx >> bsl) & 1;
+  int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;
 
   assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]);
   assert(bsl >= 0);
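
set_mi_row_col() above expresses the distance from the current block to each
frame edge in eighth-pel units: MI units are converted to pixels and then
multiplied by 8. A worked example, assuming MI_SIZE is 8 as elsewhere in this
tree:

/* A 64x64 block (bw = bh = 8 MI units) at mi_row = 4, mi_col = 0 in a frame
 * of mi_rows = mi_cols = 32:
 *
 *   mb_to_top_edge    = -((4 * 8) * 8)         = -256  (32 px above, 1/8 pel)
 *   mb_to_bottom_edge = ((32 - 8 - 4) * 8) * 8 = 1280  (160 px below)
 *   mb_to_left_edge   = -((0 * 8) * 8)         = 0     (block on left edge)
 *   mb_to_right_edge  = ((32 - 8 - 0) * 8) * 8 = 1536  (192 px to the right)
 *
 * up_available is true (mi_row != 0), but left_available is false whenever
 * mi_col <= tile->mi_col_start, so intra prediction gets no left neighbor on
 * the tile's left border.
 */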
diff --git a/vp10/common/pred_common.c b/vp10/common/pred_common.c
index 2e79e0de35f95960c73f35882ccd0d7b2250d461..b3a453120bce97ee845f48fe2e2f0184d1c5eb74 100644
--- a/vp10/common/pred_common.c
+++ b/vp10/common/pred_common.c
@@ -20,11 +20,13 @@ int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
   // left of the entries corresponding to real macroblocks.
   // The prediction flags in these dummy entries are initialised to 0.
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
-  const int left_type = xd->left_available && is_inter_block(left_mbmi) ?
-                            left_mbmi->interp_filter : SWITCHABLE_FILTERS;
+  const int left_type = xd->left_available && is_inter_block(left_mbmi)
+                            ? left_mbmi->interp_filter
+                            : SWITCHABLE_FILTERS;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
-  const int above_type = xd->up_available && is_inter_block(above_mbmi) ?
-                             above_mbmi->interp_filter : SWITCHABLE_FILTERS;
+  const int above_type = xd->up_available && is_inter_block(above_mbmi)
+                             ? above_mbmi->interp_filter
+                             : SWITCHABLE_FILTERS;
 
   if (left_type == above_type)
     return left_type;
@@ -52,8 +54,7 @@ int vp10_get_intra_inter_context(const MACROBLOCKD *xd) {
   if (has_above && has_left) {  // both edges available
     const int above_intra = !is_inter_block(above_mbmi);
     const int left_intra = !is_inter_block(left_mbmi);
-    return left_intra && above_intra ? 3
-                                     : left_intra || above_intra;
+    return left_intra && above_intra ? 3 : left_intra || above_intra;
   } else if (has_above || has_left) {  // one edge available
     return 2 * !is_inter_block(has_above ? above_mbmi : left_mbmi);
   } else {
@@ -62,7 +63,7 @@ int vp10_get_intra_inter_context(const MACROBLOCKD *xd) {
 }
 
 int vp10_get_reference_mode_context(const VP10_COMMON *cm,
-                                   const MACROBLOCKD *xd) {
+                                    const MACROBLOCKD *xd) {
   int ctx;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -105,7 +106,7 @@ int vp10_get_reference_mode_context(const VP10_COMMON *cm,
 
 // Returns a context number for the given MB prediction signal
 int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
-                                    const MACROBLOCKD *xd) {
+                                     const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -131,15 +132,15 @@ int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
       if (!has_second_ref(edge_mbmi))  // single pred (1/3)
         pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]);
       else  // comp pred (1/3)
-        pred_context = 1 + 2 * (edge_mbmi->ref_frame[var_ref_idx]
-                                    != cm->comp_var_ref[1]);
+        pred_context =
+            1 + 2 * (edge_mbmi->ref_frame[var_ref_idx] != cm->comp_var_ref[1]);
     } else {  // inter/inter
       const int l_sg = !has_second_ref(left_mbmi);
       const int a_sg = !has_second_ref(above_mbmi);
-      const MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0]
-                                           : above_mbmi->ref_frame[var_ref_idx];
-      const MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0]
-                                           : left_mbmi->ref_frame[var_ref_idx];
+      const MV_REFERENCE_FRAME vrfa =
+          a_sg ? above_mbmi->ref_frame[0] : above_mbmi->ref_frame[var_ref_idx];
+      const MV_REFERENCE_FRAME vrfl =
+          l_sg ? left_mbmi->ref_frame[0] : left_mbmi->ref_frame[var_ref_idx];
 
       if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) {
         pred_context = 0;
@@ -173,8 +174,8 @@ int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
       pred_context = 2;
     } else {
       if (has_second_ref(edge_mbmi))
-        pred_context = 4 * (edge_mbmi->ref_frame[var_ref_idx]
-                              != cm->comp_var_ref[1]);
+        pred_context =
+            4 * (edge_mbmi->ref_frame[var_ref_idx] != cm->comp_var_ref[1]);
       else
         pred_context = 3 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]);
     }
@@ -277,8 +278,9 @@ int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
         else
           pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
       } else {
-        pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME ||
-                                edge_mbmi->ref_frame[1] == GOLDEN_FRAME);
+        pred_context = 1 +
+                       2 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+                            edge_mbmi->ref_frame[1] == GOLDEN_FRAME);
       }
     } else {  // inter/inter
       const int above_has_second = has_second_ref(above_mbmi);
@@ -290,10 +292,9 @@ int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
 
       if (above_has_second && left_has_second) {
         if (above0 == left0 && above1 == left1)
-          pred_context = 3 * (above0 == GOLDEN_FRAME ||
-                              above1 == GOLDEN_FRAME ||
-                              left0 == GOLDEN_FRAME ||
-                              left1 == GOLDEN_FRAME);
+          pred_context =
+              3 * (above0 == GOLDEN_FRAME || above1 == GOLDEN_FRAME ||
+                   left0 == GOLDEN_FRAME || left1 == GOLDEN_FRAME);
         else
           pred_context = 2;
       } else if (above_has_second || left_has_second) {
@@ -311,12 +312,12 @@ int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
         if (above0 == LAST_FRAME && left0 == LAST_FRAME) {
           pred_context = 3;
         } else if (above0 == LAST_FRAME || left0 == LAST_FRAME) {
-          const MV_REFERENCE_FRAME edge0 = (above0 == LAST_FRAME) ? left0
-                                                                  : above0;
+          const MV_REFERENCE_FRAME edge0 =
+              (above0 == LAST_FRAME) ? left0 : above0;
           pred_context = 4 * (edge0 == GOLDEN_FRAME);
         } else {
-          pred_context = 2 * (above0 == GOLDEN_FRAME) +
-                             2 * (left0 == GOLDEN_FRAME);
+          pred_context =
+              2 * (above0 == GOLDEN_FRAME) + 2 * (left0 == GOLDEN_FRAME);
         }
       }
     }
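
vp10_get_intra_inter_context() folds the above/left neighbors into one of four
contexts. A sketch that enumerates the mapping implied by the code above, with
booleans standing in for the MB_MODE_INFO lookups:

/* both neighbors available:  both intra -> 3, one intra -> 1, both inter -> 0
 * one neighbor available:    that neighbor intra -> 2, inter -> 0
 * no neighbor available:     0
 */
static int intra_inter_ctx_sketch(int has_above, int has_left, int above_intra,
                                  int left_intra) {
  if (has_above && has_left)
    return left_intra && above_intra ? 3 : left_intra || above_intra;
  else if (has_above || has_left)
    return 2 * (has_above ? above_intra : left_intra);
  else
    return 0;
}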
diff --git a/vp10/common/pred_common.h b/vp10/common/pred_common.h
index d6d7146d7a4405572aef38420ae2fbd936031bbb..dfb86f9afaa4335b086c59af8a71af8c2dc83696 100644
--- a/vp10/common/pred_common.h
+++ b/vp10/common/pred_common.h
@@ -20,8 +20,8 @@ extern "C" {
 #endif
 
 static INLINE int get_segment_id(const VP10_COMMON *cm,
-                                 const uint8_t *segment_ids,
-                                 BLOCK_SIZE bsize, int mi_row, int mi_col) {
+                                 const uint8_t *segment_ids, BLOCK_SIZE bsize,
+                                 int mi_row, int mi_col) {
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
   const int bw = num_8x8_blocks_wide_lookup[bsize];
   const int bh = num_8x8_blocks_high_lookup[bsize];
@@ -41,8 +41,8 @@ static INLINE int get_segment_id(const VP10_COMMON *cm,
 static INLINE int vp10_get_pred_context_seg_id(const MACROBLOCKD *xd) {
   const MODE_INFO *const above_mi = xd->above_mi;
   const MODE_INFO *const left_mi = xd->left_mi;
-  const int above_sip = (above_mi != NULL) ?
-                        above_mi->mbmi.seg_id_predicted : 0;
+  const int above_sip =
+      (above_mi != NULL) ? above_mi->mbmi.seg_id_predicted : 0;
   const int left_sip = (left_mi != NULL) ? left_mi->mbmi.seg_id_predicted : 0;
 
   return above_sip + left_sip;
@@ -62,7 +62,7 @@ static INLINE int vp10_get_skip_context(const MACROBLOCKD *xd) {
 }
 
 static INLINE vpx_prob vp10_get_skip_prob(const VP10_COMMON *cm,
-                                         const MACROBLOCKD *xd) {
+                                          const MACROBLOCKD *xd) {
   return cm->fc->skip_probs[vp10_get_skip_context(xd)];
 }
 
@@ -71,7 +71,7 @@ int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
 int vp10_get_intra_inter_context(const MACROBLOCKD *xd);
 
 static INLINE vpx_prob vp10_get_intra_inter_prob(const VP10_COMMON *cm,
-                                                const MACROBLOCKD *xd) {
+                                                 const MACROBLOCKD *xd) {
   return cm->fc->intra_inter_prob[vp10_get_intra_inter_context(xd)];
 }
 
@@ -79,15 +79,15 @@ int vp10_get_reference_mode_context(const VP10_COMMON *cm,
                                     const MACROBLOCKD *xd);
 
 static INLINE vpx_prob vp10_get_reference_mode_prob(const VP10_COMMON *cm,
-                                                   const MACROBLOCKD *xd) {
+                                                    const MACROBLOCKD *xd) {
   return cm->fc->comp_inter_prob[vp10_get_reference_mode_context(cm, xd)];
 }
 
 int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
-                                    const MACROBLOCKD *xd);
+                                     const MACROBLOCKD *xd);
 
 static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p(const VP10_COMMON *cm,
-                                                    const MACROBLOCKD *xd) {
+                                                     const MACROBLOCKD *xd) {
   const int pred_context = vp10_get_pred_context_comp_ref_p(cm, xd);
   return cm->fc->comp_ref_prob[pred_context];
 }
@@ -95,14 +95,14 @@ static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p(const VP10_COMMON *cm,
 int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
 
 static INLINE vpx_prob vp10_get_pred_prob_single_ref_p1(const VP10_COMMON *cm,
-                                                       const MACROBLOCKD *xd) {
+                                                        const MACROBLOCKD *xd) {
   return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p1(xd)][0];
 }
 
 int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
 
 static INLINE vpx_prob vp10_get_pred_prob_single_ref_p2(const VP10_COMMON *cm,
-                                                       const MACROBLOCKD *xd) {
+                                                        const MACROBLOCKD *xd) {
   return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p2(xd)][1];
 }
 
@@ -116,15 +116,13 @@ static INLINE int get_tx_size_context(const MACROBLOCKD *xd) {
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
   const int has_above = xd->up_available;
   const int has_left = xd->left_available;
-  int above_ctx = (has_above && !above_mbmi->skip) ? (int)above_mbmi->tx_size
-                                                   : max_tx_size;
-  int left_ctx = (has_left && !left_mbmi->skip) ? (int)left_mbmi->tx_size
-                                                : max_tx_size;
-  if (!has_left)
-    left_ctx = above_ctx;
+  int above_ctx =
+      (has_above && !above_mbmi->skip) ? (int)above_mbmi->tx_size : max_tx_size;
+  int left_ctx =
+      (has_left && !left_mbmi->skip) ? (int)left_mbmi->tx_size : max_tx_size;
+  if (!has_left) left_ctx = above_ctx;
 
-  if (!has_above)
-    above_ctx = left_ctx;
+  if (!has_above) above_ctx = left_ctx;
 
   return (above_ctx + left_ctx) > max_tx_size;
 }
@@ -132,15 +130,10 @@ static INLINE int get_tx_size_context(const MACROBLOCKD *xd) {
 static INLINE const vpx_prob *get_tx_probs(TX_SIZE max_tx_size, int ctx,
                                            const struct tx_probs *tx_probs) {
   switch (max_tx_size) {
-    case TX_8X8:
-      return tx_probs->p8x8[ctx];
-    case TX_16X16:
-      return tx_probs->p16x16[ctx];
-    case TX_32X32:
-      return tx_probs->p32x32[ctx];
-    default:
-      assert(0 && "Invalid max_tx_size.");
-      return NULL;
+    case TX_8X8: return tx_probs->p8x8[ctx];
+    case TX_16X16: return tx_probs->p16x16[ctx];
+    case TX_32X32: return tx_probs->p32x32[ctx];
+    default: assert(0 && "Invalid max_tx_size."); return NULL;
   }
 }
 
@@ -153,15 +146,10 @@ static INLINE const vpx_prob *get_tx_probs2(TX_SIZE max_tx_size,
 static INLINE unsigned int *get_tx_counts(TX_SIZE max_tx_size, int ctx,
                                           struct tx_counts *tx_counts) {
   switch (max_tx_size) {
-    case TX_8X8:
-      return tx_counts->p8x8[ctx];
-    case TX_16X16:
-      return tx_counts->p16x16[ctx];
-    case TX_32X32:
-      return tx_counts->p32x32[ctx];
-    default:
-      assert(0 && "Invalid max_tx_size.");
-      return NULL;
+    case TX_8X8: return tx_counts->p8x8[ctx];
+    case TX_16X16: return tx_counts->p16x16[ctx];
+    case TX_32X32: return tx_counts->p32x32[ctx];
+    default: assert(0 && "Invalid max_tx_size."); return NULL;
   }
 }
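
get_tx_size_context() treats a skipped or missing neighbor as max_tx_size and
mirrors an absent edge from the present one, so the context reduces to whether
the two neighbor tx sizes sum past max_tx_size. A standalone sketch of that
fallback logic, with plain ints in place of the MB_MODE_INFO fields:

static int tx_size_ctx_sketch(int has_above, int has_left, int above_skip,
                              int left_skip, int above_tx, int left_tx,
                              int max_tx_size) {
  int above_ctx = (has_above && !above_skip) ? above_tx : max_tx_size;
  int left_ctx = (has_left && !left_skip) ? left_tx : max_tx_size;

  /* A missing edge inherits the other edge's value, so a lone neighbor
   * decides the context by itself. */
  if (!has_left) left_ctx = above_ctx;
  if (!has_above) above_ctx = left_ctx;

  return (above_ctx + left_ctx) > max_tx_size; /* 0 or 1 */
}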
 
diff --git a/vp10/common/quant_common.c b/vp10/common/quant_common.c
index 968a816012d69c38d783fc0da709c9dd62955c66..20bc8c587f108a871a929a6ce1c56928847da7a3 100644
--- a/vp10/common/quant_common.c
+++ b/vp10/common/quant_common.c
@@ -13,234 +13,166 @@
 #include "vp10/common/seg_common.h"
 
 static const int16_t dc_qlookup[QINDEX_RANGE] = {
-  4,       8,    8,    9,   10,   11,   12,   12,
-  13,     14,   15,   16,   17,   18,   19,   19,
-  20,     21,   22,   23,   24,   25,   26,   26,
-  27,     28,   29,   30,   31,   32,   32,   33,
-  34,     35,   36,   37,   38,   38,   39,   40,
-  41,     42,   43,   43,   44,   45,   46,   47,
-  48,     48,   49,   50,   51,   52,   53,   53,
-  54,     55,   56,   57,   57,   58,   59,   60,
-  61,     62,   62,   63,   64,   65,   66,   66,
-  67,     68,   69,   70,   70,   71,   72,   73,
-  74,     74,   75,   76,   77,   78,   78,   79,
-  80,     81,   81,   82,   83,   84,   85,   85,
-  87,     88,   90,   92,   93,   95,   96,   98,
-  99,    101,  102,  104,  105,  107,  108,  110,
-  111,   113,  114,  116,  117,  118,  120,  121,
-  123,   125,  127,  129,  131,  134,  136,  138,
-  140,   142,  144,  146,  148,  150,  152,  154,
-  156,   158,  161,  164,  166,  169,  172,  174,
-  177,   180,  182,  185,  187,  190,  192,  195,
-  199,   202,  205,  208,  211,  214,  217,  220,
-  223,   226,  230,  233,  237,  240,  243,  247,
-  250,   253,  257,  261,  265,  269,  272,  276,
-  280,   284,  288,  292,  296,  300,  304,  309,
-  313,   317,  322,  326,  330,  335,  340,  344,
-  349,   354,  359,  364,  369,  374,  379,  384,
-  389,   395,  400,  406,  411,  417,  423,  429,
-  435,   441,  447,  454,  461,  467,  475,  482,
-  489,   497,  505,  513,  522,  530,  539,  549,
-  559,   569,  579,  590,  602,  614,  626,  640,
-  654,   668,  684,  700,  717,  736,  755,  775,
-  796,   819,  843,  869,  896,  925,  955,  988,
-  1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336,
+  4,    8,    8,    9,    10,  11,  12,  12,  13,  14,  15,   16,   17,   18,
+  19,   19,   20,   21,   22,  23,  24,  25,  26,  26,  27,   28,   29,   30,
+  31,   32,   32,   33,   34,  35,  36,  37,  38,  38,  39,   40,   41,   42,
+  43,   43,   44,   45,   46,  47,  48,  48,  49,  50,  51,   52,   53,   53,
+  54,   55,   56,   57,   57,  58,  59,  60,  61,  62,  62,   63,   64,   65,
+  66,   66,   67,   68,   69,  70,  70,  71,  72,  73,  74,   74,   75,   76,
+  77,   78,   78,   79,   80,  81,  81,  82,  83,  84,  85,   85,   87,   88,
+  90,   92,   93,   95,   96,  98,  99,  101, 102, 104, 105,  107,  108,  110,
+  111,  113,  114,  116,  117, 118, 120, 121, 123, 125, 127,  129,  131,  134,
+  136,  138,  140,  142,  144, 146, 148, 150, 152, 154, 156,  158,  161,  164,
+  166,  169,  172,  174,  177, 180, 182, 185, 187, 190, 192,  195,  199,  202,
+  205,  208,  211,  214,  217, 220, 223, 226, 230, 233, 237,  240,  243,  247,
+  250,  253,  257,  261,  265, 269, 272, 276, 280, 284, 288,  292,  296,  300,
+  304,  309,  313,  317,  322, 326, 330, 335, 340, 344, 349,  354,  359,  364,
+  369,  374,  379,  384,  389, 395, 400, 406, 411, 417, 423,  429,  435,  441,
+  447,  454,  461,  467,  475, 482, 489, 497, 505, 513, 522,  530,  539,  549,
+  559,  569,  579,  590,  602, 614, 626, 640, 654, 668, 684,  700,  717,  736,
+  755,  775,  796,  819,  843, 869, 896, 925, 955, 988, 1022, 1058, 1098, 1139,
+  1184, 1232, 1282, 1336,
 };
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static const int16_t dc_qlookup_10[QINDEX_RANGE] = {
-  4,     9,    10,    13,    15,    17,    20,    22,
-  25,    28,    31,    34,    37,    40,    43,    47,
-  50,    53,    57,    60,    64,    68,    71,    75,
-  78,    82,    86,    90,    93,    97,   101,   105,
-  109,   113,   116,   120,   124,   128,   132,   136,
-  140,   143,   147,   151,   155,   159,   163,   166,
-  170,   174,   178,   182,   185,   189,   193,   197,
-  200,   204,   208,   212,   215,   219,   223,   226,
-  230,   233,   237,   241,   244,   248,   251,   255,
-  259,   262,   266,   269,   273,   276,   280,   283,
-  287,   290,   293,   297,   300,   304,   307,   310,
-  314,   317,   321,   324,   327,   331,   334,   337,
-  343,   350,   356,   362,   369,   375,   381,   387,
-  394,   400,   406,   412,   418,   424,   430,   436,
-  442,   448,   454,   460,   466,   472,   478,   484,
-  490,   499,   507,   516,   525,   533,   542,   550,
-  559,   567,   576,   584,   592,   601,   609,   617,
-  625,   634,   644,   655,   666,   676,   687,   698,
-  708,   718,   729,   739,   749,   759,   770,   782,
-  795,   807,   819,   831,   844,   856,   868,   880,
-  891,   906,   920,   933,   947,   961,   975,   988,
-  1001,  1015,  1030,  1045,  1061,  1076,  1090,  1105,
-  1120,  1137,  1153,  1170,  1186,  1202,  1218,  1236,
-  1253,  1271,  1288,  1306,  1323,  1342,  1361,  1379,
-  1398,  1416,  1436,  1456,  1476,  1496,  1516,  1537,
-  1559,  1580,  1601,  1624,  1647,  1670,  1692,  1717,
-  1741,  1766,  1791,  1817,  1844,  1871,  1900,  1929,
-  1958,  1990,  2021,  2054,  2088,  2123,  2159,  2197,
-  2236,  2276,  2319,  2363,  2410,  2458,  2508,  2561,
-  2616,  2675,  2737,  2802,  2871,  2944,  3020,  3102,
-  3188,  3280,  3375,  3478,  3586,  3702,  3823,  3953,
-  4089,  4236,  4394,  4559,  4737,  4929,  5130,  5347,
+  4,    9,    10,   13,   15,   17,   20,   22,   25,   28,   31,   34,   37,
+  40,   43,   47,   50,   53,   57,   60,   64,   68,   71,   75,   78,   82,
+  86,   90,   93,   97,   101,  105,  109,  113,  116,  120,  124,  128,  132,
+  136,  140,  143,  147,  151,  155,  159,  163,  166,  170,  174,  178,  182,
+  185,  189,  193,  197,  200,  204,  208,  212,  215,  219,  223,  226,  230,
+  233,  237,  241,  244,  248,  251,  255,  259,  262,  266,  269,  273,  276,
+  280,  283,  287,  290,  293,  297,  300,  304,  307,  310,  314,  317,  321,
+  324,  327,  331,  334,  337,  343,  350,  356,  362,  369,  375,  381,  387,
+  394,  400,  406,  412,  418,  424,  430,  436,  442,  448,  454,  460,  466,
+  472,  478,  484,  490,  499,  507,  516,  525,  533,  542,  550,  559,  567,
+  576,  584,  592,  601,  609,  617,  625,  634,  644,  655,  666,  676,  687,
+  698,  708,  718,  729,  739,  749,  759,  770,  782,  795,  807,  819,  831,
+  844,  856,  868,  880,  891,  906,  920,  933,  947,  961,  975,  988,  1001,
+  1015, 1030, 1045, 1061, 1076, 1090, 1105, 1120, 1137, 1153, 1170, 1186, 1202,
+  1218, 1236, 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379, 1398, 1416, 1436,
+  1456, 1476, 1496, 1516, 1537, 1559, 1580, 1601, 1624, 1647, 1670, 1692, 1717,
+  1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929, 1958, 1990, 2021, 2054, 2088,
+  2123, 2159, 2197, 2236, 2276, 2319, 2363, 2410, 2458, 2508, 2561, 2616, 2675,
+  2737, 2802, 2871, 2944, 3020, 3102, 3188, 3280, 3375, 3478, 3586, 3702, 3823,
+  3953, 4089, 4236, 4394, 4559, 4737, 4929, 5130, 5347,
 };
 
 static const int16_t dc_qlookup_12[QINDEX_RANGE] = {
-  4,    12,    18,    25,    33,    41,    50,    60,
-  70,    80,    91,   103,   115,   127,   140,   153,
-  166,   180,   194,   208,   222,   237,   251,   266,
-  281,   296,   312,   327,   343,   358,   374,   390,
-  405,   421,   437,   453,   469,   484,   500,   516,
-  532,   548,   564,   580,   596,   611,   627,   643,
-  659,   674,   690,   706,   721,   737,   752,   768,
-  783,   798,   814,   829,   844,   859,   874,   889,
-  904,   919,   934,   949,   964,   978,   993,  1008,
-  1022,  1037,  1051,  1065,  1080,  1094,  1108,  1122,
-  1136,  1151,  1165,  1179,  1192,  1206,  1220,  1234,
-  1248,  1261,  1275,  1288,  1302,  1315,  1329,  1342,
-  1368,  1393,  1419,  1444,  1469,  1494,  1519,  1544,
-  1569,  1594,  1618,  1643,  1668,  1692,  1717,  1741,
-  1765,  1789,  1814,  1838,  1862,  1885,  1909,  1933,
-  1957,  1992,  2027,  2061,  2096,  2130,  2165,  2199,
-  2233,  2267,  2300,  2334,  2367,  2400,  2434,  2467,
-  2499,  2532,  2575,  2618,  2661,  2704,  2746,  2788,
-  2830,  2872,  2913,  2954,  2995,  3036,  3076,  3127,
-  3177,  3226,  3275,  3324,  3373,  3421,  3469,  3517,
-  3565,  3621,  3677,  3733,  3788,  3843,  3897,  3951,
-  4005,  4058,  4119,  4181,  4241,  4301,  4361,  4420,
-  4479,  4546,  4612,  4677,  4742,  4807,  4871,  4942,
-  5013,  5083,  5153,  5222,  5291,  5367,  5442,  5517,
-  5591,  5665,  5745,  5825,  5905,  5984,  6063,  6149,
-  6234,  6319,  6404,  6495,  6587,  6678,  6769,  6867,
-  6966,  7064,  7163,  7269,  7376,  7483,  7599,  7715,
-  7832,  7958,  8085,  8214,  8352,  8492,  8635,  8788,
-  8945,  9104,  9275,  9450,  9639,  9832, 10031, 10245,
-  10465, 10702, 10946, 11210, 11482, 11776, 12081, 12409,
-  12750, 13118, 13501, 13913, 14343, 14807, 15290, 15812,
-  16356, 16943, 17575, 18237, 18949, 19718, 20521, 21387,
+  4,     12,    18,    25,    33,    41,    50,    60,    70,    80,    91,
+  103,   115,   127,   140,   153,   166,   180,   194,   208,   222,   237,
+  251,   266,   281,   296,   312,   327,   343,   358,   374,   390,   405,
+  421,   437,   453,   469,   484,   500,   516,   532,   548,   564,   580,
+  596,   611,   627,   643,   659,   674,   690,   706,   721,   737,   752,
+  768,   783,   798,   814,   829,   844,   859,   874,   889,   904,   919,
+  934,   949,   964,   978,   993,   1008,  1022,  1037,  1051,  1065,  1080,
+  1094,  1108,  1122,  1136,  1151,  1165,  1179,  1192,  1206,  1220,  1234,
+  1248,  1261,  1275,  1288,  1302,  1315,  1329,  1342,  1368,  1393,  1419,
+  1444,  1469,  1494,  1519,  1544,  1569,  1594,  1618,  1643,  1668,  1692,
+  1717,  1741,  1765,  1789,  1814,  1838,  1862,  1885,  1909,  1933,  1957,
+  1992,  2027,  2061,  2096,  2130,  2165,  2199,  2233,  2267,  2300,  2334,
+  2367,  2400,  2434,  2467,  2499,  2532,  2575,  2618,  2661,  2704,  2746,
+  2788,  2830,  2872,  2913,  2954,  2995,  3036,  3076,  3127,  3177,  3226,
+  3275,  3324,  3373,  3421,  3469,  3517,  3565,  3621,  3677,  3733,  3788,
+  3843,  3897,  3951,  4005,  4058,  4119,  4181,  4241,  4301,  4361,  4420,
+  4479,  4546,  4612,  4677,  4742,  4807,  4871,  4942,  5013,  5083,  5153,
+  5222,  5291,  5367,  5442,  5517,  5591,  5665,  5745,  5825,  5905,  5984,
+  6063,  6149,  6234,  6319,  6404,  6495,  6587,  6678,  6769,  6867,  6966,
+  7064,  7163,  7269,  7376,  7483,  7599,  7715,  7832,  7958,  8085,  8214,
+  8352,  8492,  8635,  8788,  8945,  9104,  9275,  9450,  9639,  9832,  10031,
+  10245, 10465, 10702, 10946, 11210, 11482, 11776, 12081, 12409, 12750, 13118,
+  13501, 13913, 14343, 14807, 15290, 15812, 16356, 16943, 17575, 18237, 18949,
+  19718, 20521, 21387,
 };
 #endif
 
 static const int16_t ac_qlookup[QINDEX_RANGE] = {
-  4,       8,    9,   10,   11,   12,   13,   14,
-  15,     16,   17,   18,   19,   20,   21,   22,
-  23,     24,   25,   26,   27,   28,   29,   30,
-  31,     32,   33,   34,   35,   36,   37,   38,
-  39,     40,   41,   42,   43,   44,   45,   46,
-  47,     48,   49,   50,   51,   52,   53,   54,
-  55,     56,   57,   58,   59,   60,   61,   62,
-  63,     64,   65,   66,   67,   68,   69,   70,
-  71,     72,   73,   74,   75,   76,   77,   78,
-  79,     80,   81,   82,   83,   84,   85,   86,
-  87,     88,   89,   90,   91,   92,   93,   94,
-  95,     96,   97,   98,   99,  100,  101,  102,
-  104,   106,  108,  110,  112,  114,  116,  118,
-  120,   122,  124,  126,  128,  130,  132,  134,
-  136,   138,  140,  142,  144,  146,  148,  150,
-  152,   155,  158,  161,  164,  167,  170,  173,
-  176,   179,  182,  185,  188,  191,  194,  197,
-  200,   203,  207,  211,  215,  219,  223,  227,
-  231,   235,  239,  243,  247,  251,  255,  260,
-  265,   270,  275,  280,  285,  290,  295,  300,
-  305,   311,  317,  323,  329,  335,  341,  347,
-  353,   359,  366,  373,  380,  387,  394,  401,
-  408,   416,  424,  432,  440,  448,  456,  465,
-  474,   483,  492,  501,  510,  520,  530,  540,
-  550,   560,  571,  582,  593,  604,  615,  627,
-  639,   651,  663,  676,  689,  702,  715,  729,
-  743,   757,  771,  786,  801,  816,  832,  848,
-  864,   881,  898,  915,  933,  951,  969,  988,
-  1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151,
-  1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
-  1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567,
-  1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
+  4,    8,    9,    10,   11,   12,   13,   14,   15,   16,   17,   18,   19,
+  20,   21,   22,   23,   24,   25,   26,   27,   28,   29,   30,   31,   32,
+  33,   34,   35,   36,   37,   38,   39,   40,   41,   42,   43,   44,   45,
+  46,   47,   48,   49,   50,   51,   52,   53,   54,   55,   56,   57,   58,
+  59,   60,   61,   62,   63,   64,   65,   66,   67,   68,   69,   70,   71,
+  72,   73,   74,   75,   76,   77,   78,   79,   80,   81,   82,   83,   84,
+  85,   86,   87,   88,   89,   90,   91,   92,   93,   94,   95,   96,   97,
+  98,   99,   100,  101,  102,  104,  106,  108,  110,  112,  114,  116,  118,
+  120,  122,  124,  126,  128,  130,  132,  134,  136,  138,  140,  142,  144,
+  146,  148,  150,  152,  155,  158,  161,  164,  167,  170,  173,  176,  179,
+  182,  185,  188,  191,  194,  197,  200,  203,  207,  211,  215,  219,  223,
+  227,  231,  235,  239,  243,  247,  251,  255,  260,  265,  270,  275,  280,
+  285,  290,  295,  300,  305,  311,  317,  323,  329,  335,  341,  347,  353,
+  359,  366,  373,  380,  387,  394,  401,  408,  416,  424,  432,  440,  448,
+  456,  465,  474,  483,  492,  501,  510,  520,  530,  540,  550,  560,  571,
+  582,  593,  604,  615,  627,  639,  651,  663,  676,  689,  702,  715,  729,
+  743,  757,  771,  786,  801,  816,  832,  848,  864,  881,  898,  915,  933,
+  951,  969,  988,  1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151, 1173, 1196,
+  1219, 1243, 1267, 1292, 1317, 1343, 1369, 1396, 1423, 1451, 1479, 1508, 1537,
+  1567, 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
 };
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static const int16_t ac_qlookup_10[QINDEX_RANGE] = {
-  4,     9,    11,    13,    16,    18,    21,    24,
-  27,    30,    33,    37,    40,    44,    48,    51,
-  55,    59,    63,    67,    71,    75,    79,    83,
-  88,    92,    96,   100,   105,   109,   114,   118,
-  122,   127,   131,   136,   140,   145,   149,   154,
-  158,   163,   168,   172,   177,   181,   186,   190,
-  195,   199,   204,   208,   213,   217,   222,   226,
-  231,   235,   240,   244,   249,   253,   258,   262,
-  267,   271,   275,   280,   284,   289,   293,   297,
-  302,   306,   311,   315,   319,   324,   328,   332,
-  337,   341,   345,   349,   354,   358,   362,   367,
-  371,   375,   379,   384,   388,   392,   396,   401,
-  409,   417,   425,   433,   441,   449,   458,   466,
-  474,   482,   490,   498,   506,   514,   523,   531,
-  539,   547,   555,   563,   571,   579,   588,   596,
-  604,   616,   628,   640,   652,   664,   676,   688,
-  700,   713,   725,   737,   749,   761,   773,   785,
-  797,   809,   825,   841,   857,   873,   889,   905,
-  922,   938,   954,   970,   986,  1002,  1018,  1038,
-  1058,  1078,  1098,  1118,  1138,  1158,  1178,  1198,
-  1218,  1242,  1266,  1290,  1314,  1338,  1362,  1386,
-  1411,  1435,  1463,  1491,  1519,  1547,  1575,  1603,
-  1631,  1663,  1695,  1727,  1759,  1791,  1823,  1859,
-  1895,  1931,  1967,  2003,  2039,  2079,  2119,  2159,
-  2199,  2239,  2283,  2327,  2371,  2415,  2459,  2507,
-  2555,  2603,  2651,  2703,  2755,  2807,  2859,  2915,
-  2971,  3027,  3083,  3143,  3203,  3263,  3327,  3391,
-  3455,  3523,  3591,  3659,  3731,  3803,  3876,  3952,
-  4028,  4104,  4184,  4264,  4348,  4432,  4516,  4604,
-  4692,  4784,  4876,  4972,  5068,  5168,  5268,  5372,
-  5476,  5584,  5692,  5804,  5916,  6032,  6148,  6268,
-  6388,  6512,  6640,  6768,  6900,  7036,  7172,  7312,
+  4,    9,    11,   13,   16,   18,   21,   24,   27,   30,   33,   37,   40,
+  44,   48,   51,   55,   59,   63,   67,   71,   75,   79,   83,   88,   92,
+  96,   100,  105,  109,  114,  118,  122,  127,  131,  136,  140,  145,  149,
+  154,  158,  163,  168,  172,  177,  181,  186,  190,  195,  199,  204,  208,
+  213,  217,  222,  226,  231,  235,  240,  244,  249,  253,  258,  262,  267,
+  271,  275,  280,  284,  289,  293,  297,  302,  306,  311,  315,  319,  324,
+  328,  332,  337,  341,  345,  349,  354,  358,  362,  367,  371,  375,  379,
+  384,  388,  392,  396,  401,  409,  417,  425,  433,  441,  449,  458,  466,
+  474,  482,  490,  498,  506,  514,  523,  531,  539,  547,  555,  563,  571,
+  579,  588,  596,  604,  616,  628,  640,  652,  664,  676,  688,  700,  713,
+  725,  737,  749,  761,  773,  785,  797,  809,  825,  841,  857,  873,  889,
+  905,  922,  938,  954,  970,  986,  1002, 1018, 1038, 1058, 1078, 1098, 1118,
+  1138, 1158, 1178, 1198, 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386, 1411,
+  1435, 1463, 1491, 1519, 1547, 1575, 1603, 1631, 1663, 1695, 1727, 1759, 1791,
+  1823, 1859, 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159, 2199, 2239, 2283,
+  2327, 2371, 2415, 2459, 2507, 2555, 2603, 2651, 2703, 2755, 2807, 2859, 2915,
+  2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391, 3455, 3523, 3591, 3659, 3731,
+  3803, 3876, 3952, 4028, 4104, 4184, 4264, 4348, 4432, 4516, 4604, 4692, 4784,
+  4876, 4972, 5068, 5168, 5268, 5372, 5476, 5584, 5692, 5804, 5916, 6032, 6148,
+  6268, 6388, 6512, 6640, 6768, 6900, 7036, 7172, 7312,
 };
 
 static const int16_t ac_qlookup_12[QINDEX_RANGE] = {
-  4,    13,    19,    27,    35,    44,    54,    64,
-  75,    87,    99,   112,   126,   139,   154,   168,
-  183,   199,   214,   230,   247,   263,   280,   297,
-  314,   331,   349,   366,   384,   402,   420,   438,
-  456,   475,   493,   511,   530,   548,   567,   586,
-  604,   623,   642,   660,   679,   698,   716,   735,
-  753,   772,   791,   809,   828,   846,   865,   884,
-  902,   920,   939,   957,   976,   994,  1012,  1030,
-  1049,  1067,  1085,  1103,  1121,  1139,  1157,  1175,
-  1193,  1211,  1229,  1246,  1264,  1282,  1299,  1317,
-  1335,  1352,  1370,  1387,  1405,  1422,  1440,  1457,
-  1474,  1491,  1509,  1526,  1543,  1560,  1577,  1595,
-  1627,  1660,  1693,  1725,  1758,  1791,  1824,  1856,
-  1889,  1922,  1954,  1987,  2020,  2052,  2085,  2118,
-  2150,  2183,  2216,  2248,  2281,  2313,  2346,  2378,
-  2411,  2459,  2508,  2556,  2605,  2653,  2701,  2750,
-  2798,  2847,  2895,  2943,  2992,  3040,  3088,  3137,
-  3185,  3234,  3298,  3362,  3426,  3491,  3555,  3619,
-  3684,  3748,  3812,  3876,  3941,  4005,  4069,  4149,
-  4230,  4310,  4390,  4470,  4550,  4631,  4711,  4791,
-  4871,  4967,  5064,  5160,  5256,  5352,  5448,  5544,
-  5641,  5737,  5849,  5961,  6073,  6185,  6297,  6410,
-  6522,  6650,  6778,  6906,  7034,  7162,  7290,  7435,
-  7579,  7723,  7867,  8011,  8155,  8315,  8475,  8635,
-  8795,  8956,  9132,  9308,  9484,  9660,  9836, 10028,
-  10220, 10412, 10604, 10812, 11020, 11228, 11437, 11661,
-  11885, 12109, 12333, 12573, 12813, 13053, 13309, 13565,
-  13821, 14093, 14365, 14637, 14925, 15213, 15502, 15806,
-  16110, 16414, 16734, 17054, 17390, 17726, 18062, 18414,
-  18766, 19134, 19502, 19886, 20270, 20670, 21070, 21486,
-  21902, 22334, 22766, 23214, 23662, 24126, 24590, 25070,
-  25551, 26047, 26559, 27071, 27599, 28143, 28687, 29247,
+  4,     13,    19,    27,    35,    44,    54,    64,    75,    87,    99,
+  112,   126,   139,   154,   168,   183,   199,   214,   230,   247,   263,
+  280,   297,   314,   331,   349,   366,   384,   402,   420,   438,   456,
+  475,   493,   511,   530,   548,   567,   586,   604,   623,   642,   660,
+  679,   698,   716,   735,   753,   772,   791,   809,   828,   846,   865,
+  884,   902,   920,   939,   957,   976,   994,   1012,  1030,  1049,  1067,
+  1085,  1103,  1121,  1139,  1157,  1175,  1193,  1211,  1229,  1246,  1264,
+  1282,  1299,  1317,  1335,  1352,  1370,  1387,  1405,  1422,  1440,  1457,
+  1474,  1491,  1509,  1526,  1543,  1560,  1577,  1595,  1627,  1660,  1693,
+  1725,  1758,  1791,  1824,  1856,  1889,  1922,  1954,  1987,  2020,  2052,
+  2085,  2118,  2150,  2183,  2216,  2248,  2281,  2313,  2346,  2378,  2411,
+  2459,  2508,  2556,  2605,  2653,  2701,  2750,  2798,  2847,  2895,  2943,
+  2992,  3040,  3088,  3137,  3185,  3234,  3298,  3362,  3426,  3491,  3555,
+  3619,  3684,  3748,  3812,  3876,  3941,  4005,  4069,  4149,  4230,  4310,
+  4390,  4470,  4550,  4631,  4711,  4791,  4871,  4967,  5064,  5160,  5256,
+  5352,  5448,  5544,  5641,  5737,  5849,  5961,  6073,  6185,  6297,  6410,
+  6522,  6650,  6778,  6906,  7034,  7162,  7290,  7435,  7579,  7723,  7867,
+  8011,  8155,  8315,  8475,  8635,  8795,  8956,  9132,  9308,  9484,  9660,
+  9836,  10028, 10220, 10412, 10604, 10812, 11020, 11228, 11437, 11661, 11885,
+  12109, 12333, 12573, 12813, 13053, 13309, 13565, 13821, 14093, 14365, 14637,
+  14925, 15213, 15502, 15806, 16110, 16414, 16734, 17054, 17390, 17726, 18062,
+  18414, 18766, 19134, 19502, 19886, 20270, 20670, 21070, 21486, 21902, 22334,
+  22766, 23214, 23662, 24126, 24590, 25070, 25551, 26047, 26559, 27071, 27599,
+  28143, 28687, 29247,
 };
 #endif
 
 int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
 #if CONFIG_VPX_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8:
-      return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_10:
-      return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_12:
-      return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
+    case VPX_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
+    case VPX_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
+    case VPX_BITS_12: return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1;
   }
 #else
-  (void) bit_depth;
+  (void)bit_depth;
   return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
 #endif
 }
@@ -248,31 +180,27 @@ int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
 int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) {
 #if CONFIG_VPX_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8:
-      return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_10:
-      return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
-    case VPX_BITS_12:
-      return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
+    case VPX_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
+    case VPX_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
+    case VPX_BITS_12: return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)];
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1;
   }
 #else
-  (void) bit_depth;
+  (void)bit_depth;
   return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
 #endif
 }
 
 int vp10_get_qindex(const struct segmentation *seg, int segment_id,
-                   int base_qindex) {
+                    int base_qindex) {
   if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) {
     const int data = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
-    const int seg_qindex = seg->abs_delta == SEGMENT_ABSDATA ?
-        data : base_qindex + data;
+    const int seg_qindex =
+        seg->abs_delta == SEGMENT_ABSDATA ? data : base_qindex + data;
     return clamp(seg_qindex, 0, MAXQ);
   } else {
     return base_qindex;
   }
 }
-
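
vp10_dc_quant() clamps qindex + delta into [0, MAXQ] before indexing the
per-bit-depth table, so out-of-range segment deltas degrade gracefully rather
than reading past the table. Worked values read off dc_qlookup above, assuming
MAXQ is QINDEX_RANGE - 1 = 255 as elsewhere in libvpx:

/* vp10_dc_quant(10, 0, VPX_BITS_8)   -> dc_qlookup[10]  = 15
 * vp10_dc_quant(250, 20, VPX_BITS_8) -> clamp(270, 0, 255) = 255
 *                                    -> dc_qlookup[255] = 1336
 * vp10_dc_quant(3, -10, VPX_BITS_8)  -> clamp(-7, 0, 255) = 0
 *                                    -> dc_qlookup[0]   = 4
 */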
diff --git a/vp10/common/quant_common.h b/vp10/common/quant_common.h
index 6813e1734cdfa9b05306d8845d10b571ad99a0d2..c6bb24f0b2cf7d9c4089d5a0c744cf90b200acc5 100644
--- a/vp10/common/quant_common.h
+++ b/vp10/common/quant_common.h
@@ -27,7 +27,7 @@ int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth);
 int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth);
 
 int vp10_get_qindex(const struct segmentation *seg, int segment_id,
-                   int base_qindex);
+                    int base_qindex);
 
 #ifdef __cplusplus
 }  // extern "C"
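
vp10_get_qindex() applies a segment's SEG_LVL_ALT_Q data either as an absolute
qindex or as a delta on base_qindex, depending on seg->abs_delta. A worked
sketch of the two modes (SEGMENT_DELTADATA is assumed to be the non-absolute
mode, as in the rest of libvpx):

/* SEG_LVL_ALT_Q active, abs_delta == SEGMENT_ABSDATA:
 *   data = 40, base_qindex = 100 -> clamp(40, 0, MAXQ)  = 40
 *
 * SEG_LVL_ALT_Q active, abs_delta == SEGMENT_DELTADATA:
 *   data = -30, base_qindex = 20 -> clamp(-10, 0, MAXQ) = 0
 *
 * SEG_LVL_ALT_Q inactive: base_qindex is returned unchanged.
 */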
diff --git a/vp10/common/reconinter.c b/vp10/common/reconinter.c
index ddfb8fa2f9a0338b4c9189f5cf56bc5603d5faf9..f857e9479869e7d520c1fe8d26ce9f2984954485 100644
--- a/vp10/common/reconinter.c
+++ b/vp10/common/reconinter.c
@@ -20,14 +20,11 @@
 #include "vp10/common/reconintra.h"
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
-                                      uint8_t *dst, int dst_stride,
-                                      const MV *src_mv,
-                                      const struct scale_factors *sf,
-                                      int w, int h, int ref,
-                                      const InterpKernel *kernel,
-                                      enum mv_precision precision,
-                                      int x, int y, int bd) {
+void vp10_highbd_build_inter_predictor(
+    const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
+    const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
+    const InterpKernel *kernel, enum mv_precision precision, int x, int y,
+    int bd) {
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
@@ -37,19 +34,16 @@ void vp10_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
 
   src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
 
-  high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
-                       sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd);
+  high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, sf,
+                       w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
-                               uint8_t *dst, int dst_stride,
-                               const MV *src_mv,
-                               const struct scale_factors *sf,
-                               int w, int h, int ref,
-                               const InterpKernel *kernel,
-                               enum mv_precision precision,
-                               int x, int y) {
+                                uint8_t *dst, int dst_stride, const MV *src_mv,
+                                const struct scale_factors *sf, int w, int h,
+                                int ref, const InterpKernel *kernel,
+                                enum mv_precision precision, int x, int y) {
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
@@ -59,14 +53,13 @@ void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
 
   src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
 
-  inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
-                  sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
+  inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, sf, w,
+                  h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
 }
 
-void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
-                                   int bw, int bh,
-                                   int x, int y, int w, int h,
-                                   int mi_x, int mi_y) {
+void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, int bw,
+                            int bh, int x, int y, int w, int h, int mi_x,
+                            int mi_y) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const MODE_INFO *mi = xd->mi[0];
   const int is_compound = has_second_ref(&mi->mbmi);
@@ -79,17 +72,16 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
     struct buf_2d *const dst_buf = &pd->dst;
     uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
     const MV mv = mi->mbmi.sb_type < BLOCK_8X8
-               ? average_split_mvs(pd, mi, ref, block)
-               : mi->mbmi.mv[ref].as_mv;
+                      ? average_split_mvs(pd, mi, ref, block)
+                      : mi->mbmi.mv[ref].as_mv;
 
     // TODO(jkoleszar): This clamping is done in the incorrect place for the
     // scaling case. It needs to be done on the scaled MV, not the pre-scaling
     // MV. Note however that it performs the subsampling aware scaling so
     // that the result is always q4.
     // mv_precision precision is MV_PRECISION_Q4.
-    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
-                                               pd->subsampling_x,
-                                               pd->subsampling_y);
+    const MV mv_q4 = clamp_mv_to_umv_border_sb(
+        xd, &mv, bw, bh, pd->subsampling_x, pd->subsampling_y);
 
     uint8_t *pre;
     MV32 scaled_mv;
@@ -109,28 +101,26 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
     }
     subpel_x = scaled_mv.col & SUBPEL_MASK;
     subpel_y = scaled_mv.row & SUBPEL_MASK;
-    pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride
-           + (scaled_mv.col >> SUBPEL_BITS);
+    pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride +
+           (scaled_mv.col >> SUBPEL_BITS);
 
 #if CONFIG_VPX_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
-                           subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys,
-                           xd->bd);
+      high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x,
+                           subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
     } else {
-      inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
-                      subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
+      inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x,
+                      subpel_y, sf, w, h, ref, kernel, xs, ys);
     }
 #else
-    inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
-                    subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
+    inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x,
+                    subpel_y, sf, w, h, ref, kernel, xs, ys);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   }
 }
 
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane,
-                                       int i, int ir, int ic,
-                                       int mi_row, int mi_col) {
+void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
+                                       int ir, int ic, int mi_row, int mi_col) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   MODE_INFO *const mi = xd->mi[0];
   const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
@@ -146,31 +136,25 @@ void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane,
     const uint8_t *pre =
         &pd->pre[ref].buf[(ir * pd->pre[ref].stride + ic) << 2];
 #if CONFIG_VPX_HIGHBITDEPTH
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vp10_highbd_build_inter_predictor(pre, pd->pre[ref].stride,
-                                      dst, pd->dst.stride,
-                                      &mi->bmi[i].as_mv[ref].as_mv,
-                                      &xd->block_refs[ref]->sf, width, height,
-                                      ref, kernel, MV_PRECISION_Q3,
-                                      mi_col * MI_SIZE + 4 * ic,
-                                      mi_row * MI_SIZE + 4 * ir, xd->bd);
-  } else {
-    vp10_build_inter_predictor(pre, pd->pre[ref].stride,
-                               dst, pd->dst.stride,
-                               &mi->bmi[i].as_mv[ref].as_mv,
-                               &xd->block_refs[ref]->sf, width, height, ref,
-                               kernel, MV_PRECISION_Q3,
-                               mi_col * MI_SIZE + 4 * ic,
-                               mi_row * MI_SIZE + 4 * ir);
-  }
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      vp10_highbd_build_inter_predictor(
+          pre, pd->pre[ref].stride, dst, pd->dst.stride,
+          &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
+          ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
+          mi_row * MI_SIZE + 4 * ir, xd->bd);
+    } else {
+      vp10_build_inter_predictor(
+          pre, pd->pre[ref].stride, dst, pd->dst.stride,
+          &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
+          ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
+          mi_row * MI_SIZE + 4 * ir);
+    }
 #else
-    vp10_build_inter_predictor(pre, pd->pre[ref].stride,
-                               dst, pd->dst.stride,
-                               &mi->bmi[i].as_mv[ref].as_mv,
-                               &xd->block_refs[ref]->sf, width, height, ref,
-                               kernel, MV_PRECISION_Q3,
-                               mi_col * MI_SIZE + 4 * ic,
-                               mi_row * MI_SIZE + 4 * ir);
+    vp10_build_inter_predictor(
+        pre, pd->pre[ref].stride, dst, pd->dst.stride,
+        &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
+        ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
+        mi_row * MI_SIZE + 4 * ir);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   }
 }
@@ -200,44 +184,43 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
       assert(pw * num_4x4_w == bw && ph * num_4x4_h == bh);
       for (y = 0; y < num_4x4_h; ++y)
         for (x = 0; x < num_4x4_w; ++x)
-           build_inter_predictors(xd, plane, y * 2 + x, bw, bh,
-                                  4 * x, 4 * y, pw, ph, mi_x, mi_y);
+          build_inter_predictors(xd, plane, y * 2 + x, bw, bh, 4 * x, 4 * y, pw,
+                                 ph, mi_x, mi_y);
     } else {
-      build_inter_predictors(xd, plane, 0, bw, bh,
-                             0, 0, bw, bh, mi_x, mi_y);
+      build_inter_predictors(xd, plane, 0, bw, bh, 0, 0, bw, bh, mi_x, mi_y);
     }
   }
 }
 
 void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                    BLOCK_SIZE bsize) {
+                                     BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
 }
 
 void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                    BLOCK_SIZE bsize, int plane) {
+                                     BLOCK_SIZE bsize, int plane) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane);
 }
 
 void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize) {
+                                      BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
                                     MAX_MB_PLANE - 1);
 }
 
 void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                   BLOCK_SIZE bsize) {
+                                    BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
                                     MAX_MB_PLANE - 1);
 }
 
 void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
-                          const YV12_BUFFER_CONFIG *src,
-                          int mi_row, int mi_col) {
+                           const YV12_BUFFER_CONFIG *src, int mi_row,
+                           int mi_col) {
   uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
-      src->v_buffer};
+                                           src->v_buffer };
   const int strides[MAX_MB_PLANE] = { src->y_stride, src->uv_stride,
-      src->uv_stride};
+                                      src->uv_stride };
   int i;
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
@@ -248,15 +231,14 @@ void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
 }
 
 void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
-                          const YV12_BUFFER_CONFIG *src,
-                          int mi_row, int mi_col,
-                          const struct scale_factors *sf) {
+                           const YV12_BUFFER_CONFIG *src, int mi_row,
+                           int mi_col, const struct scale_factors *sf) {
   if (src != NULL) {
     int i;
     uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
-        src->v_buffer};
+                                             src->v_buffer };
     const int strides[MAX_MB_PLANE] = { src->y_stride, src->uv_stride,
-        src->uv_stride};
+                                        src->uv_stride };
     for (i = 0; i < MAX_MB_PLANE; ++i) {
       struct macroblockd_plane *const pd = &xd->plane[i];
       setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col,
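
A note on the two setup helpers above: vp10_setup_dst_planes() and vp10_setup_pre_planes() follow one pattern, packing the three YV12 plane pointers and strides into small arrays (U and V share uv_stride, Y has its own) and calling setup_pred_plane() once per plane. A minimal sketch of the unscaled pointer math, assuming MI_SIZE is 8 luma pixels per mode-info unit (the constant is not defined in this diff):

#include <stdint.h>

#define EX_MI_SIZE 8 /* assumed: one mode-info unit covers 8x8 luma pixels */

/* Unscaled offset, matching the "y * stride + x" tail of
   scaled_buffer_offset() in the header hunk below. */
static int ex_buffer_offset(int x, int y, int stride) {
  return y * stride + x;
}

/* Per-plane pointer for the block at (mi_row, mi_col), shifted by each
   plane's chroma subsampling as setup_pred_plane() does. */
static uint8_t *ex_plane_ptr(uint8_t *buf, int stride, int mi_row, int mi_col,
                             int ss_x, int ss_y) {
  const int x = (EX_MI_SIZE * mi_col) >> ss_x;
  const int y = (EX_MI_SIZE * mi_row) >> ss_y;
  return buf + ex_buffer_offset(x, y, stride);
}
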
diff --git a/vp10/common/reconinter.h b/vp10/common/reconinter.h
index 451ea08889a8a8b8810dcdff1445bc63b2fea1ca..6af16ae0eee68b1e1eb03517fec179309b5ae7e2 100644
--- a/vp10/common/reconinter.h
+++ b/vp10/common/reconinter.h
@@ -22,29 +22,23 @@ extern "C" {
 
 static INLINE void inter_predictor(const uint8_t *src, int src_stride,
                                    uint8_t *dst, int dst_stride,
-                                   const int subpel_x,
-                                   const int subpel_y,
-                                   const struct scale_factors *sf,
-                                   int w, int h, int ref,
-                                   const InterpKernel *kernel,
-                                   int xs, int ys) {
+                                   const int subpel_x, const int subpel_y,
+                                   const struct scale_factors *sf, int w, int h,
+                                   int ref, const InterpKernel *kernel, int xs,
+                                   int ys) {
   sf->predict[subpel_x != 0][subpel_y != 0][ref](
-      src, src_stride, dst, dst_stride,
-      kernel[subpel_x], xs, kernel[subpel_y], ys, w, h);
+      src, src_stride, dst, dst_stride, kernel[subpel_x], xs, kernel[subpel_y],
+      ys, w, h);
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static INLINE void high_inter_predictor(const uint8_t *src, int src_stride,
-                                        uint8_t *dst, int dst_stride,
-                                        const int subpel_x,
-                                        const int subpel_y,
-                                        const struct scale_factors *sf,
-                                        int w, int h, int ref,
-                                        const InterpKernel *kernel,
-                                        int xs, int ys, int bd) {
+static INLINE void high_inter_predictor(
+    const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
+    const int subpel_x, const int subpel_y, const struct scale_factors *sf,
+    int w, int h, int ref, const InterpKernel *kernel, int xs, int ys, int bd) {
   sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref](
-      src, src_stride, dst, dst_stride,
-      kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd);
+      src, src_stride, dst, dst_stride, kernel[subpel_x], xs, kernel[subpel_y],
+      ys, w, h, bd);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
@@ -53,14 +47,14 @@ static INLINE int round_mv_comp_q4(int value) {
 }
 
 static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
-  MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
-                              mi->bmi[1].as_mv[idx].as_mv.row +
-                              mi->bmi[2].as_mv[idx].as_mv.row +
-                              mi->bmi[3].as_mv[idx].as_mv.row),
-             round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col +
-                              mi->bmi[1].as_mv[idx].as_mv.col +
-                              mi->bmi[2].as_mv[idx].as_mv.col +
-                              mi->bmi[3].as_mv[idx].as_mv.col) };
+  MV res = {
+    round_mv_comp_q4(
+        mi->bmi[0].as_mv[idx].as_mv.row + mi->bmi[1].as_mv[idx].as_mv.row +
+        mi->bmi[2].as_mv[idx].as_mv.row + mi->bmi[3].as_mv[idx].as_mv.row),
+    round_mv_comp_q4(
+        mi->bmi[0].as_mv[idx].as_mv.col + mi->bmi[1].as_mv[idx].as_mv.col +
+        mi->bmi[2].as_mv[idx].as_mv.col + mi->bmi[3].as_mv[idx].as_mv.col)
+  };
   return res;
 }
 
@@ -78,8 +72,8 @@ static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) {
 
 // TODO(jkoleszar): yet another mv clamping function :-(
 static INLINE MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd,
-                                           const MV *src_mv,
-                                           int bw, int bh, int ss_x, int ss_y) {
+                                           const MV *src_mv, int bw, int bh,
+                                           int ss_x, int ss_y) {
   // If the MV points so far into the UMV border that no visible pixels
   // are used for reconstruction, the subpel part of the MV can be
   // discarded and the MV limited to 16 pixels with equivalent results.
@@ -87,15 +81,12 @@ static INLINE MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd,
   const int spel_right = spel_left - SUBPEL_SHIFTS;
   const int spel_top = (VPX_INTERP_EXTEND + bh) << SUBPEL_BITS;
   const int spel_bottom = spel_top - SUBPEL_SHIFTS;
-  MV clamped_mv = {
-    src_mv->row * (1 << (1 - ss_y)),
-    src_mv->col * (1 << (1 - ss_x))
-  };
+  MV clamped_mv = { src_mv->row * (1 << (1 - ss_y)),
+                    src_mv->col * (1 << (1 - ss_x)) };
   assert(ss_x <= 1);
   assert(ss_y <= 1);
 
-  clamp_mv(&clamped_mv,
-           xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left,
+  clamp_mv(&clamped_mv, xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left,
            xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right,
            xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top,
            xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom);
@@ -106,65 +97,48 @@ static INLINE MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd,
 static INLINE MV average_split_mvs(const struct macroblockd_plane *pd,
                                    const MODE_INFO *mi, int ref, int block) {
   const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
-  MV res = {0, 0};
+  MV res = { 0, 0 };
   switch (ss_idx) {
-    case 0:
-      res = mi->bmi[block].as_mv[ref].as_mv;
-      break;
-    case 1:
-      res = mi_mv_pred_q2(mi, ref, block, block + 2);
-      break;
-    case 2:
-      res = mi_mv_pred_q2(mi, ref, block, block + 1);
-      break;
-    case 3:
-      res = mi_mv_pred_q4(mi, ref);
-      break;
-    default:
-      assert(ss_idx <= 3 && ss_idx >= 0);
+    case 0: res = mi->bmi[block].as_mv[ref].as_mv; break;
+    case 1: res = mi_mv_pred_q2(mi, ref, block, block + 2); break;
+    case 2: res = mi_mv_pred_q2(mi, ref, block, block + 1); break;
+    case 3: res = mi_mv_pred_q4(mi, ref); break;
+    default: assert(ss_idx <= 3 && ss_idx >= 0);
   }
   return res;
 }
 
-void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
-                                   int bw, int bh,
-                                   int x, int y, int w, int h,
-                                   int mi_x, int mi_y);
+void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, int bw,
+                            int bh, int x, int y, int w, int h, int mi_x,
+                            int mi_y);
 
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane,
-                                       int i, int ir, int ic,
-                                       int mi_row, int mi_col);
+void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
+                                       int ir, int ic, int mi_row, int mi_col);
 
 void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                    BLOCK_SIZE bsize);
+                                     BLOCK_SIZE bsize);
 
 void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                    BLOCK_SIZE bsize, int plane);
+                                     BLOCK_SIZE bsize, int plane);
 
 void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                     BLOCK_SIZE bsize);
+                                      BLOCK_SIZE bsize);
 
 void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
-                                   BLOCK_SIZE bsize);
+                                    BLOCK_SIZE bsize);
 
 void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
-                               uint8_t *dst, int dst_stride,
-                               const MV *mv_q3,
-                               const struct scale_factors *sf,
-                               int w, int h, int do_avg,
-                               const InterpKernel *kernel,
-                               enum mv_precision precision,
-                               int x, int y);
+                                uint8_t *dst, int dst_stride, const MV *mv_q3,
+                                const struct scale_factors *sf, int w, int h,
+                                int do_avg, const InterpKernel *kernel,
+                                enum mv_precision precision, int x, int y);
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
-                                      uint8_t *dst, int dst_stride,
-                                      const MV *mv_q3,
-                                      const struct scale_factors *sf,
-                                      int w, int h, int do_avg,
-                                      const InterpKernel *kernel,
-                                      enum mv_precision precision,
-                                      int x, int y, int bd);
+void vp10_highbd_build_inter_predictor(
+    const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
+    const MV *mv_q3, const struct scale_factors *sf, int w, int h, int do_avg,
+    const InterpKernel *kernel, enum mv_precision precision, int x, int y,
+    int bd);
 #endif
 
 static INLINE int scaled_buffer_offset(int x_offset, int y_offset, int stride,
@@ -174,9 +148,8 @@ static INLINE int scaled_buffer_offset(int x_offset, int y_offset, int stride,
   return y * stride + x;
 }
 
-static INLINE void setup_pred_plane(struct buf_2d *dst,
-                                    uint8_t *src, int stride,
-                                    int mi_row, int mi_col,
+static INLINE void setup_pred_plane(struct buf_2d *dst, uint8_t *src,
+                                    int stride, int mi_row, int mi_col,
                                     const struct scale_factors *scale,
                                     int subsampling_x, int subsampling_y) {
   const int x = (MI_SIZE * mi_col) >> subsampling_x;
@@ -186,12 +159,12 @@ static INLINE void setup_pred_plane(struct buf_2d *dst,
 }
 
 void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
-                          const YV12_BUFFER_CONFIG *src,
-                          int mi_row, int mi_col);
+                           const YV12_BUFFER_CONFIG *src, int mi_row,
+                           int mi_col);
 
 void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
-                          const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
-                          const struct scale_factors *sf);
+                           const YV12_BUFFER_CONFIG *src, int mi_row,
+                           int mi_col, const struct scale_factors *sf);
 
 #ifdef __cplusplus
 }  // extern "C"
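
A worked sketch of the sub-8x8 chroma MV path above: with both axes subsampled (ss_idx == 3), average_split_mvs() falls through to mi_mv_pred_q4(), which sums the four luma sub-block MVs and rounds. ex_round_q4() below is a hypothetical stand-in for round_mv_comp_q4(), whose body lies outside the hunk; it is assumed to round to nearest with ties away from zero:

#include <stdint.h>

typedef struct { int16_t row, col; } ExampleMV; /* stand-in for MV */

/* Assumed behavior of round_mv_comp_q4(): (sum +/- 2) / 4. */
static int ex_round_q4(int sum) {
  return (sum < 0 ? sum - 2 : sum + 2) / 4;
}

/* Mirrors mi_mv_pred_q4(): one chroma MV from four luma sub-block MVs. */
static ExampleMV ex_average_four_mvs(const ExampleMV mv[4]) {
  ExampleMV res;
  res.row = (int16_t)ex_round_q4(mv[0].row + mv[1].row + mv[2].row + mv[3].row);
  res.col = (int16_t)ex_round_q4(mv[0].col + mv[1].col + mv[2].col + mv[3].col);
  return res;
}
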
diff --git a/vp10/common/reconintra.c b/vp10/common/reconintra.c
index 0d5379430cdba337ef0043262a38dc1ee7c1ab2d..8ad3c635c1ca4fb4e2e87689de1e1f7327542cb9 100644
--- a/vp10/common/reconintra.c
+++ b/vp10/common/reconintra.c
@@ -50,16 +50,16 @@ enum {
 };
 
 static const uint8_t extend_modes[INTRA_MODES] = {
-  NEED_ABOVE | NEED_LEFT,       // DC
-  NEED_ABOVE,                   // V
-  NEED_LEFT,                    // H
-  NEED_ABOVERIGHT,              // D45
-  NEED_LEFT | NEED_ABOVE,       // D135
-  NEED_LEFT | NEED_ABOVE,       // D117
-  NEED_LEFT | NEED_ABOVE,       // D153
-  NEED_LEFT,                    // D207
-  NEED_ABOVERIGHT,              // D63
-  NEED_LEFT | NEED_ABOVE,       // TM
+  NEED_ABOVE | NEED_LEFT,  // DC
+  NEED_ABOVE,              // V
+  NEED_LEFT,               // H
+  NEED_ABOVERIGHT,         // D45
+  NEED_LEFT | NEED_ABOVE,  // D135
+  NEED_LEFT | NEED_ABOVE,  // D117
+  NEED_LEFT | NEED_ABOVE,  // D153
+  NEED_LEFT,               // D207
+  NEED_ABOVERIGHT,         // D63
+  NEED_LEFT | NEED_ABOVE,  // TM
 };
 #endif
 
@@ -68,61 +68,40 @@ static const uint8_t orders_64x64[1] = { 0 };
 static const uint8_t orders_64x32[2] = { 0, 1 };
 static const uint8_t orders_32x64[2] = { 0, 1 };
 static const uint8_t orders_32x32[4] = {
-  0, 1,
-  2, 3,
+  0, 1, 2, 3,
 };
 static const uint8_t orders_32x16[8] = {
-  0, 2,
-  1, 3,
-  4, 6,
-  5, 7,
+  0, 2, 1, 3, 4, 6, 5, 7,
 };
 static const uint8_t orders_16x32[8] = {
-  0, 1, 2, 3,
-  4, 5, 6, 7,
+  0, 1, 2, 3, 4, 5, 6, 7,
 };
 static const uint8_t orders_16x16[16] = {
-  0,   1,  4,  5,
-  2,   3,  6,  7,
-  8,   9, 12, 13,
-  10, 11, 14, 15,
+  0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15,
 };
 static const uint8_t orders_16x8[32] = {
-  0,   2,  8, 10,
-  1,   3,  9, 11,
-  4,   6, 12, 14,
-  5,   7, 13, 15,
-  16, 18, 24, 26,
-  17, 19, 25, 27,
-  20, 22, 28, 30,
-  21, 23, 29, 31,
+  0,  2,  8,  10, 1,  3,  9,  11, 4,  6,  12, 14, 5,  7,  13, 15,
+  16, 18, 24, 26, 17, 19, 25, 27, 20, 22, 28, 30, 21, 23, 29, 31,
 };
 static const uint8_t orders_8x16[32] = {
-  0,   1,  2,  3,  8,  9, 10, 11,
-  4,   5,  6,  7, 12, 13, 14, 15,
-  16, 17, 18, 19, 24, 25, 26, 27,
-  20, 21, 22, 23, 28, 29, 30, 31,
+  0,  1,  2,  3,  8,  9,  10, 11, 4,  5,  6,  7,  12, 13, 14, 15,
+  16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31,
 };
 static const uint8_t orders_8x8[64] = {
-  0,   1,  4,  5, 16, 17, 20, 21,
-  2,   3,  6,  7, 18, 19, 22, 23,
-  8,   9, 12, 13, 24, 25, 28, 29,
-  10, 11, 14, 15, 26, 27, 30, 31,
-  32, 33, 36, 37, 48, 49, 52, 53,
-  34, 35, 38, 39, 50, 51, 54, 55,
-  40, 41, 44, 45, 56, 57, 60, 61,
-  42, 43, 46, 47, 58, 59, 62, 63,
+  0,  1,  4,  5,  16, 17, 20, 21, 2,  3,  6,  7,  18, 19, 22, 23,
+  8,  9,  12, 13, 24, 25, 28, 29, 10, 11, 14, 15, 26, 27, 30, 31,
+  32, 33, 36, 37, 48, 49, 52, 53, 34, 35, 38, 39, 50, 51, 54, 55,
+  40, 41, 44, 45, 56, 57, 60, 61, 42, 43, 46, 47, 58, 59, 62, 63,
 };
 static const uint8_t *const orders[BLOCK_SIZES] = {
-  orders_8x8, orders_8x8, orders_8x8, orders_8x8,
-  orders_8x16, orders_16x8, orders_16x16,
-  orders_16x32, orders_32x16, orders_32x32,
+  orders_8x8,   orders_8x8,   orders_8x8,   orders_8x8,   orders_8x16,
+  orders_16x8,  orders_16x16, orders_16x32, orders_32x16, orders_32x32,
   orders_32x64, orders_64x32, orders_64x64,
 };
 
 static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
-                          int right_available,
-                          TX_SIZE txsz, int y, int x, int ss_x) {
+                          int right_available, TX_SIZE txsz, int y, int x,
+                          int ss_x) {
   if (y == 0) {
     int wl = mi_width_log2_lookup[bsize];
     int hl = mi_height_log2_lookup[bsize];
@@ -131,17 +110,14 @@ static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
     const uint8_t *order = orders[bsize];
     int my_order, tr_order;
 
-    if (x + step < w)
-      return 1;
+    if (x + step < w) return 1;
 
     mi_row = (mi_row & 7) >> hl;
     mi_col = (mi_col & 7) >> wl;
 
-    if (mi_row == 0)
-      return right_available;
+    if (mi_row == 0) return right_available;
 
-    if (((mi_col + 1) << wl) >= 8)
-      return 0;
+    if (((mi_col + 1) << wl) >= 8) return 0;
 
     my_order = order[((mi_row + 0) << (3 - wl)) + mi_col + 0];
     tr_order = order[((mi_row - 1) << (3 - wl)) + mi_col + 1];
@@ -157,8 +133,8 @@ static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
 }
 
 static int vp10_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
-                           int bottom_available, TX_SIZE txsz,
-                           int y, int x, int ss_y) {
+                           int bottom_available, TX_SIZE txsz, int y, int x,
+                           int ss_y) {
   if (x == 0) {
     int wl = mi_width_log2_lookup[bsize];
     int hl = mi_height_log2_lookup[bsize];
@@ -174,11 +150,9 @@ static int vp10_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
       return bottom_available &&
              (mi_row << (hl + !ss_y)) + y + step < (8 << !ss_y);
 
-    if (((mi_row + 1) << hl) >= 8)
-      return 0;
+    if (((mi_row + 1) << hl) >= 8) return 0;
 
-    if (y + step < h)
-      return 1;
+    if (y + step < h) return 1;
 
     my_order = order[((mi_row + 0) << (3 - wl)) + mi_col + 0];
     bl_order = order[((mi_row + 1) << (3 - wl)) + mi_col - 1];
@@ -205,12 +179,12 @@ static intra_high_pred_fn dc_pred_high[2][2][4];
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 static void vp10_init_intra_predictors_internal(void) {
-#define INIT_NO_4X4(p, type) \
-  p[TX_8X8] = vpx_##type##_predictor_8x8; \
+#define INIT_NO_4X4(p, type)                  \
+  p[TX_8X8] = vpx_##type##_predictor_8x8;     \
   p[TX_16X16] = vpx_##type##_predictor_16x16; \
   p[TX_32X32] = vpx_##type##_predictor_32x32
 
-#define INIT_ALL_SIZES(p, type) \
+#define INIT_ALL_SIZES(p, type)           \
   p[TX_4X4] = vpx_##type##_predictor_4x4; \
   INIT_NO_4X4(p, type)
 
@@ -263,29 +237,23 @@ static void vp10_init_intra_predictors_internal(void) {
 
 #if CONFIG_MISC_FIXES
 static INLINE void memset16(uint16_t *dst, int val, int n) {
-  while (n--)
-    *dst++ = val;
+  while (n--) *dst++ = val;
 }
 #endif
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static void build_intra_predictors_high(const MACROBLOCKD *xd,
-                                        const uint8_t *ref8,
-                                        int ref_stride,
-                                        uint8_t *dst8,
-                                        int dst_stride,
-                                        PREDICTION_MODE mode,
-                                        TX_SIZE tx_size,
+                                        const uint8_t *ref8, int ref_stride,
+                                        uint8_t *dst8, int dst_stride,
+                                        PREDICTION_MODE mode, TX_SIZE tx_size,
 #if CONFIG_MISC_FIXES
                                         int n_top_px, int n_topright_px,
                                         int n_left_px, int n_bottomleft_px,
 #else
-                                        int up_available,
-                                        int left_available,
+                                        int up_available, int left_available,
                                         int right_available,
 #endif
-                                        int x, int y,
-                                        int plane, int bd) {
+                                        int x, int y, int plane, int bd) {
   int i;
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
@@ -309,27 +277,26 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
   const int need_above = extend_modes[mode] & NEED_ABOVE;
   const int need_aboveright = extend_modes[mode] & NEED_ABOVERIGHT;
   int base = 128 << (bd - 8);
-  // 127 127 127 .. 127 127 127 127 127 127
-  // 129  A   B  ..  Y   Z
-  // 129  C   D  ..  W   X
-  // 129  E   F  ..  U   V
-  // 129  G   H  ..  S   T   T   T   T   T
+// 127 127 127 .. 127 127 127 127 127 127
+// 129  A   B  ..  Y   Z
+// 129  C   D  ..  W   X
+// 129  E   F  ..  U   V
+// 129  G   H  ..  S   T   T   T   T   T
 
 #if CONFIG_MISC_FIXES
-  (void) x;
-  (void) y;
-  (void) plane;
-  (void) need_left;
-  (void) need_above;
-  (void) need_aboveright;
+  (void)x;
+  (void)y;
+  (void)plane;
+  (void)need_left;
+  (void)need_above;
+  (void)need_aboveright;
 
   // NEED_LEFT
   if (extend_modes[mode] & NEED_LEFT) {
     const int need_bottom = !!(extend_modes[mode] & NEED_BOTTOMLEFT);
     i = 0;
     if (n_left_px > 0) {
-      for (; i < n_left_px; i++)
-        left_col[i] = ref[i * ref_stride - 1];
+      for (; i < n_left_px; i++) left_col[i] = ref[i * ref_stride - 1];
       if (need_bottom && n_bottomleft_px > 0) {
         assert(i == bs);
         for (; i < bs + n_bottomleft_px; i++)
@@ -361,8 +328,8 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
   }
 
   if (extend_modes[mode] & NEED_ABOVELEFT) {
-    above_row[-1] = n_top_px > 0 ?
-        (n_left_px > 0 ? above_ref[-1] : base + 1) : base - 1;
+    above_row[-1] =
+        n_top_px > 0 ? (n_left_px > 0 ? above_ref[-1] : base + 1) : base - 1;
   }
 #else
   // Get current frame pointer, width and height.
@@ -384,8 +351,7 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
       if (xd->mb_to_bottom_edge < 0) {
         /* slower path if the block needs border extension */
         if (y0 + bs <= frame_height) {
-          for (i = 0; i < bs; ++i)
-            left_col[i] = ref[i * ref_stride - 1];
+          for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1];
         } else {
           const int extend_bottom = frame_height - y0;
           for (i = 0; i < extend_bottom; ++i)
@@ -395,8 +361,7 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
         }
       } else {
         /* faster path if the block does not need extension */
-        for (i = 0; i < bs; ++i)
-          left_col[i] = ref[i * ref_stride - 1];
+        for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1];
       }
     } else {
       // TODO(Peter): this value should probably change for high bitdepth
@@ -488,13 +453,11 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
   // predict
   if (mode == DC_PRED) {
 #if CONFIG_MISC_FIXES
-    dc_pred_high[n_left_px > 0][n_top_px > 0][tx_size](dst, dst_stride,
-                                                       const_above_row,
-                                                       left_col, xd->bd);
+    dc_pred_high[n_left_px > 0][n_top_px > 0][tx_size](
+        dst, dst_stride, const_above_row, left_col, xd->bd);
 #else
-    dc_pred_high[left_available][up_available][tx_size](dst, dst_stride,
-                                                        const_above_row,
-                                                        left_col, xd->bd);
+    dc_pred_high[left_available][up_available][tx_size](
+        dst, dst_stride, const_above_row, left_col, xd->bd);
 #endif
   } else {
     pred_high[mode][tx_size](dst, dst_stride, const_above_row, left_col,
@@ -529,18 +492,18 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
   const uint8_t *const_above_row = above_row;
   const int bs = 4 << tx_size;
 
-  // 127 127 127 .. 127 127 127 127 127 127
-  // 129  A   B  ..  Y   Z
-  // 129  C   D  ..  W   X
-  // 129  E   F  ..  U   V
-  // 129  G   H  ..  S   T   T   T   T   T
-  // ..
+// 127 127 127 .. 127 127 127 127 127 127
+// 129  A   B  ..  Y   Z
+// 129  C   D  ..  W   X
+// 129  E   F  ..  U   V
+// 129  G   H  ..  S   T   T   T   T   T
+// ..
 
 #if CONFIG_MISC_FIXES
-  (void) xd;
-  (void) x;
-  (void) y;
-  (void) plane;
+  (void)xd;
+  (void)x;
+  (void)y;
+  (void)plane;
   assert(n_top_px >= 0);
   assert(n_topright_px >= 0);
   assert(n_left_px >= 0);
@@ -566,8 +529,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
     const int need_bottom = !!(extend_modes[mode] & NEED_BOTTOMLEFT);
     i = 0;
     if (n_left_px > 0) {
-      for (; i < n_left_px; i++)
-        left_col[i] = ref[i * ref_stride - 1];
+      for (; i < n_left_px; i++) left_col[i] = ref[i * ref_stride - 1];
       if (need_bottom && n_bottomleft_px > 0) {
         assert(i == bs);
         for (; i < bs + n_bottomleft_px; i++)
@@ -583,8 +545,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
       if (xd->mb_to_bottom_edge < 0) {
         /* slower path if the block needs border extension */
         if (y0 + bs <= frame_height) {
-          for (i = 0; i < bs; ++i)
-            left_col[i] = ref[i * ref_stride - 1];
+          for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1];
         } else {
           const int extend_bottom = frame_height - y0;
           for (i = 0; i < extend_bottom; ++i)
@@ -594,8 +555,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
         }
       } else {
         /* faster path if the block does not need extension */
-        for (i = 0; i < bs; ++i)
-          left_col[i] = ref[i * ref_stride - 1];
+        for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1];
       }
     } else {
       memset(left_col, 129, bs);
@@ -715,10 +675,9 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
 }
 
 void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
-                             TX_SIZE tx_size, PREDICTION_MODE mode,
-                             const uint8_t *ref, int ref_stride,
-                             uint8_t *dst, int dst_stride,
-                             int aoff, int loff, int plane) {
+                              TX_SIZE tx_size, PREDICTION_MODE mode,
+                              const uint8_t *ref, int ref_stride, uint8_t *dst,
+                              int dst_stride, int aoff, int loff, int plane) {
   const int txw = (1 << tx_size);
   const int have_top = loff || xd->up_available;
   const int have_left = aoff || xd->left_available;
@@ -733,14 +692,11 @@ void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   const int right_available =
       mi_col + (bw >> !pd->subsampling_x) < xd->tile.mi_col_end;
-  const int have_right = vp10_has_right(bsize, mi_row, mi_col,
-                                        right_available,
-                                        tx_size, loff, aoff,
-                                        pd->subsampling_x);
-  const int have_bottom = vp10_has_bottom(bsize, mi_row, mi_col,
-                                          xd->mb_to_bottom_edge > 0,
-                                          tx_size, loff, aoff,
-                                          pd->subsampling_y);
+  const int have_right = vp10_has_right(bsize, mi_row, mi_col, right_available,
+                                        tx_size, loff, aoff, pd->subsampling_x);
+  const int have_bottom =
+      vp10_has_bottom(bsize, mi_row, mi_col, xd->mb_to_bottom_edge > 0, tx_size,
+                      loff, aoff, pd->subsampling_y);
   const int wpx = 4 * bw;
   const int hpx = 4 * bh;
   const int txpx = 4 * txw;
@@ -757,8 +713,7 @@ void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode,
-                                tx_size,
-                                have_top ? VPXMIN(txpx, xr + txpx) : 0,
+                                tx_size, have_top ? VPXMIN(txpx, xr + txpx) : 0,
                                 have_top && have_right ? VPXMIN(txpx, xr) : 0,
                                 have_left ? VPXMIN(txpx, yd + txpx) : 0,
                                 have_bottom && have_left ? VPXMIN(txpx, yd) : 0,
@@ -766,20 +721,19 @@ void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
     return;
   }
 #endif
-  build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode,
-                         tx_size,
+  build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
                          have_top ? VPXMIN(txpx, xr + txpx) : 0,
                          have_top && have_right ? VPXMIN(txpx, xr) : 0,
                          have_left ? VPXMIN(txpx, yd + txpx) : 0,
-                         have_bottom && have_left ? VPXMIN(txpx, yd) : 0,
-                         x, y, plane);
+                         have_bottom && have_left ? VPXMIN(txpx, yd) : 0, x, y,
+                         plane);
 #else  // CONFIG_MISC_FIXES
-  (void) bhl_in;
+  (void)bhl_in;
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode,
-                                tx_size, have_top, have_left, have_right,
-                                x, y, plane, xd->bd);
+                                tx_size, have_top, have_left, have_right, x, y,
+                                plane, xd->bd);
     return;
   }
 #endif
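
The orders_* tables above record, for each block size, the coding order of the blocks inside a 64x64 superblock, stored in raster layout. vp10_has_right() reads them to decide whether the above-right neighbor was coded before the current block, in which case its pixels are available for intra prediction. A small sketch for the 16x16 case; the final comparison is an assumption, since the hunk ends just before the return:

#include <stdint.h>

/* Copied from the diff: coding order of 16x16 blocks in a 64x64 SB,
   raster layout, 4 blocks per row. */
static const uint8_t ex_orders_16x16[16] = {
  0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15,
};

/* row in 1..3, col in 0..2: was the above-right 16x16 block coded earlier?
   Assumed test: a larger own order number means the neighbor came first. */
static int ex_top_right_available(int row, int col) {
  const int my_order = ex_orders_16x16[row * 4 + col];
  const int tr_order = ex_orders_16x16[(row - 1) * 4 + (col + 1)];
  return my_order > tr_order;
}

For example, the block with order 3 (row 1, col 1) sees order 4 above-right, so its top-right pixels are not yet decoded, while the order-2 block (row 1, col 0) sees order 1 there and can use them.
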
diff --git a/vp10/common/reconintra.h b/vp10/common/reconintra.h
index f451fb8f7008bf39e324c96d832f7c8c3980c502..a38f05895308d1661dc20e56906f7c772982f34d 100644
--- a/vp10/common/reconintra.h
+++ b/vp10/common/reconintra.h
@@ -21,10 +21,9 @@ extern "C" {
 void vp10_init_intra_predictors(void);
 
 void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
-                             TX_SIZE tx_size, PREDICTION_MODE mode,
-                             const uint8_t *ref, int ref_stride,
-                             uint8_t *dst, int dst_stride,
-                             int aoff, int loff, int plane);
+                              TX_SIZE tx_size, PREDICTION_MODE mode,
+                              const uint8_t *ref, int ref_stride, uint8_t *dst,
+                              int dst_stride, int aoff, int loff, int plane);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/vp10/common/scale.c b/vp10/common/scale.c
index c4287bf8baa97f16f723d23a4f0fe0dc6e03343d..d654b4a292b1888785cb25c43157d8147587ff27 100644
--- a/vp10/common/scale.c
+++ b/vp10/common/scale.c
@@ -22,7 +22,7 @@ static INLINE int scaled_y(int val, const struct scale_factors *sf) {
 }
 
 static int unscaled_value(int val, const struct scale_factors *sf) {
-  (void) sf;
+  (void)sf;
   return val;
 }
 
@@ -37,22 +37,18 @@ static int get_fixed_point_scale_factor(int other_size, int this_size) {
 MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
   const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
   const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
-  const MV32 res = {
-    scaled_y(mv->row, sf) + y_off_q4,
-    scaled_x(mv->col, sf) + x_off_q4
-  };
+  const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
+                     scaled_x(mv->col, sf) + x_off_q4 };
   return res;
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf,
-                                       int other_w, int other_h,
-                                       int this_w, int this_h,
-                                       int use_highbd) {
+void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                        int other_h, int this_w, int this_h,
+                                        int use_highbd) {
 #else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf,
-                                       int other_w, int other_h,
-                                       int this_w, int this_h) {
+void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                        int other_h, int this_w, int this_h) {
 #endif
   if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) {
     sf->x_scale_fp = REF_INVALID_SCALE;
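
vp10_scale_mv() above works entirely in fixed point: scaled_x()/scaled_y() multiply by the frame scale factors, and the scaled subpel phase of the block position is folded into the result. Their bodies and the constants fall outside these hunks; the sketch below assumes Q14 scale factors (REF_SCALE_SHIFT == 14) and 4 subpel bits, as recalled from libvpx:

#include <stdint.h>

#define EX_REF_SCALE_SHIFT 14 /* assumed: Q14 fixed-point scale factors */
#define EX_SUBPEL_BITS 4      /* assumed */
#define EX_SUBPEL_MASK ((1 << EX_SUBPEL_BITS) - 1)

/* Assumed body of scaled_x()/scaled_y(): a fixed-point multiply. */
static int ex_scaled(int val, int scale_fp) {
  return (int)(((int64_t)val * scale_fp) >> EX_REF_SCALE_SHIFT);
}

/* Mirrors the column path of vp10_scale_mv(): scale the MV component and
   add the subpel phase contributed by the scaled block position x. */
static int ex_scale_mv_col(int mv_col, int x, int x_scale_fp) {
  const int x_off_q4 =
      ex_scaled(x << EX_SUBPEL_BITS, x_scale_fp) & EX_SUBPEL_MASK;
  return ex_scaled(mv_col, x_scale_fp) + x_off_q4;
}
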
diff --git a/vp10/common/scale.h b/vp10/common/scale.h
index 709b5ad74f616c27282093039160882941d894d6..70e301b3e67ac856dd271958a390c3f13b9fde64 100644
--- a/vp10/common/scale.h
+++ b/vp10/common/scale.h
@@ -23,8 +23,8 @@ extern "C" {
 #define REF_INVALID_SCALE -1
 
 struct scale_factors {
-  int x_scale_fp;   // horizontal fixed point scale factor
-  int y_scale_fp;   // vertical fixed point scale factor
+  int x_scale_fp;  // horizontal fixed point scale factor
+  int y_scale_fp;  // vertical fixed point scale factor
   int x_step_q4;
   int y_step_q4;
 
@@ -40,14 +40,12 @@ struct scale_factors {
 MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf,
-                                       int other_w, int other_h,
-                                       int this_w, int this_h,
-                                       int use_high);
+void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                        int other_h, int this_w, int this_h,
+                                        int use_high);
 #else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf,
-                                       int other_w, int other_h,
-                                       int this_w, int this_h);
+void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+                                        int other_h, int this_w, int this_h);
 #endif
 
 static INLINE int vp10_is_valid_scale(const struct scale_factors *sf) {
@@ -61,11 +59,9 @@ static INLINE int vp10_is_scaled(const struct scale_factors *sf) {
 }
 
 static INLINE int valid_ref_frame_size(int ref_width, int ref_height,
-                                      int this_width, int this_height) {
-  return 2 * this_width >= ref_width &&
-         2 * this_height >= ref_height &&
-         this_width <= 16 * ref_width &&
-         this_height <= 16 * ref_height;
+                                       int this_width, int this_height) {
+  return 2 * this_width >= ref_width && 2 * this_height >= ref_height &&
+         this_width <= 16 * ref_width && this_height <= 16 * ref_height;
 }
 
 #ifdef __cplusplus
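
valid_ref_frame_size() above encodes the limits on reference scaling: per axis, a reference frame may be at most 2x the size of the current frame, and the current frame at most 16x the reference. A self-contained check of the same predicate with two concrete cases:

#include <assert.h>

/* Same predicate as valid_ref_frame_size() in the hunk above. */
static int ex_ref_size_ok(int rw, int rh, int tw, int th) {
  return 2 * tw >= rw && 2 * th >= rh && tw <= 16 * rw && th <= 16 * rh;
}

int main(void) {
  assert(ex_ref_size_ok(1920, 1080, 960, 540));  /* 2x down per axis: ok */
  assert(!ex_ref_size_ok(1920, 1080, 900, 506)); /* ref wider than 2x: no */
  return 0;
}
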
diff --git a/vp10/common/scan.c b/vp10/common/scan.c
index 7217f6d045926ae85c87bccf785506078e5363eb..8edeea8f1363b6b2b5995c6b37a6e0caf2476d93 100644
--- a/vp10/common/scan.c
+++ b/vp10/common/scan.c
@@ -13,220 +13,181 @@
 #include "vp10/common/scan.h"
 
 DECLARE_ALIGNED(16, static const int16_t, default_scan_4x4[16]) = {
-  0,  4,  1,  5,
-  8,  2, 12,  9,
-  3,  6, 13, 10,
-  7, 14, 11, 15,
+  0, 4, 1, 5, 8, 2, 12, 9, 3, 6, 13, 10, 7, 14, 11, 15,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, col_scan_4x4[16]) = {
-  0,  4,  8,  1,
-  12,  5,  9,  2,
-  13,  6, 10,  3,
-  7, 14, 11, 15,
+  0, 4, 8, 1, 12, 5, 9, 2, 13, 6, 10, 3, 7, 14, 11, 15,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, row_scan_4x4[16]) = {
-  0,  1,  4,  2,
-  5,  3,  6,  8,
-  9,  7, 12, 10,
-  13, 11, 14, 15,
+  0, 1, 4, 2, 5, 3, 6, 8, 9, 7, 12, 10, 13, 11, 14, 15,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, default_scan_8x8[64]) = {
-  0,  8,  1, 16,  9,  2, 17, 24,
-  10,  3, 18, 25, 32, 11,  4, 26,
-  33, 19, 40, 12, 34, 27,  5, 41,
-  20, 48, 13, 35, 42, 28, 21,  6,
-  49, 56, 36, 43, 29,  7, 14, 50,
-  57, 44, 22, 37, 15, 51, 58, 30,
-  45, 23, 52, 59, 38, 31, 60, 53,
-  46, 39, 61, 54, 47, 62, 55, 63,
+  0,  8,  1,  16, 9,  2,  17, 24, 10, 3,  18, 25, 32, 11, 4,  26,
+  33, 19, 40, 12, 34, 27, 5,  41, 20, 48, 13, 35, 42, 28, 21, 6,
+  49, 56, 36, 43, 29, 7,  14, 50, 57, 44, 22, 37, 15, 51, 58, 30,
+  45, 23, 52, 59, 38, 31, 60, 53, 46, 39, 61, 54, 47, 62, 55, 63,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, col_scan_8x8[64]) = {
-  0,  8, 16,  1, 24,  9, 32, 17,
-  2, 40, 25, 10, 33, 18, 48,  3,
-  26, 41, 11, 56, 19, 34,  4, 49,
-  27, 42, 12, 35, 20, 57, 50, 28,
-  5, 43, 13, 36, 58, 51, 21, 44,
-  6, 29, 59, 37, 14, 52, 22,  7,
-  45, 60, 30, 15, 38, 53, 23, 46,
-  31, 61, 39, 54, 47, 62, 55, 63,
+  0,  8,  16, 1,  24, 9,  32, 17, 2,  40, 25, 10, 33, 18, 48, 3,
+  26, 41, 11, 56, 19, 34, 4,  49, 27, 42, 12, 35, 20, 57, 50, 28,
+  5,  43, 13, 36, 58, 51, 21, 44, 6,  29, 59, 37, 14, 52, 22, 7,
+  45, 60, 30, 15, 38, 53, 23, 46, 31, 61, 39, 54, 47, 62, 55, 63,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, row_scan_8x8[64]) = {
-  0,  1,  2,  8,  9,  3, 16, 10,
-  4, 17, 11, 24,  5, 18, 25, 12,
-  19, 26, 32,  6, 13, 20, 33, 27,
-  7, 34, 40, 21, 28, 41, 14, 35,
-  48, 42, 29, 36, 49, 22, 43, 15,
-  56, 37, 50, 44, 30, 57, 23, 51,
-  58, 45, 38, 52, 31, 59, 53, 46,
-  60, 39, 61, 47, 54, 55, 62, 63,
+  0,  1,  2,  8,  9,  3,  16, 10, 4,  17, 11, 24, 5,  18, 25, 12,
+  19, 26, 32, 6,  13, 20, 33, 27, 7,  34, 40, 21, 28, 41, 14, 35,
+  48, 42, 29, 36, 49, 22, 43, 15, 56, 37, 50, 44, 30, 57, 23, 51,
+  58, 45, 38, 52, 31, 59, 53, 46, 60, 39, 61, 47, 54, 55, 62, 63,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, default_scan_16x16[256]) = {
-  0, 16, 1, 32, 17, 2, 48, 33, 18, 3, 64, 34, 49, 19, 65, 80,
-  50, 4, 35, 66, 20, 81, 96, 51, 5, 36, 82, 97, 67, 112, 21, 52,
-  98, 37, 83, 113, 6, 68, 128, 53, 22, 99, 114, 84, 7, 129, 38, 69,
-  100, 115, 144, 130, 85, 54, 23, 8, 145, 39, 70, 116, 101, 131, 160, 146,
-  55, 86, 24, 71, 132, 117, 161, 40, 9, 102, 147, 176, 162, 87, 56, 25,
-  133, 118, 177, 148, 72, 103, 41, 163, 10, 192, 178, 88, 57, 134, 149, 119,
-  26, 164, 73, 104, 193, 42, 179, 208, 11, 135, 89, 165, 120, 150, 58, 194,
-  180, 27, 74, 209, 105, 151, 136, 43, 90, 224, 166, 195, 181, 121, 210, 59,
-  12, 152, 106, 167, 196, 75, 137, 225, 211, 240, 182, 122, 91, 28, 197, 13,
-  226, 168, 183, 153, 44, 212, 138, 107, 241, 60, 29, 123, 198, 184, 227, 169,
-  242, 76, 213, 154, 45, 92, 14, 199, 139, 61, 228, 214, 170, 185, 243, 108,
-  77, 155, 30, 15, 200, 229, 124, 215, 244, 93, 46, 186, 171, 201, 109, 140,
-  230, 62, 216, 245, 31, 125, 78, 156, 231, 47, 187, 202, 217, 94, 246, 141,
-  63, 232, 172, 110, 247, 157, 79, 218, 203, 126, 233, 188, 248, 95, 173, 142,
-  219, 111, 249, 234, 158, 127, 189, 204, 250, 235, 143, 174, 220, 205, 159,
-  251,
+  0,   16,  1,   32,  17,  2,   48,  33,  18,  3,   64,  34,  49,  19,  65,
+  80,  50,  4,   35,  66,  20,  81,  96,  51,  5,   36,  82,  97,  67,  112,
+  21,  52,  98,  37,  83,  113, 6,   68,  128, 53,  22,  99,  114, 84,  7,
+  129, 38,  69,  100, 115, 144, 130, 85,  54,  23,  8,   145, 39,  70,  116,
+  101, 131, 160, 146, 55,  86,  24,  71,  132, 117, 161, 40,  9,   102, 147,
+  176, 162, 87,  56,  25,  133, 118, 177, 148, 72,  103, 41,  163, 10,  192,
+  178, 88,  57,  134, 149, 119, 26,  164, 73,  104, 193, 42,  179, 208, 11,
+  135, 89,  165, 120, 150, 58,  194, 180, 27,  74,  209, 105, 151, 136, 43,
+  90,  224, 166, 195, 181, 121, 210, 59,  12,  152, 106, 167, 196, 75,  137,
+  225, 211, 240, 182, 122, 91,  28,  197, 13,  226, 168, 183, 153, 44,  212,
+  138, 107, 241, 60,  29,  123, 198, 184, 227, 169, 242, 76,  213, 154, 45,
+  92,  14,  199, 139, 61,  228, 214, 170, 185, 243, 108, 77,  155, 30,  15,
+  200, 229, 124, 215, 244, 93,  46,  186, 171, 201, 109, 140, 230, 62,  216,
+  245, 31,  125, 78,  156, 231, 47,  187, 202, 217, 94,  246, 141, 63,  232,
+  172, 110, 247, 157, 79,  218, 203, 126, 233, 188, 248, 95,  173, 142, 219,
+  111, 249, 234, 158, 127, 189, 204, 250, 235, 143, 174, 220, 205, 159, 251,
   190, 221, 175, 236, 237, 191, 206, 252, 222, 253, 207, 238, 223, 254, 239,
   255,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, col_scan_16x16[256]) = {
-  0, 16, 32, 48, 1, 64, 17, 80, 33, 96, 49, 2, 65, 112, 18, 81,
-  34, 128, 50, 97, 3, 66, 144, 19, 113, 35, 82, 160, 98, 51, 129, 4,
-  67, 176, 20, 114, 145, 83, 36, 99, 130, 52, 192, 5, 161, 68, 115, 21,
-  146, 84, 208, 177, 37, 131, 100, 53, 162, 224, 69, 6, 116, 193, 147, 85,
-  22, 240, 132, 38, 178, 101, 163, 54, 209, 117, 70, 7, 148, 194, 86, 179,
-  225, 23, 133, 39, 164, 8, 102, 210, 241, 55, 195, 118, 149, 71, 180, 24,
-  87, 226, 134, 165, 211, 40, 103, 56, 72, 150, 196, 242, 119, 9, 181, 227,
-  88, 166, 25, 135, 41, 104, 212, 57, 151, 197, 120, 73, 243, 182, 136, 167,
-  213, 89, 10, 228, 105, 152, 198, 26, 42, 121, 183, 244, 168, 58, 137, 229,
-  74, 214, 90, 153, 199, 184, 11, 106, 245, 27, 122, 230, 169, 43, 215, 59,
-  200, 138, 185, 246, 75, 12, 91, 154, 216, 231, 107, 28, 44, 201, 123, 170,
-  60, 247, 232, 76, 139, 13, 92, 217, 186, 248, 155, 108, 29, 124, 45, 202,
-  233, 171, 61, 14, 77, 140, 15, 249, 93, 30, 187, 156, 218, 46, 109, 125,
-  62, 172, 78, 203, 31, 141, 234, 94, 47, 188, 63, 157, 110, 250, 219, 79,
-  126, 204, 173, 142, 95, 189, 111, 235, 158, 220, 251, 127, 174, 143, 205,
-  236,
+  0,   16,  32,  48,  1,   64,  17,  80,  33,  96,  49,  2,   65,  112, 18,
+  81,  34,  128, 50,  97,  3,   66,  144, 19,  113, 35,  82,  160, 98,  51,
+  129, 4,   67,  176, 20,  114, 145, 83,  36,  99,  130, 52,  192, 5,   161,
+  68,  115, 21,  146, 84,  208, 177, 37,  131, 100, 53,  162, 224, 69,  6,
+  116, 193, 147, 85,  22,  240, 132, 38,  178, 101, 163, 54,  209, 117, 70,
+  7,   148, 194, 86,  179, 225, 23,  133, 39,  164, 8,   102, 210, 241, 55,
+  195, 118, 149, 71,  180, 24,  87,  226, 134, 165, 211, 40,  103, 56,  72,
+  150, 196, 242, 119, 9,   181, 227, 88,  166, 25,  135, 41,  104, 212, 57,
+  151, 197, 120, 73,  243, 182, 136, 167, 213, 89,  10,  228, 105, 152, 198,
+  26,  42,  121, 183, 244, 168, 58,  137, 229, 74,  214, 90,  153, 199, 184,
+  11,  106, 245, 27,  122, 230, 169, 43,  215, 59,  200, 138, 185, 246, 75,
+  12,  91,  154, 216, 231, 107, 28,  44,  201, 123, 170, 60,  247, 232, 76,
+  139, 13,  92,  217, 186, 248, 155, 108, 29,  124, 45,  202, 233, 171, 61,
+  14,  77,  140, 15,  249, 93,  30,  187, 156, 218, 46,  109, 125, 62,  172,
+  78,  203, 31,  141, 234, 94,  47,  188, 63,  157, 110, 250, 219, 79,  126,
+  204, 173, 142, 95,  189, 111, 235, 158, 220, 251, 127, 174, 143, 205, 236,
   159, 190, 221, 252, 175, 206, 237, 191, 253, 222, 238, 207, 254, 223, 239,
   255,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, row_scan_16x16[256]) = {
-  0, 1, 2, 16, 3, 17, 4, 18, 32, 5, 33, 19, 6, 34, 48, 20,
-  49, 7, 35, 21, 50, 64, 8, 36, 65, 22, 51, 37, 80, 9, 66, 52,
-  23, 38, 81, 67, 10, 53, 24, 82, 68, 96, 39, 11, 54, 83, 97, 69,
-  25, 98, 84, 40, 112, 55, 12, 70, 99, 113, 85, 26, 41, 56, 114, 100,
-  13, 71, 128, 86, 27, 115, 101, 129, 42, 57, 72, 116, 14, 87, 130, 102,
-  144, 73, 131, 117, 28, 58, 15, 88, 43, 145, 103, 132, 146, 118, 74, 160,
-  89, 133, 104, 29, 59, 147, 119, 44, 161, 148, 90, 105, 134, 162, 120, 176,
-  75, 135, 149, 30, 60, 163, 177, 45, 121, 91, 106, 164, 178, 150, 192, 136,
-  165, 179, 31, 151, 193, 76, 122, 61, 137, 194, 107, 152, 180, 208, 46, 166,
-  167, 195, 92, 181, 138, 209, 123, 153, 224, 196, 77, 168, 210, 182, 240, 108,
-  197, 62, 154, 225, 183, 169, 211, 47, 139, 93, 184, 226, 212, 241, 198, 170,
-  124, 155, 199, 78, 213, 185, 109, 227, 200, 63, 228, 242, 140, 214, 171, 186,
-  156, 229, 243, 125, 94, 201, 244, 215, 216, 230, 141, 187, 202, 79, 172, 110,
-  157, 245, 217, 231, 95, 246, 232, 126, 203, 247, 233, 173, 218, 142, 111,
-  158,
-  188, 248, 127, 234, 219, 249, 189, 204, 143, 174, 159, 250, 235, 205, 220,
-  175,
+  0,   1,   2,   16,  3,   17,  4,   18,  32,  5,   33,  19,  6,   34,  48,
+  20,  49,  7,   35,  21,  50,  64,  8,   36,  65,  22,  51,  37,  80,  9,
+  66,  52,  23,  38,  81,  67,  10,  53,  24,  82,  68,  96,  39,  11,  54,
+  83,  97,  69,  25,  98,  84,  40,  112, 55,  12,  70,  99,  113, 85,  26,
+  41,  56,  114, 100, 13,  71,  128, 86,  27,  115, 101, 129, 42,  57,  72,
+  116, 14,  87,  130, 102, 144, 73,  131, 117, 28,  58,  15,  88,  43,  145,
+  103, 132, 146, 118, 74,  160, 89,  133, 104, 29,  59,  147, 119, 44,  161,
+  148, 90,  105, 134, 162, 120, 176, 75,  135, 149, 30,  60,  163, 177, 45,
+  121, 91,  106, 164, 178, 150, 192, 136, 165, 179, 31,  151, 193, 76,  122,
+  61,  137, 194, 107, 152, 180, 208, 46,  166, 167, 195, 92,  181, 138, 209,
+  123, 153, 224, 196, 77,  168, 210, 182, 240, 108, 197, 62,  154, 225, 183,
+  169, 211, 47,  139, 93,  184, 226, 212, 241, 198, 170, 124, 155, 199, 78,
+  213, 185, 109, 227, 200, 63,  228, 242, 140, 214, 171, 186, 156, 229, 243,
+  125, 94,  201, 244, 215, 216, 230, 141, 187, 202, 79,  172, 110, 157, 245,
+  217, 231, 95,  246, 232, 126, 203, 247, 233, 173, 218, 142, 111, 158, 188,
+  248, 127, 234, 219, 249, 189, 204, 143, 174, 159, 250, 235, 205, 220, 175,
   190, 251, 221, 191, 206, 236, 207, 237, 252, 222, 253, 223, 238, 239, 254,
   255,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, default_scan_32x32[1024]) = {
-  0, 32, 1, 64, 33, 2, 96, 65, 34, 128, 3, 97, 66, 160,
-  129, 35, 98, 4, 67, 130, 161, 192, 36, 99, 224, 5, 162, 193,
-  68, 131, 37, 100,
-  225, 194, 256, 163, 69, 132, 6, 226, 257, 288, 195, 101, 164, 38,
-  258, 7, 227, 289, 133, 320, 70, 196, 165, 290, 259, 228, 39, 321,
-  102, 352, 8, 197,
-  71, 134, 322, 291, 260, 353, 384, 229, 166, 103, 40, 354, 323, 292,
-  135, 385, 198, 261, 72, 9, 416, 167, 386, 355, 230, 324, 104, 293,
-  41, 417, 199, 136,
-  262, 387, 448, 325, 356, 10, 73, 418, 231, 168, 449, 294, 388, 105,
-  419, 263, 42, 200, 357, 450, 137, 480, 74, 326, 232, 11, 389, 169,
-  295, 420, 106, 451,
-  481, 358, 264, 327, 201, 43, 138, 512, 482, 390, 296, 233, 170, 421,
-  75, 452, 359, 12, 513, 265, 483, 328, 107, 202, 514, 544, 422, 391,
-  453, 139, 44, 234,
-  484, 297, 360, 171, 76, 515, 545, 266, 329, 454, 13, 423, 203, 108,
-  546, 485, 576, 298, 235, 140, 361, 330, 172, 547, 45, 455, 267, 577,
-  486, 77, 204, 362,
-  608, 14, 299, 578, 109, 236, 487, 609, 331, 141, 579, 46, 15, 173,
-  610, 363, 78, 205, 16, 110, 237, 611, 142, 47, 174, 79, 206, 17,
-  111, 238, 48, 143,
-  80, 175, 112, 207, 49, 18, 239, 81, 113, 19, 50, 82, 114, 51,
-  83, 115, 640, 516, 392, 268, 144, 20, 672, 641, 548, 517, 424,
-  393, 300, 269, 176, 145,
-  52, 21, 704, 673, 642, 580, 549, 518, 456, 425, 394, 332, 301,
-  270, 208, 177, 146, 84, 53, 22, 736, 705, 674, 643, 612, 581,
-  550, 519, 488, 457, 426, 395,
-  364, 333, 302, 271, 240, 209, 178, 147, 116, 85, 54, 23, 737,
-  706, 675, 613, 582, 551, 489, 458, 427, 365, 334, 303, 241,
-  210, 179, 117, 86, 55, 738, 707,
-  614, 583, 490, 459, 366, 335, 242, 211, 118, 87, 739, 615, 491,
-  367, 243, 119, 768, 644, 520, 396, 272, 148, 24, 800, 769, 676,
-  645, 552, 521, 428, 397, 304,
-  273, 180, 149, 56, 25, 832, 801, 770, 708, 677, 646, 584, 553,
-  522, 460, 429, 398, 336, 305, 274, 212, 181, 150, 88, 57, 26,
-  864, 833, 802, 771, 740, 709,
-  678, 647, 616, 585, 554, 523, 492, 461, 430, 399, 368, 337, 306,
-  275, 244, 213, 182, 151, 120, 89, 58, 27, 865, 834, 803, 741,
-  710, 679, 617, 586, 555, 493,
-  462, 431, 369, 338, 307, 245, 214, 183, 121, 90, 59, 866, 835,
-  742, 711, 618, 587, 494, 463, 370, 339, 246, 215, 122, 91, 867,
-  743, 619, 495, 371, 247, 123,
-  896, 772, 648, 524, 400, 276, 152, 28, 928, 897, 804, 773, 680,
-  649, 556, 525, 432, 401, 308, 277, 184, 153, 60, 29, 960, 929,
-  898, 836, 805, 774, 712, 681,
-  650, 588, 557, 526, 464, 433, 402, 340, 309, 278, 216, 185, 154,
-  92, 61, 30, 992, 961, 930, 899, 868, 837, 806, 775, 744, 713, 682,
-  651, 620, 589, 558, 527,
-  496, 465, 434, 403, 372, 341, 310, 279, 248, 217, 186, 155, 124,
-  93, 62, 31, 993, 962, 931, 869, 838, 807, 745, 714, 683, 621, 590,
-  559, 497, 466, 435, 373,
-  342, 311, 249, 218, 187, 125, 94, 63, 994, 963, 870, 839, 746, 715,
-  622, 591, 498, 467, 374, 343, 250, 219, 126, 95, 995, 871, 747, 623,
-  499, 375, 251, 127,
-  900, 776, 652, 528, 404, 280, 156, 932, 901, 808, 777, 684, 653, 560,
-  529, 436, 405, 312, 281, 188, 157, 964, 933, 902, 840, 809, 778, 716,
-  685, 654, 592, 561,
-  530, 468, 437, 406, 344, 313, 282, 220, 189, 158, 996, 965, 934, 903,
-  872, 841, 810, 779, 748, 717, 686, 655, 624, 593, 562, 531, 500, 469,
-  438, 407, 376, 345,
-  314, 283, 252, 221, 190, 159, 997, 966, 935, 873, 842, 811, 749, 718,
-  687, 625, 594, 563, 501, 470, 439, 377, 346, 315, 253, 222, 191, 998,
-  967, 874, 843, 750,
-  719, 626, 595, 502, 471, 378, 347, 254, 223, 999, 875, 751, 627, 503,
-  379, 255, 904, 780, 656, 532, 408, 284, 936, 905, 812, 781, 688, 657,
-  564, 533, 440, 409,
-  316, 285, 968, 937, 906, 844, 813, 782, 720, 689, 658, 596, 565, 534,
-  472, 441, 410, 348, 317, 286, 1000, 969, 938, 907, 876, 845, 814, 783,
-  752, 721, 690, 659,
-  628, 597, 566, 535, 504, 473, 442, 411, 380, 349, 318, 287, 1001, 970,
-  939, 877, 846, 815, 753, 722, 691, 629, 598, 567, 505, 474, 443, 381,
-  350, 319, 1002, 971,
-  878, 847, 754, 723, 630, 599, 506, 475, 382, 351, 1003, 879, 755, 631,
-  507, 383, 908, 784, 660, 536, 412, 940, 909, 816, 785, 692, 661, 568,
-  537, 444, 413, 972,
-  941, 910, 848, 817, 786, 724, 693, 662, 600, 569, 538, 476, 445, 414,
-  1004, 973, 942, 911, 880, 849, 818, 787, 756, 725, 694, 663, 632, 601,
-  570, 539, 508, 477,
-  446, 415, 1005, 974, 943, 881, 850, 819, 757, 726, 695, 633, 602, 571,
-  509, 478, 447, 1006, 975, 882, 851, 758, 727, 634, 603, 510, 479,
-  1007, 883, 759, 635, 511,
-  912, 788, 664, 540, 944, 913, 820, 789, 696, 665, 572, 541, 976, 945,
-  914, 852, 821, 790, 728, 697, 666, 604, 573, 542, 1008, 977, 946, 915,
-  884, 853, 822, 791,
-  760, 729, 698, 667, 636, 605, 574, 543, 1009, 978, 947, 885, 854, 823,
-  761, 730, 699, 637, 606, 575, 1010, 979, 886, 855, 762, 731, 638, 607,
-  1011, 887, 763, 639,
-  916, 792, 668, 948, 917, 824, 793, 700, 669, 980, 949, 918, 856, 825,
-  794, 732, 701, 670, 1012, 981, 950, 919, 888, 857, 826, 795, 764, 733,
-  702, 671, 1013, 982,
-  951, 889, 858, 827, 765, 734, 703, 1014, 983, 890, 859, 766, 735, 1015,
-  891, 767, 920, 796, 952, 921, 828, 797, 984, 953, 922, 860, 829, 798,
-  1016, 985, 954, 923,
-  892, 861, 830, 799, 1017, 986, 955, 893, 862, 831, 1018, 987, 894, 863,
-  1019, 895, 924, 956, 925, 988, 957, 926, 1020, 989, 958, 927, 1021,
-  990, 959, 1022, 991, 1023,
+  0,    32,   1,    64,  33,   2,    96,   65,   34,   128,  3,    97,   66,
+  160,  129,  35,   98,  4,    67,   130,  161,  192,  36,   99,   224,  5,
+  162,  193,  68,   131, 37,   100,  225,  194,  256,  163,  69,   132,  6,
+  226,  257,  288,  195, 101,  164,  38,   258,  7,    227,  289,  133,  320,
+  70,   196,  165,  290, 259,  228,  39,   321,  102,  352,  8,    197,  71,
+  134,  322,  291,  260, 353,  384,  229,  166,  103,  40,   354,  323,  292,
+  135,  385,  198,  261, 72,   9,    416,  167,  386,  355,  230,  324,  104,
+  293,  41,   417,  199, 136,  262,  387,  448,  325,  356,  10,   73,   418,
+  231,  168,  449,  294, 388,  105,  419,  263,  42,   200,  357,  450,  137,
+  480,  74,   326,  232, 11,   389,  169,  295,  420,  106,  451,  481,  358,
+  264,  327,  201,  43,  138,  512,  482,  390,  296,  233,  170,  421,  75,
+  452,  359,  12,   513, 265,  483,  328,  107,  202,  514,  544,  422,  391,
+  453,  139,  44,   234, 484,  297,  360,  171,  76,   515,  545,  266,  329,
+  454,  13,   423,  203, 108,  546,  485,  576,  298,  235,  140,  361,  330,
+  172,  547,  45,   455, 267,  577,  486,  77,   204,  362,  608,  14,   299,
+  578,  109,  236,  487, 609,  331,  141,  579,  46,   15,   173,  610,  363,
+  78,   205,  16,   110, 237,  611,  142,  47,   174,  79,   206,  17,   111,
+  238,  48,   143,  80,  175,  112,  207,  49,   18,   239,  81,   113,  19,
+  50,   82,   114,  51,  83,   115,  640,  516,  392,  268,  144,  20,   672,
+  641,  548,  517,  424, 393,  300,  269,  176,  145,  52,   21,   704,  673,
+  642,  580,  549,  518, 456,  425,  394,  332,  301,  270,  208,  177,  146,
+  84,   53,   22,   736, 705,  674,  643,  612,  581,  550,  519,  488,  457,
+  426,  395,  364,  333, 302,  271,  240,  209,  178,  147,  116,  85,   54,
+  23,   737,  706,  675, 613,  582,  551,  489,  458,  427,  365,  334,  303,
+  241,  210,  179,  117, 86,   55,   738,  707,  614,  583,  490,  459,  366,
+  335,  242,  211,  118, 87,   739,  615,  491,  367,  243,  119,  768,  644,
+  520,  396,  272,  148, 24,   800,  769,  676,  645,  552,  521,  428,  397,
+  304,  273,  180,  149, 56,   25,   832,  801,  770,  708,  677,  646,  584,
+  553,  522,  460,  429, 398,  336,  305,  274,  212,  181,  150,  88,   57,
+  26,   864,  833,  802, 771,  740,  709,  678,  647,  616,  585,  554,  523,
+  492,  461,  430,  399, 368,  337,  306,  275,  244,  213,  182,  151,  120,
+  89,   58,   27,   865, 834,  803,  741,  710,  679,  617,  586,  555,  493,
+  462,  431,  369,  338, 307,  245,  214,  183,  121,  90,   59,   866,  835,
+  742,  711,  618,  587, 494,  463,  370,  339,  246,  215,  122,  91,   867,
+  743,  619,  495,  371, 247,  123,  896,  772,  648,  524,  400,  276,  152,
+  28,   928,  897,  804, 773,  680,  649,  556,  525,  432,  401,  308,  277,
+  184,  153,  60,   29,  960,  929,  898,  836,  805,  774,  712,  681,  650,
+  588,  557,  526,  464, 433,  402,  340,  309,  278,  216,  185,  154,  92,
+  61,   30,   992,  961, 930,  899,  868,  837,  806,  775,  744,  713,  682,
+  651,  620,  589,  558, 527,  496,  465,  434,  403,  372,  341,  310,  279,
+  248,  217,  186,  155, 124,  93,   62,   31,   993,  962,  931,  869,  838,
+  807,  745,  714,  683, 621,  590,  559,  497,  466,  435,  373,  342,  311,
+  249,  218,  187,  125, 94,   63,   994,  963,  870,  839,  746,  715,  622,
+  591,  498,  467,  374, 343,  250,  219,  126,  95,   995,  871,  747,  623,
+  499,  375,  251,  127, 900,  776,  652,  528,  404,  280,  156,  932,  901,
+  808,  777,  684,  653, 560,  529,  436,  405,  312,  281,  188,  157,  964,
+  933,  902,  840,  809, 778,  716,  685,  654,  592,  561,  530,  468,  437,
+  406,  344,  313,  282, 220,  189,  158,  996,  965,  934,  903,  872,  841,
+  810,  779,  748,  717, 686,  655,  624,  593,  562,  531,  500,  469,  438,
+  407,  376,  345,  314, 283,  252,  221,  190,  159,  997,  966,  935,  873,
+  842,  811,  749,  718, 687,  625,  594,  563,  501,  470,  439,  377,  346,
+  315,  253,  222,  191, 998,  967,  874,  843,  750,  719,  626,  595,  502,
+  471,  378,  347,  254, 223,  999,  875,  751,  627,  503,  379,  255,  904,
+  780,  656,  532,  408, 284,  936,  905,  812,  781,  688,  657,  564,  533,
+  440,  409,  316,  285, 968,  937,  906,  844,  813,  782,  720,  689,  658,
+  596,  565,  534,  472, 441,  410,  348,  317,  286,  1000, 969,  938,  907,
+  876,  845,  814,  783, 752,  721,  690,  659,  628,  597,  566,  535,  504,
+  473,  442,  411,  380, 349,  318,  287,  1001, 970,  939,  877,  846,  815,
+  753,  722,  691,  629, 598,  567,  505,  474,  443,  381,  350,  319,  1002,
+  971,  878,  847,  754, 723,  630,  599,  506,  475,  382,  351,  1003, 879,
+  755,  631,  507,  383, 908,  784,  660,  536,  412,  940,  909,  816,  785,
+  692,  661,  568,  537, 444,  413,  972,  941,  910,  848,  817,  786,  724,
+  693,  662,  600,  569, 538,  476,  445,  414,  1004, 973,  942,  911,  880,
+  849,  818,  787,  756, 725,  694,  663,  632,  601,  570,  539,  508,  477,
+  446,  415,  1005, 974, 943,  881,  850,  819,  757,  726,  695,  633,  602,
+  571,  509,  478,  447, 1006, 975,  882,  851,  758,  727,  634,  603,  510,
+  479,  1007, 883,  759, 635,  511,  912,  788,  664,  540,  944,  913,  820,
+  789,  696,  665,  572, 541,  976,  945,  914,  852,  821,  790,  728,  697,
+  666,  604,  573,  542, 1008, 977,  946,  915,  884,  853,  822,  791,  760,
+  729,  698,  667,  636, 605,  574,  543,  1009, 978,  947,  885,  854,  823,
+  761,  730,  699,  637, 606,  575,  1010, 979,  886,  855,  762,  731,  638,
+  607,  1011, 887,  763, 639,  916,  792,  668,  948,  917,  824,  793,  700,
+  669,  980,  949,  918, 856,  825,  794,  732,  701,  670,  1012, 981,  950,
+  919,  888,  857,  826, 795,  764,  733,  702,  671,  1013, 982,  951,  889,
+  858,  827,  765,  734, 703,  1014, 983,  890,  859,  766,  735,  1015, 891,
+  767,  920,  796,  952, 921,  828,  797,  984,  953,  922,  860,  829,  798,
+  1016, 985,  954,  923, 892,  861,  830,  799,  1017, 986,  955,  893,  862,
+  831,  1018, 987,  894, 863,  1019, 895,  924,  956,  925,  988,  957,  926,
+  1020, 989,  958,  927, 1021, 990,  959,  1022, 991,  1023,
 };
 
 // Neighborhood 5-tuples for various scans and blocksizes,
@@ -235,301 +196,321 @@ DECLARE_ALIGNED(16, static const int16_t, default_scan_32x32[1024]) = {
 // -1 indicates the neighbor does not exist.
 DECLARE_ALIGNED(16, static const int16_t,
                 default_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 0, 0, 1, 4, 4, 4, 1, 1, 8, 8, 5, 8, 2, 2, 2, 5, 9, 12, 6, 9,
-  3, 6, 10, 13, 7, 10, 11, 14, 0, 0,
+  0, 0, 0, 0, 0,  0, 1, 4, 4, 4,  1,  1, 8,  8,  5,  8, 2,
+  2, 2, 5, 9, 12, 6, 9, 3, 6, 10, 13, 7, 10, 11, 14, 0, 0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 col_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 4, 4, 0, 0, 8, 8, 1, 1, 5, 5, 1, 1, 9, 9, 2, 2, 6, 6, 2, 2, 3,
-  3, 10, 10, 7, 7, 11, 11, 0, 0,
+  0, 0, 0, 0, 4, 4, 0, 0, 8, 8,  1,  1, 5, 5,  1,  1, 9,
+  9, 2, 2, 6, 6, 2, 2, 3, 3, 10, 10, 7, 7, 11, 11, 0, 0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 row_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 0, 0, 1, 1, 4, 4, 2, 2, 5, 5, 4, 4, 8, 8, 6, 6, 8, 8, 9, 9, 12,
-  12, 10, 10, 13, 13, 14, 14, 0, 0,
+  0, 0, 0, 0, 0, 0, 1, 1,  4,  4,  2,  2,  5,  5,  4,  4, 8,
+  8, 6, 6, 8, 8, 9, 9, 12, 12, 10, 10, 13, 13, 14, 14, 0, 0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 col_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 8, 8, 0, 0, 16, 16, 1, 1, 24, 24, 9, 9, 1, 1, 32, 32, 17, 17, 2,
-  2, 25, 25, 10, 10, 40, 40, 2, 2, 18, 18, 33, 33, 3, 3, 48, 48, 11, 11, 26,
-  26, 3, 3, 41, 41, 19, 19, 34, 34, 4, 4, 27, 27, 12, 12, 49, 49, 42, 42, 20,
-  20, 4, 4, 35, 35, 5, 5, 28, 28, 50, 50, 43, 43, 13, 13, 36, 36, 5, 5, 21, 21,
-  51, 51, 29, 29, 6, 6, 44, 44, 14, 14, 6, 6, 37, 37, 52, 52, 22, 22, 7, 7, 30,
-  30, 45, 45, 15, 15, 38, 38, 23, 23, 53, 53, 31, 31, 46, 46, 39, 39, 54, 54,
-  47, 47, 55, 55, 0, 0,
+  0,  0,  0,  0,  8,  8,  0,  0,  16, 16, 1,  1,  24, 24, 9,  9,  1,  1,  32,
+  32, 17, 17, 2,  2,  25, 25, 10, 10, 40, 40, 2,  2,  18, 18, 33, 33, 3,  3,
+  48, 48, 11, 11, 26, 26, 3,  3,  41, 41, 19, 19, 34, 34, 4,  4,  27, 27, 12,
+  12, 49, 49, 42, 42, 20, 20, 4,  4,  35, 35, 5,  5,  28, 28, 50, 50, 43, 43,
+  13, 13, 36, 36, 5,  5,  21, 21, 51, 51, 29, 29, 6,  6,  44, 44, 14, 14, 6,
+  6,  37, 37, 52, 52, 22, 22, 7,  7,  30, 30, 45, 45, 15, 15, 38, 38, 23, 23,
+  53, 53, 31, 31, 46, 46, 39, 39, 54, 54, 47, 47, 55, 55, 0,  0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 row_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 1, 1, 0, 0, 8, 8, 2, 2, 8, 8, 9, 9, 3, 3, 16, 16, 10, 10, 16, 16,
-  4, 4, 17, 17, 24, 24, 11, 11, 18, 18, 25, 25, 24, 24, 5, 5, 12, 12, 19, 19,
-  32, 32, 26, 26, 6, 6, 33, 33, 32, 32, 20, 20, 27, 27, 40, 40, 13, 13, 34, 34,
-  40, 40, 41, 41, 28, 28, 35, 35, 48, 48, 21, 21, 42, 42, 14, 14, 48, 48, 36,
-  36, 49, 49, 43, 43, 29, 29, 56, 56, 22, 22, 50, 50, 57, 57, 44, 44, 37, 37,
-  51, 51, 30, 30, 58, 58, 52, 52, 45, 45, 59, 59, 38, 38, 60, 60, 46, 46, 53,
-  53, 54, 54, 61, 61, 62, 62, 0, 0,
+  0,  0,  0,  0,  1,  1,  0,  0,  8,  8,  2,  2,  8,  8,  9,  9,  3,  3,  16,
+  16, 10, 10, 16, 16, 4,  4,  17, 17, 24, 24, 11, 11, 18, 18, 25, 25, 24, 24,
+  5,  5,  12, 12, 19, 19, 32, 32, 26, 26, 6,  6,  33, 33, 32, 32, 20, 20, 27,
+  27, 40, 40, 13, 13, 34, 34, 40, 40, 41, 41, 28, 28, 35, 35, 48, 48, 21, 21,
+  42, 42, 14, 14, 48, 48, 36, 36, 49, 49, 43, 43, 29, 29, 56, 56, 22, 22, 50,
+  50, 57, 57, 44, 44, 37, 37, 51, 51, 30, 30, 58, 58, 52, 52, 45, 45, 59, 59,
+  38, 38, 60, 60, 46, 46, 53, 53, 54, 54, 61, 61, 62, 62, 0,  0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 default_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 0, 0, 8, 8, 1, 8, 1, 1, 9, 16, 16, 16, 2, 9, 2, 2, 10, 17, 17,
-  24, 24, 24, 3, 10, 3, 3, 18, 25, 25, 32, 11, 18, 32, 32, 4, 11, 26, 33, 19,
-  26, 4, 4, 33, 40, 12, 19, 40, 40, 5, 12, 27, 34, 34, 41, 20, 27, 13, 20, 5,
-  5, 41, 48, 48, 48, 28, 35, 35, 42, 21, 28, 6, 6, 6, 13, 42, 49, 49, 56, 36,
-  43, 14, 21, 29, 36, 7, 14, 43, 50, 50, 57, 22, 29, 37, 44, 15, 22, 44, 51,
-  51, 58, 30, 37, 23, 30, 52, 59, 45, 52, 38, 45, 31, 38, 53, 60, 46, 53, 39,
-  46, 54, 61, 47, 54, 55, 62, 0, 0,
+  0,  0,  0,  0,  0,  0,  8,  8,  1,  8,  1,  1,  9,  16, 16, 16, 2,  9,  2,
+  2,  10, 17, 17, 24, 24, 24, 3,  10, 3,  3,  18, 25, 25, 32, 11, 18, 32, 32,
+  4,  11, 26, 33, 19, 26, 4,  4,  33, 40, 12, 19, 40, 40, 5,  12, 27, 34, 34,
+  41, 20, 27, 13, 20, 5,  5,  41, 48, 48, 48, 28, 35, 35, 42, 21, 28, 6,  6,
+  6,  13, 42, 49, 49, 56, 36, 43, 14, 21, 29, 36, 7,  14, 43, 50, 50, 57, 22,
+  29, 37, 44, 15, 22, 44, 51, 51, 58, 30, 37, 23, 30, 52, 59, 45, 52, 38, 45,
+  31, 38, 53, 60, 46, 53, 39, 46, 54, 61, 47, 54, 55, 62, 0,  0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 col_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 16, 16, 32, 32, 0, 0, 48, 48, 1, 1, 64, 64,
-  17, 17, 80, 80, 33, 33, 1, 1, 49, 49, 96, 96, 2, 2, 65, 65,
-  18, 18, 112, 112, 34, 34, 81, 81, 2, 2, 50, 50, 128, 128, 3, 3,
-  97, 97, 19, 19, 66, 66, 144, 144, 82, 82, 35, 35, 113, 113, 3, 3,
-  51, 51, 160, 160, 4, 4, 98, 98, 129, 129, 67, 67, 20, 20, 83, 83,
-  114, 114, 36, 36, 176, 176, 4, 4, 145, 145, 52, 52, 99, 99, 5, 5,
-  130, 130, 68, 68, 192, 192, 161, 161, 21, 21, 115, 115, 84, 84, 37, 37,
-  146, 146, 208, 208, 53, 53, 5, 5, 100, 100, 177, 177, 131, 131, 69, 69,
-  6, 6, 224, 224, 116, 116, 22, 22, 162, 162, 85, 85, 147, 147, 38, 38,
-  193, 193, 101, 101, 54, 54, 6, 6, 132, 132, 178, 178, 70, 70, 163, 163,
-  209, 209, 7, 7, 117, 117, 23, 23, 148, 148, 7, 7, 86, 86, 194, 194,
-  225, 225, 39, 39, 179, 179, 102, 102, 133, 133, 55, 55, 164, 164, 8, 8,
-  71, 71, 210, 210, 118, 118, 149, 149, 195, 195, 24, 24, 87, 87, 40, 40,
-  56, 56, 134, 134, 180, 180, 226, 226, 103, 103, 8, 8, 165, 165, 211, 211,
-  72, 72, 150, 150, 9, 9, 119, 119, 25, 25, 88, 88, 196, 196, 41, 41,
-  135, 135, 181, 181, 104, 104, 57, 57, 227, 227, 166, 166, 120, 120, 151, 151,
-  197, 197, 73, 73, 9, 9, 212, 212, 89, 89, 136, 136, 182, 182, 10, 10,
-  26, 26, 105, 105, 167, 167, 228, 228, 152, 152, 42, 42, 121, 121, 213, 213,
-  58, 58, 198, 198, 74, 74, 137, 137, 183, 183, 168, 168, 10, 10, 90, 90,
-  229, 229, 11, 11, 106, 106, 214, 214, 153, 153, 27, 27, 199, 199, 43, 43,
-  184, 184, 122, 122, 169, 169, 230, 230, 59, 59, 11, 11, 75, 75, 138, 138,
-  200, 200, 215, 215, 91, 91, 12, 12, 28, 28, 185, 185, 107, 107, 154, 154,
-  44, 44, 231, 231, 216, 216, 60, 60, 123, 123, 12, 12, 76, 76, 201, 201,
-  170, 170, 232, 232, 139, 139, 92, 92, 13, 13, 108, 108, 29, 29, 186, 186,
-  217, 217, 155, 155, 45, 45, 13, 13, 61, 61, 124, 124, 14, 14, 233, 233,
-  77, 77, 14, 14, 171, 171, 140, 140, 202, 202, 30, 30, 93, 93, 109, 109,
-  46, 46, 156, 156, 62, 62, 187, 187, 15, 15, 125, 125, 218, 218, 78, 78,
-  31, 31, 172, 172, 47, 47, 141, 141, 94, 94, 234, 234, 203, 203, 63, 63,
-  110, 110, 188, 188, 157, 157, 126, 126, 79, 79, 173, 173, 95, 95, 219, 219,
-  142, 142, 204, 204, 235, 235, 111, 111, 158, 158, 127, 127, 189, 189, 220,
-  220, 143, 143, 174, 174, 205, 205, 236, 236, 159, 159, 190, 190, 221, 221,
-  175, 175, 237, 237, 206, 206, 222, 222, 191, 191, 238, 238, 207, 207, 223,
-  223, 239, 239, 0, 0,
+  0,   0,   0,   0,   16,  16,  32,  32,  0,   0,   48,  48,  1,   1,   64,
+  64,  17,  17,  80,  80,  33,  33,  1,   1,   49,  49,  96,  96,  2,   2,
+  65,  65,  18,  18,  112, 112, 34,  34,  81,  81,  2,   2,   50,  50,  128,
+  128, 3,   3,   97,  97,  19,  19,  66,  66,  144, 144, 82,  82,  35,  35,
+  113, 113, 3,   3,   51,  51,  160, 160, 4,   4,   98,  98,  129, 129, 67,
+  67,  20,  20,  83,  83,  114, 114, 36,  36,  176, 176, 4,   4,   145, 145,
+  52,  52,  99,  99,  5,   5,   130, 130, 68,  68,  192, 192, 161, 161, 21,
+  21,  115, 115, 84,  84,  37,  37,  146, 146, 208, 208, 53,  53,  5,   5,
+  100, 100, 177, 177, 131, 131, 69,  69,  6,   6,   224, 224, 116, 116, 22,
+  22,  162, 162, 85,  85,  147, 147, 38,  38,  193, 193, 101, 101, 54,  54,
+  6,   6,   132, 132, 178, 178, 70,  70,  163, 163, 209, 209, 7,   7,   117,
+  117, 23,  23,  148, 148, 7,   7,   86,  86,  194, 194, 225, 225, 39,  39,
+  179, 179, 102, 102, 133, 133, 55,  55,  164, 164, 8,   8,   71,  71,  210,
+  210, 118, 118, 149, 149, 195, 195, 24,  24,  87,  87,  40,  40,  56,  56,
+  134, 134, 180, 180, 226, 226, 103, 103, 8,   8,   165, 165, 211, 211, 72,
+  72,  150, 150, 9,   9,   119, 119, 25,  25,  88,  88,  196, 196, 41,  41,
+  135, 135, 181, 181, 104, 104, 57,  57,  227, 227, 166, 166, 120, 120, 151,
+  151, 197, 197, 73,  73,  9,   9,   212, 212, 89,  89,  136, 136, 182, 182,
+  10,  10,  26,  26,  105, 105, 167, 167, 228, 228, 152, 152, 42,  42,  121,
+  121, 213, 213, 58,  58,  198, 198, 74,  74,  137, 137, 183, 183, 168, 168,
+  10,  10,  90,  90,  229, 229, 11,  11,  106, 106, 214, 214, 153, 153, 27,
+  27,  199, 199, 43,  43,  184, 184, 122, 122, 169, 169, 230, 230, 59,  59,
+  11,  11,  75,  75,  138, 138, 200, 200, 215, 215, 91,  91,  12,  12,  28,
+  28,  185, 185, 107, 107, 154, 154, 44,  44,  231, 231, 216, 216, 60,  60,
+  123, 123, 12,  12,  76,  76,  201, 201, 170, 170, 232, 232, 139, 139, 92,
+  92,  13,  13,  108, 108, 29,  29,  186, 186, 217, 217, 155, 155, 45,  45,
+  13,  13,  61,  61,  124, 124, 14,  14,  233, 233, 77,  77,  14,  14,  171,
+  171, 140, 140, 202, 202, 30,  30,  93,  93,  109, 109, 46,  46,  156, 156,
+  62,  62,  187, 187, 15,  15,  125, 125, 218, 218, 78,  78,  31,  31,  172,
+  172, 47,  47,  141, 141, 94,  94,  234, 234, 203, 203, 63,  63,  110, 110,
+  188, 188, 157, 157, 126, 126, 79,  79,  173, 173, 95,  95,  219, 219, 142,
+  142, 204, 204, 235, 235, 111, 111, 158, 158, 127, 127, 189, 189, 220, 220,
+  143, 143, 174, 174, 205, 205, 236, 236, 159, 159, 190, 190, 221, 221, 175,
+  175, 237, 237, 206, 206, 222, 222, 191, 191, 238, 238, 207, 207, 223, 223,
+  239, 239, 0,   0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 row_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 16, 16, 3, 3, 17, 17,
-  16, 16, 4, 4, 32, 32, 18, 18, 5, 5, 33, 33, 32, 32, 19, 19,
-  48, 48, 6, 6, 34, 34, 20, 20, 49, 49, 48, 48, 7, 7, 35, 35,
-  64, 64, 21, 21, 50, 50, 36, 36, 64, 64, 8, 8, 65, 65, 51, 51,
-  22, 22, 37, 37, 80, 80, 66, 66, 9, 9, 52, 52, 23, 23, 81, 81,
-  67, 67, 80, 80, 38, 38, 10, 10, 53, 53, 82, 82, 96, 96, 68, 68,
-  24, 24, 97, 97, 83, 83, 39, 39, 96, 96, 54, 54, 11, 11, 69, 69,
-  98, 98, 112, 112, 84, 84, 25, 25, 40, 40, 55, 55, 113, 113, 99, 99,
-  12, 12, 70, 70, 112, 112, 85, 85, 26, 26, 114, 114, 100, 100, 128, 128,
-  41, 41, 56, 56, 71, 71, 115, 115, 13, 13, 86, 86, 129, 129, 101, 101,
-  128, 128, 72, 72, 130, 130, 116, 116, 27, 27, 57, 57, 14, 14, 87, 87,
-  42, 42, 144, 144, 102, 102, 131, 131, 145, 145, 117, 117, 73, 73, 144, 144,
-  88, 88, 132, 132, 103, 103, 28, 28, 58, 58, 146, 146, 118, 118, 43, 43,
-  160, 160, 147, 147, 89, 89, 104, 104, 133, 133, 161, 161, 119, 119, 160, 160,
-  74, 74, 134, 134, 148, 148, 29, 29, 59, 59, 162, 162, 176, 176, 44, 44,
-  120, 120, 90, 90, 105, 105, 163, 163, 177, 177, 149, 149, 176, 176, 135, 135,
-  164, 164, 178, 178, 30, 30, 150, 150, 192, 192, 75, 75, 121, 121, 60, 60,
-  136, 136, 193, 193, 106, 106, 151, 151, 179, 179, 192, 192, 45, 45, 165, 165,
-  166, 166, 194, 194, 91, 91, 180, 180, 137, 137, 208, 208, 122, 122, 152, 152,
-  208, 208, 195, 195, 76, 76, 167, 167, 209, 209, 181, 181, 224, 224, 107, 107,
-  196, 196, 61, 61, 153, 153, 224, 224, 182, 182, 168, 168, 210, 210, 46, 46,
-  138, 138, 92, 92, 183, 183, 225, 225, 211, 211, 240, 240, 197, 197, 169, 169,
-  123, 123, 154, 154, 198, 198, 77, 77, 212, 212, 184, 184, 108, 108, 226, 226,
-  199, 199, 62, 62, 227, 227, 241, 241, 139, 139, 213, 213, 170, 170, 185, 185,
-  155, 155, 228, 228, 242, 242, 124, 124, 93, 93, 200, 200, 243, 243, 214, 214,
-  215, 215, 229, 229, 140, 140, 186, 186, 201, 201, 78, 78, 171, 171, 109, 109,
-  156, 156, 244, 244, 216, 216, 230, 230, 94, 94, 245, 245, 231, 231, 125, 125,
-  202, 202, 246, 246, 232, 232, 172, 172, 217, 217, 141, 141, 110, 110, 157,
-  157, 187, 187, 247, 247, 126, 126, 233, 233, 218, 218, 248, 248, 188, 188,
-  203, 203, 142, 142, 173, 173, 158, 158, 249, 249, 234, 234, 204, 204, 219,
-  219, 174, 174, 189, 189, 250, 250, 220, 220, 190, 190, 205, 205, 235, 235,
-  206, 206, 236, 236, 251, 251, 221, 221, 252, 252, 222, 222, 237, 237, 238,
-  238, 253, 253, 254, 254, 0, 0,
+  0,   0,   0,   0,   1,   1,   0,   0,   2,   2,   16,  16,  3,   3,   17,
+  17,  16,  16,  4,   4,   32,  32,  18,  18,  5,   5,   33,  33,  32,  32,
+  19,  19,  48,  48,  6,   6,   34,  34,  20,  20,  49,  49,  48,  48,  7,
+  7,   35,  35,  64,  64,  21,  21,  50,  50,  36,  36,  64,  64,  8,   8,
+  65,  65,  51,  51,  22,  22,  37,  37,  80,  80,  66,  66,  9,   9,   52,
+  52,  23,  23,  81,  81,  67,  67,  80,  80,  38,  38,  10,  10,  53,  53,
+  82,  82,  96,  96,  68,  68,  24,  24,  97,  97,  83,  83,  39,  39,  96,
+  96,  54,  54,  11,  11,  69,  69,  98,  98,  112, 112, 84,  84,  25,  25,
+  40,  40,  55,  55,  113, 113, 99,  99,  12,  12,  70,  70,  112, 112, 85,
+  85,  26,  26,  114, 114, 100, 100, 128, 128, 41,  41,  56,  56,  71,  71,
+  115, 115, 13,  13,  86,  86,  129, 129, 101, 101, 128, 128, 72,  72,  130,
+  130, 116, 116, 27,  27,  57,  57,  14,  14,  87,  87,  42,  42,  144, 144,
+  102, 102, 131, 131, 145, 145, 117, 117, 73,  73,  144, 144, 88,  88,  132,
+  132, 103, 103, 28,  28,  58,  58,  146, 146, 118, 118, 43,  43,  160, 160,
+  147, 147, 89,  89,  104, 104, 133, 133, 161, 161, 119, 119, 160, 160, 74,
+  74,  134, 134, 148, 148, 29,  29,  59,  59,  162, 162, 176, 176, 44,  44,
+  120, 120, 90,  90,  105, 105, 163, 163, 177, 177, 149, 149, 176, 176, 135,
+  135, 164, 164, 178, 178, 30,  30,  150, 150, 192, 192, 75,  75,  121, 121,
+  60,  60,  136, 136, 193, 193, 106, 106, 151, 151, 179, 179, 192, 192, 45,
+  45,  165, 165, 166, 166, 194, 194, 91,  91,  180, 180, 137, 137, 208, 208,
+  122, 122, 152, 152, 208, 208, 195, 195, 76,  76,  167, 167, 209, 209, 181,
+  181, 224, 224, 107, 107, 196, 196, 61,  61,  153, 153, 224, 224, 182, 182,
+  168, 168, 210, 210, 46,  46,  138, 138, 92,  92,  183, 183, 225, 225, 211,
+  211, 240, 240, 197, 197, 169, 169, 123, 123, 154, 154, 198, 198, 77,  77,
+  212, 212, 184, 184, 108, 108, 226, 226, 199, 199, 62,  62,  227, 227, 241,
+  241, 139, 139, 213, 213, 170, 170, 185, 185, 155, 155, 228, 228, 242, 242,
+  124, 124, 93,  93,  200, 200, 243, 243, 214, 214, 215, 215, 229, 229, 140,
+  140, 186, 186, 201, 201, 78,  78,  171, 171, 109, 109, 156, 156, 244, 244,
+  216, 216, 230, 230, 94,  94,  245, 245, 231, 231, 125, 125, 202, 202, 246,
+  246, 232, 232, 172, 172, 217, 217, 141, 141, 110, 110, 157, 157, 187, 187,
+  247, 247, 126, 126, 233, 233, 218, 218, 248, 248, 188, 188, 203, 203, 142,
+  142, 173, 173, 158, 158, 249, 249, 234, 234, 204, 204, 219, 219, 174, 174,
+  189, 189, 250, 250, 220, 220, 190, 190, 205, 205, 235, 235, 206, 206, 236,
+  236, 251, 251, 221, 221, 252, 252, 222, 222, 237, 237, 238, 238, 253, 253,
+  254, 254, 0,   0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 default_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 0, 0, 16, 16, 1, 16, 1, 1, 32, 32, 17, 32,
-  2, 17, 2, 2, 48, 48, 18, 33, 33, 48, 3, 18, 49, 64, 64, 64,
-  34, 49, 3, 3, 19, 34, 50, 65, 4, 19, 65, 80, 80, 80, 35, 50,
-  4, 4, 20, 35, 66, 81, 81, 96, 51, 66, 96, 96, 5, 20, 36, 51,
-  82, 97, 21, 36, 67, 82, 97, 112, 5, 5, 52, 67, 112, 112, 37, 52,
-  6, 21, 83, 98, 98, 113, 68, 83, 6, 6, 113, 128, 22, 37, 53, 68,
-  84, 99, 99, 114, 128, 128, 114, 129, 69, 84, 38, 53, 7, 22, 7, 7,
-  129, 144, 23, 38, 54, 69, 100, 115, 85, 100, 115, 130, 144, 144, 130, 145,
-  39, 54, 70, 85, 8, 23, 55, 70, 116, 131, 101, 116, 145, 160, 24, 39,
-  8, 8, 86, 101, 131, 146, 160, 160, 146, 161, 71, 86, 40, 55, 9, 24,
-  117, 132, 102, 117, 161, 176, 132, 147, 56, 71, 87, 102, 25, 40, 147, 162,
-  9, 9, 176, 176, 162, 177, 72, 87, 41, 56, 118, 133, 133, 148, 103, 118,
-  10, 25, 148, 163, 57, 72, 88, 103, 177, 192, 26, 41, 163, 178, 192, 192,
-  10, 10, 119, 134, 73, 88, 149, 164, 104, 119, 134, 149, 42, 57, 178, 193,
-  164, 179, 11, 26, 58, 73, 193, 208, 89, 104, 135, 150, 120, 135, 27, 42,
-  74, 89, 208, 208, 150, 165, 179, 194, 165, 180, 105, 120, 194, 209, 43, 58,
-  11, 11, 136, 151, 90, 105, 151, 166, 180, 195, 59, 74, 121, 136, 209, 224,
-  195, 210, 224, 224, 166, 181, 106, 121, 75, 90, 12, 27, 181, 196, 12, 12,
-  210, 225, 152, 167, 167, 182, 137, 152, 28, 43, 196, 211, 122, 137, 91, 106,
-  225, 240, 44, 59, 13, 28, 107, 122, 182, 197, 168, 183, 211, 226, 153, 168,
-  226, 241, 60, 75, 197, 212, 138, 153, 29, 44, 76, 91, 13, 13, 183, 198,
-  123, 138, 45, 60, 212, 227, 198, 213, 154, 169, 169, 184, 227, 242, 92, 107,
-  61, 76, 139, 154, 14, 29, 14, 14, 184, 199, 213, 228, 108, 123, 199, 214,
-  228, 243, 77, 92, 30, 45, 170, 185, 155, 170, 185, 200, 93, 108, 124, 139,
-  214, 229, 46, 61, 200, 215, 229, 244, 15, 30, 109, 124, 62, 77, 140, 155,
-  215, 230, 31, 46, 171, 186, 186, 201, 201, 216, 78, 93, 230, 245, 125, 140,
-  47, 62, 216, 231, 156, 171, 94, 109, 231, 246, 141, 156, 63, 78, 202, 217,
-  187, 202, 110, 125, 217, 232, 172, 187, 232, 247, 79, 94, 157, 172, 126, 141,
-  203, 218, 95, 110, 233, 248, 218, 233, 142, 157, 111, 126, 173, 188, 188, 203,
-  234, 249, 219, 234, 127, 142, 158, 173, 204, 219, 189, 204, 143, 158, 235,
-  250, 174, 189, 205, 220, 159, 174, 220, 235, 221, 236, 175, 190, 190, 205,
-  236, 251, 206, 221, 237, 252, 191, 206, 222, 237, 207, 222, 238, 253, 223,
-  238, 239, 254, 0, 0,
+  0,   0,   0,   0,   0,   0,   16,  16,  1,   16,  1,   1,   32,  32,  17,
+  32,  2,   17,  2,   2,   48,  48,  18,  33,  33,  48,  3,   18,  49,  64,
+  64,  64,  34,  49,  3,   3,   19,  34,  50,  65,  4,   19,  65,  80,  80,
+  80,  35,  50,  4,   4,   20,  35,  66,  81,  81,  96,  51,  66,  96,  96,
+  5,   20,  36,  51,  82,  97,  21,  36,  67,  82,  97,  112, 5,   5,   52,
+  67,  112, 112, 37,  52,  6,   21,  83,  98,  98,  113, 68,  83,  6,   6,
+  113, 128, 22,  37,  53,  68,  84,  99,  99,  114, 128, 128, 114, 129, 69,
+  84,  38,  53,  7,   22,  7,   7,   129, 144, 23,  38,  54,  69,  100, 115,
+  85,  100, 115, 130, 144, 144, 130, 145, 39,  54,  70,  85,  8,   23,  55,
+  70,  116, 131, 101, 116, 145, 160, 24,  39,  8,   8,   86,  101, 131, 146,
+  160, 160, 146, 161, 71,  86,  40,  55,  9,   24,  117, 132, 102, 117, 161,
+  176, 132, 147, 56,  71,  87,  102, 25,  40,  147, 162, 9,   9,   176, 176,
+  162, 177, 72,  87,  41,  56,  118, 133, 133, 148, 103, 118, 10,  25,  148,
+  163, 57,  72,  88,  103, 177, 192, 26,  41,  163, 178, 192, 192, 10,  10,
+  119, 134, 73,  88,  149, 164, 104, 119, 134, 149, 42,  57,  178, 193, 164,
+  179, 11,  26,  58,  73,  193, 208, 89,  104, 135, 150, 120, 135, 27,  42,
+  74,  89,  208, 208, 150, 165, 179, 194, 165, 180, 105, 120, 194, 209, 43,
+  58,  11,  11,  136, 151, 90,  105, 151, 166, 180, 195, 59,  74,  121, 136,
+  209, 224, 195, 210, 224, 224, 166, 181, 106, 121, 75,  90,  12,  27,  181,
+  196, 12,  12,  210, 225, 152, 167, 167, 182, 137, 152, 28,  43,  196, 211,
+  122, 137, 91,  106, 225, 240, 44,  59,  13,  28,  107, 122, 182, 197, 168,
+  183, 211, 226, 153, 168, 226, 241, 60,  75,  197, 212, 138, 153, 29,  44,
+  76,  91,  13,  13,  183, 198, 123, 138, 45,  60,  212, 227, 198, 213, 154,
+  169, 169, 184, 227, 242, 92,  107, 61,  76,  139, 154, 14,  29,  14,  14,
+  184, 199, 213, 228, 108, 123, 199, 214, 228, 243, 77,  92,  30,  45,  170,
+  185, 155, 170, 185, 200, 93,  108, 124, 139, 214, 229, 46,  61,  200, 215,
+  229, 244, 15,  30,  109, 124, 62,  77,  140, 155, 215, 230, 31,  46,  171,
+  186, 186, 201, 201, 216, 78,  93,  230, 245, 125, 140, 47,  62,  216, 231,
+  156, 171, 94,  109, 231, 246, 141, 156, 63,  78,  202, 217, 187, 202, 110,
+  125, 217, 232, 172, 187, 232, 247, 79,  94,  157, 172, 126, 141, 203, 218,
+  95,  110, 233, 248, 218, 233, 142, 157, 111, 126, 173, 188, 188, 203, 234,
+  249, 219, 234, 127, 142, 158, 173, 204, 219, 189, 204, 143, 158, 235, 250,
+  174, 189, 205, 220, 159, 174, 220, 235, 221, 236, 175, 190, 190, 205, 236,
+  251, 206, 221, 237, 252, 191, 206, 222, 237, 207, 222, 238, 253, 223, 238,
+  239, 254, 0,   0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t,
                 default_scan_32x32_neighbors[1025 * MAX_NEIGHBORS]) = {
-  0, 0, 0, 0, 0, 0, 32, 32, 1, 32, 1, 1, 64, 64, 33, 64,
-  2, 33, 96, 96, 2, 2, 65, 96, 34, 65, 128, 128, 97, 128, 3, 34,
-  66, 97, 3, 3, 35, 66, 98, 129, 129, 160, 160, 160, 4, 35, 67, 98,
-  192, 192, 4, 4, 130, 161, 161, 192, 36, 67, 99, 130, 5, 36, 68, 99,
-  193, 224, 162, 193, 224, 224, 131, 162, 37, 68, 100, 131, 5, 5, 194, 225,
-  225, 256, 256, 256, 163, 194, 69, 100, 132, 163, 6, 37, 226, 257, 6, 6,
-  195, 226, 257, 288, 101, 132, 288, 288, 38, 69, 164, 195, 133, 164, 258, 289,
-  227, 258, 196, 227, 7, 38, 289, 320, 70, 101, 320, 320, 7, 7, 165, 196,
-  39, 70, 102, 133, 290, 321, 259, 290, 228, 259, 321, 352, 352, 352, 197, 228,
-  134, 165, 71, 102, 8, 39, 322, 353, 291, 322, 260, 291, 103, 134, 353, 384,
-  166, 197, 229, 260, 40, 71, 8, 8, 384, 384, 135, 166, 354, 385, 323, 354,
-  198, 229, 292, 323, 72, 103, 261, 292, 9, 40, 385, 416, 167, 198, 104, 135,
-  230, 261, 355, 386, 416, 416, 293, 324, 324, 355, 9, 9, 41, 72, 386, 417,
-  199, 230, 136, 167, 417, 448, 262, 293, 356, 387, 73, 104, 387, 418, 231, 262,
-  10, 41, 168, 199, 325, 356, 418, 449, 105, 136, 448, 448, 42, 73, 294, 325,
-  200, 231, 10, 10, 357, 388, 137, 168, 263, 294, 388, 419, 74, 105, 419, 450,
-  449, 480, 326, 357, 232, 263, 295, 326, 169, 200, 11, 42, 106, 137, 480, 480,
-  450, 481, 358, 389, 264, 295, 201, 232, 138, 169, 389, 420, 43, 74, 420, 451,
-  327, 358, 11, 11, 481, 512, 233, 264, 451, 482, 296, 327, 75, 106, 170, 201,
-  482, 513, 512, 512, 390, 421, 359, 390, 421, 452, 107, 138, 12, 43, 202, 233,
-  452, 483, 265, 296, 328, 359, 139, 170, 44, 75, 483, 514, 513, 544, 234, 265,
-  297, 328, 422, 453, 12, 12, 391, 422, 171, 202, 76, 107, 514, 545, 453, 484,
-  544, 544, 266, 297, 203, 234, 108, 139, 329, 360, 298, 329, 140, 171, 515,
-  546, 13, 44, 423, 454, 235, 266, 545, 576, 454, 485, 45, 76, 172, 203, 330,
-  361, 576, 576, 13, 13, 267, 298, 546, 577, 77, 108, 204, 235, 455, 486, 577,
-  608, 299, 330, 109, 140, 547, 578, 14, 45, 14, 14, 141, 172, 578, 609, 331,
-  362, 46, 77, 173, 204, 15, 15, 78, 109, 205, 236, 579, 610, 110, 141, 15, 46,
-  142, 173, 47, 78, 174, 205, 16, 16, 79, 110, 206, 237, 16, 47, 111, 142,
-  48, 79, 143, 174, 80, 111, 175, 206, 17, 48, 17, 17, 207, 238, 49, 80,
-  81, 112, 18, 18, 18, 49, 50, 81, 82, 113, 19, 50, 51, 82, 83, 114, 608, 608,
-  484, 515, 360, 391, 236, 267, 112, 143, 19, 19, 640, 640, 609, 640, 516, 547,
-  485, 516, 392, 423, 361, 392, 268, 299, 237, 268, 144, 175, 113, 144, 20, 51,
-  20, 20, 672, 672, 641, 672, 610, 641, 548, 579, 517, 548, 486, 517, 424, 455,
-  393, 424, 362, 393, 300, 331, 269, 300, 238, 269, 176, 207, 145, 176, 114,
-  145, 52, 83, 21, 52, 21, 21, 704, 704, 673, 704, 642, 673, 611, 642, 580,
-  611, 549, 580, 518, 549, 487, 518, 456, 487, 425, 456, 394, 425, 363, 394,
-  332, 363, 301, 332, 270, 301, 239, 270, 208, 239, 177, 208, 146, 177, 115,
-  146, 84, 115, 53, 84, 22, 53, 22, 22, 705, 736, 674, 705, 643, 674, 581, 612,
-  550, 581, 519, 550, 457, 488, 426, 457, 395, 426, 333, 364, 302, 333, 271,
-  302, 209, 240, 178, 209, 147, 178, 85, 116, 54, 85, 23, 54, 706, 737, 675,
-  706, 582, 613, 551, 582, 458, 489, 427, 458, 334, 365, 303, 334, 210, 241,
-  179, 210, 86, 117, 55, 86, 707, 738, 583, 614, 459, 490, 335, 366, 211, 242,
-  87, 118, 736, 736, 612, 643, 488, 519, 364, 395, 240, 271, 116, 147, 23, 23,
-  768, 768, 737, 768, 644, 675, 613, 644, 520, 551, 489, 520, 396, 427, 365,
-  396, 272, 303, 241, 272, 148, 179, 117, 148, 24, 55, 24, 24, 800, 800, 769,
-  800, 738, 769, 676, 707, 645, 676, 614, 645, 552, 583, 521, 552, 490, 521,
-  428, 459, 397, 428, 366, 397, 304, 335, 273, 304, 242, 273, 180, 211, 149,
-  180, 118, 149, 56, 87, 25, 56, 25, 25, 832, 832, 801, 832, 770, 801, 739,
-  770, 708, 739, 677, 708, 646, 677, 615, 646, 584, 615, 553, 584, 522, 553,
-  491, 522, 460, 491, 429, 460, 398, 429, 367, 398, 336, 367, 305, 336, 274,
-  305, 243, 274, 212, 243, 181, 212, 150, 181, 119, 150, 88, 119, 57, 88, 26,
-  57, 26, 26, 833, 864, 802, 833, 771, 802, 709, 740, 678, 709, 647, 678, 585,
-  616, 554, 585, 523, 554, 461, 492, 430, 461, 399, 430, 337, 368, 306, 337,
-  275, 306, 213, 244, 182, 213, 151, 182, 89, 120, 58, 89, 27, 58, 834, 865,
-  803, 834, 710, 741, 679, 710, 586, 617, 555, 586, 462, 493, 431, 462, 338,
-  369, 307, 338, 214, 245, 183, 214, 90, 121, 59, 90, 835, 866, 711, 742, 587,
-  618, 463, 494, 339, 370, 215, 246, 91, 122, 864, 864, 740, 771, 616, 647,
-  492, 523, 368, 399, 244, 275, 120, 151, 27, 27, 896, 896, 865, 896, 772, 803,
-  741, 772, 648, 679, 617, 648, 524, 555, 493, 524, 400, 431, 369, 400, 276,
-  307, 245, 276, 152, 183, 121, 152, 28, 59, 28, 28, 928, 928, 897, 928, 866,
-  897, 804, 835, 773, 804, 742, 773, 680, 711, 649, 680, 618, 649, 556, 587,
-  525, 556, 494, 525, 432, 463, 401, 432, 370, 401, 308, 339, 277, 308, 246,
-  277, 184, 215, 153, 184, 122, 153, 60, 91, 29, 60, 29, 29, 960, 960, 929,
-  960, 898, 929, 867, 898, 836, 867, 805, 836, 774, 805, 743, 774, 712, 743,
-  681, 712, 650, 681, 619, 650, 588, 619, 557, 588, 526, 557, 495, 526, 464,
-  495, 433, 464, 402, 433, 371, 402, 340, 371, 309, 340, 278, 309, 247, 278,
-  216, 247, 185, 216, 154, 185, 123, 154, 92, 123, 61, 92, 30, 61, 30, 30,
-  961, 992, 930, 961, 899, 930, 837, 868, 806, 837, 775, 806, 713, 744, 682,
-  713, 651, 682, 589, 620, 558, 589, 527, 558, 465, 496, 434, 465, 403, 434,
-  341, 372, 310, 341, 279, 310, 217, 248, 186, 217, 155, 186, 93, 124, 62, 93,
-  31, 62, 962, 993, 931, 962, 838, 869, 807, 838, 714, 745, 683, 714, 590, 621,
-  559, 590, 466, 497, 435, 466, 342, 373, 311, 342, 218, 249, 187, 218, 94,
-  125, 63, 94, 963, 994, 839, 870, 715, 746, 591, 622, 467, 498, 343, 374, 219,
-  250, 95, 126, 868, 899, 744, 775, 620, 651, 496, 527, 372, 403, 248, 279,
-  124, 155, 900, 931, 869, 900, 776, 807, 745, 776, 652, 683, 621, 652, 528,
-  559, 497, 528, 404, 435, 373, 404, 280, 311, 249, 280, 156, 187, 125, 156,
-  932, 963, 901, 932, 870, 901, 808, 839, 777, 808, 746, 777, 684, 715, 653,
-  684, 622, 653, 560, 591, 529, 560, 498, 529, 436, 467, 405, 436, 374, 405,
-  312, 343, 281, 312, 250, 281, 188, 219, 157, 188, 126, 157, 964, 995, 933,
-  964, 902, 933, 871, 902, 840, 871, 809, 840, 778, 809, 747, 778, 716, 747,
-  685, 716, 654, 685, 623, 654, 592, 623, 561, 592, 530, 561, 499, 530, 468,
-  499, 437, 468, 406, 437, 375, 406, 344, 375, 313, 344, 282, 313, 251, 282,
-  220, 251, 189, 220, 158, 189, 127, 158, 965, 996, 934, 965, 903, 934, 841,
-  872, 810, 841, 779, 810, 717, 748, 686, 717, 655, 686, 593, 624, 562, 593,
-  531, 562, 469, 500, 438, 469, 407, 438, 345, 376, 314, 345, 283, 314, 221,
-  252, 190, 221, 159, 190, 966, 997, 935, 966, 842, 873, 811, 842, 718, 749,
-  687, 718, 594, 625, 563, 594, 470, 501, 439, 470, 346, 377, 315, 346, 222,
-  253, 191, 222, 967, 998, 843, 874, 719, 750, 595, 626, 471, 502, 347, 378,
-  223, 254, 872, 903, 748, 779, 624, 655, 500, 531, 376, 407, 252, 283, 904,
-  935, 873, 904, 780, 811, 749, 780, 656, 687, 625, 656, 532, 563, 501, 532,
-  408, 439, 377, 408, 284, 315, 253, 284, 936, 967, 905, 936, 874, 905, 812,
-  843, 781, 812, 750, 781, 688, 719, 657, 688, 626, 657, 564, 595, 533, 564,
-  502, 533, 440, 471, 409, 440, 378, 409, 316, 347, 285, 316, 254, 285, 968,
-  999, 937, 968, 906, 937, 875, 906, 844, 875, 813, 844, 782, 813, 751, 782,
-  720, 751, 689, 720, 658, 689, 627, 658, 596, 627, 565, 596, 534, 565, 503,
-  534, 472, 503, 441, 472, 410, 441, 379, 410, 348, 379, 317, 348, 286, 317,
-  255, 286, 969, 1000, 938, 969, 907, 938, 845, 876, 814, 845, 783, 814, 721,
-  752, 690, 721, 659, 690, 597, 628, 566, 597, 535, 566, 473, 504, 442, 473,
-  411, 442, 349, 380, 318, 349, 287, 318, 970, 1001, 939, 970, 846, 877, 815,
-  846, 722, 753, 691, 722, 598, 629, 567, 598, 474, 505, 443, 474, 350, 381,
-  319, 350, 971, 1002, 847, 878, 723, 754, 599, 630, 475, 506, 351, 382, 876,
-  907, 752, 783, 628, 659, 504, 535, 380, 411, 908, 939, 877, 908, 784, 815,
-  753, 784, 660, 691, 629, 660, 536, 567, 505, 536, 412, 443, 381, 412, 940,
-  971, 909, 940, 878, 909, 816, 847, 785, 816, 754, 785, 692, 723, 661, 692,
-  630, 661, 568, 599, 537, 568, 506, 537, 444, 475, 413, 444, 382, 413, 972,
-  1003, 941, 972, 910, 941, 879, 910, 848, 879, 817, 848, 786, 817, 755, 786,
-  724, 755, 693, 724, 662, 693, 631, 662, 600, 631, 569, 600, 538, 569, 507,
-  538, 476, 507, 445, 476, 414, 445, 383, 414, 973, 1004, 942, 973, 911, 942,
-  849, 880, 818, 849, 787, 818, 725, 756, 694, 725, 663, 694, 601, 632, 570,
-  601, 539, 570, 477, 508, 446, 477, 415, 446, 974, 1005, 943, 974, 850, 881,
-  819, 850, 726, 757, 695, 726, 602, 633, 571, 602, 478, 509, 447, 478, 975,
-  1006, 851, 882, 727, 758, 603, 634, 479, 510, 880, 911, 756, 787, 632, 663,
-  508, 539, 912, 943, 881, 912, 788, 819, 757, 788, 664, 695, 633, 664, 540,
-  571, 509, 540, 944, 975, 913, 944, 882, 913, 820, 851, 789, 820, 758, 789,
-  696, 727, 665, 696, 634, 665, 572, 603, 541, 572, 510, 541, 976, 1007, 945,
-  976, 914, 945, 883, 914, 852, 883, 821, 852, 790, 821, 759, 790, 728, 759,
-  697, 728, 666, 697, 635, 666, 604, 635, 573, 604, 542, 573, 511, 542, 977,
-  1008, 946, 977, 915, 946, 853, 884, 822, 853, 791, 822, 729, 760, 698, 729,
-  667, 698, 605, 636, 574, 605, 543, 574, 978, 1009, 947, 978, 854, 885, 823,
-  854, 730, 761, 699, 730, 606, 637, 575, 606, 979, 1010, 855, 886, 731, 762,
-  607, 638, 884, 915, 760, 791, 636, 667, 916, 947, 885, 916, 792, 823, 761,
-  792, 668, 699, 637, 668, 948, 979, 917, 948, 886, 917, 824, 855, 793, 824,
-  762, 793, 700, 731, 669, 700, 638, 669, 980, 1011, 949, 980, 918, 949, 887,
-  918, 856, 887, 825, 856, 794, 825, 763, 794, 732, 763, 701, 732, 670, 701,
-  639, 670, 981, 1012, 950, 981, 919, 950, 857, 888, 826, 857, 795, 826, 733,
-  764, 702, 733, 671, 702, 982, 1013, 951, 982, 858, 889, 827, 858, 734, 765,
-  703, 734, 983, 1014, 859, 890, 735, 766, 888, 919, 764, 795, 920, 951, 889,
-  920, 796, 827, 765, 796, 952, 983, 921, 952, 890, 921, 828, 859, 797, 828,
-  766, 797, 984, 1015, 953, 984, 922, 953, 891, 922, 860, 891, 829, 860, 798,
-  829, 767, 798, 985, 1016, 954, 985, 923, 954, 861, 892, 830, 861, 799, 830,
-  986, 1017, 955, 986, 862, 893, 831, 862, 987, 1018, 863, 894, 892, 923, 924,
-  955, 893, 924, 956, 987, 925, 956, 894, 925, 988, 1019, 957, 988, 926, 957,
-  895, 926, 989, 1020, 958, 989, 927, 958, 990, 1021, 959, 990, 991, 1022, 0, 0,
+  0,   0,    0,   0,    0,   0,    32,  32,   1,   32,  1,   1,    64,  64,
+  33,  64,   2,   33,   96,  96,   2,   2,    65,  96,  34,  65,   128, 128,
+  97,  128,  3,   34,   66,  97,   3,   3,    35,  66,  98,  129,  129, 160,
+  160, 160,  4,   35,   67,  98,   192, 192,  4,   4,   130, 161,  161, 192,
+  36,  67,   99,  130,  5,   36,   68,  99,   193, 224, 162, 193,  224, 224,
+  131, 162,  37,  68,   100, 131,  5,   5,    194, 225, 225, 256,  256, 256,
+  163, 194,  69,  100,  132, 163,  6,   37,   226, 257, 6,   6,    195, 226,
+  257, 288,  101, 132,  288, 288,  38,  69,   164, 195, 133, 164,  258, 289,
+  227, 258,  196, 227,  7,   38,   289, 320,  70,  101, 320, 320,  7,   7,
+  165, 196,  39,  70,   102, 133,  290, 321,  259, 290, 228, 259,  321, 352,
+  352, 352,  197, 228,  134, 165,  71,  102,  8,   39,  322, 353,  291, 322,
+  260, 291,  103, 134,  353, 384,  166, 197,  229, 260, 40,  71,   8,   8,
+  384, 384,  135, 166,  354, 385,  323, 354,  198, 229, 292, 323,  72,  103,
+  261, 292,  9,   40,   385, 416,  167, 198,  104, 135, 230, 261,  355, 386,
+  416, 416,  293, 324,  324, 355,  9,   9,    41,  72,  386, 417,  199, 230,
+  136, 167,  417, 448,  262, 293,  356, 387,  73,  104, 387, 418,  231, 262,
+  10,  41,   168, 199,  325, 356,  418, 449,  105, 136, 448, 448,  42,  73,
+  294, 325,  200, 231,  10,  10,   357, 388,  137, 168, 263, 294,  388, 419,
+  74,  105,  419, 450,  449, 480,  326, 357,  232, 263, 295, 326,  169, 200,
+  11,  42,   106, 137,  480, 480,  450, 481,  358, 389, 264, 295,  201, 232,
+  138, 169,  389, 420,  43,  74,   420, 451,  327, 358, 11,  11,   481, 512,
+  233, 264,  451, 482,  296, 327,  75,  106,  170, 201, 482, 513,  512, 512,
+  390, 421,  359, 390,  421, 452,  107, 138,  12,  43,  202, 233,  452, 483,
+  265, 296,  328, 359,  139, 170,  44,  75,   483, 514, 513, 544,  234, 265,
+  297, 328,  422, 453,  12,  12,   391, 422,  171, 202, 76,  107,  514, 545,
+  453, 484,  544, 544,  266, 297,  203, 234,  108, 139, 329, 360,  298, 329,
+  140, 171,  515, 546,  13,  44,   423, 454,  235, 266, 545, 576,  454, 485,
+  45,  76,   172, 203,  330, 361,  576, 576,  13,  13,  267, 298,  546, 577,
+  77,  108,  204, 235,  455, 486,  577, 608,  299, 330, 109, 140,  547, 578,
+  14,  45,   14,  14,   141, 172,  578, 609,  331, 362, 46,  77,   173, 204,
+  15,  15,   78,  109,  205, 236,  579, 610,  110, 141, 15,  46,   142, 173,
+  47,  78,   174, 205,  16,  16,   79,  110,  206, 237, 16,  47,   111, 142,
+  48,  79,   143, 174,  80,  111,  175, 206,  17,  48,  17,  17,   207, 238,
+  49,  80,   81,  112,  18,  18,   18,  49,   50,  81,  82,  113,  19,  50,
+  51,  82,   83,  114,  608, 608,  484, 515,  360, 391, 236, 267,  112, 143,
+  19,  19,   640, 640,  609, 640,  516, 547,  485, 516, 392, 423,  361, 392,
+  268, 299,  237, 268,  144, 175,  113, 144,  20,  51,  20,  20,   672, 672,
+  641, 672,  610, 641,  548, 579,  517, 548,  486, 517, 424, 455,  393, 424,
+  362, 393,  300, 331,  269, 300,  238, 269,  176, 207, 145, 176,  114, 145,
+  52,  83,   21,  52,   21,  21,   704, 704,  673, 704, 642, 673,  611, 642,
+  580, 611,  549, 580,  518, 549,  487, 518,  456, 487, 425, 456,  394, 425,
+  363, 394,  332, 363,  301, 332,  270, 301,  239, 270, 208, 239,  177, 208,
+  146, 177,  115, 146,  84,  115,  53,  84,   22,  53,  22,  22,   705, 736,
+  674, 705,  643, 674,  581, 612,  550, 581,  519, 550, 457, 488,  426, 457,
+  395, 426,  333, 364,  302, 333,  271, 302,  209, 240, 178, 209,  147, 178,
+  85,  116,  54,  85,   23,  54,   706, 737,  675, 706, 582, 613,  551, 582,
+  458, 489,  427, 458,  334, 365,  303, 334,  210, 241, 179, 210,  86,  117,
+  55,  86,   707, 738,  583, 614,  459, 490,  335, 366, 211, 242,  87,  118,
+  736, 736,  612, 643,  488, 519,  364, 395,  240, 271, 116, 147,  23,  23,
+  768, 768,  737, 768,  644, 675,  613, 644,  520, 551, 489, 520,  396, 427,
+  365, 396,  272, 303,  241, 272,  148, 179,  117, 148, 24,  55,   24,  24,
+  800, 800,  769, 800,  738, 769,  676, 707,  645, 676, 614, 645,  552, 583,
+  521, 552,  490, 521,  428, 459,  397, 428,  366, 397, 304, 335,  273, 304,
+  242, 273,  180, 211,  149, 180,  118, 149,  56,  87,  25,  56,   25,  25,
+  832, 832,  801, 832,  770, 801,  739, 770,  708, 739, 677, 708,  646, 677,
+  615, 646,  584, 615,  553, 584,  522, 553,  491, 522, 460, 491,  429, 460,
+  398, 429,  367, 398,  336, 367,  305, 336,  274, 305, 243, 274,  212, 243,
+  181, 212,  150, 181,  119, 150,  88,  119,  57,  88,  26,  57,   26,  26,
+  833, 864,  802, 833,  771, 802,  709, 740,  678, 709, 647, 678,  585, 616,
+  554, 585,  523, 554,  461, 492,  430, 461,  399, 430, 337, 368,  306, 337,
+  275, 306,  213, 244,  182, 213,  151, 182,  89,  120, 58,  89,   27,  58,
+  834, 865,  803, 834,  710, 741,  679, 710,  586, 617, 555, 586,  462, 493,
+  431, 462,  338, 369,  307, 338,  214, 245,  183, 214, 90,  121,  59,  90,
+  835, 866,  711, 742,  587, 618,  463, 494,  339, 370, 215, 246,  91,  122,
+  864, 864,  740, 771,  616, 647,  492, 523,  368, 399, 244, 275,  120, 151,
+  27,  27,   896, 896,  865, 896,  772, 803,  741, 772, 648, 679,  617, 648,
+  524, 555,  493, 524,  400, 431,  369, 400,  276, 307, 245, 276,  152, 183,
+  121, 152,  28,  59,   28,  28,   928, 928,  897, 928, 866, 897,  804, 835,
+  773, 804,  742, 773,  680, 711,  649, 680,  618, 649, 556, 587,  525, 556,
+  494, 525,  432, 463,  401, 432,  370, 401,  308, 339, 277, 308,  246, 277,
+  184, 215,  153, 184,  122, 153,  60,  91,   29,  60,  29,  29,   960, 960,
+  929, 960,  898, 929,  867, 898,  836, 867,  805, 836, 774, 805,  743, 774,
+  712, 743,  681, 712,  650, 681,  619, 650,  588, 619, 557, 588,  526, 557,
+  495, 526,  464, 495,  433, 464,  402, 433,  371, 402, 340, 371,  309, 340,
+  278, 309,  247, 278,  216, 247,  185, 216,  154, 185, 123, 154,  92,  123,
+  61,  92,   30,  61,   30,  30,   961, 992,  930, 961, 899, 930,  837, 868,
+  806, 837,  775, 806,  713, 744,  682, 713,  651, 682, 589, 620,  558, 589,
+  527, 558,  465, 496,  434, 465,  403, 434,  341, 372, 310, 341,  279, 310,
+  217, 248,  186, 217,  155, 186,  93,  124,  62,  93,  31,  62,   962, 993,
+  931, 962,  838, 869,  807, 838,  714, 745,  683, 714, 590, 621,  559, 590,
+  466, 497,  435, 466,  342, 373,  311, 342,  218, 249, 187, 218,  94,  125,
+  63,  94,   963, 994,  839, 870,  715, 746,  591, 622, 467, 498,  343, 374,
+  219, 250,  95,  126,  868, 899,  744, 775,  620, 651, 496, 527,  372, 403,
+  248, 279,  124, 155,  900, 931,  869, 900,  776, 807, 745, 776,  652, 683,
+  621, 652,  528, 559,  497, 528,  404, 435,  373, 404, 280, 311,  249, 280,
+  156, 187,  125, 156,  932, 963,  901, 932,  870, 901, 808, 839,  777, 808,
+  746, 777,  684, 715,  653, 684,  622, 653,  560, 591, 529, 560,  498, 529,
+  436, 467,  405, 436,  374, 405,  312, 343,  281, 312, 250, 281,  188, 219,
+  157, 188,  126, 157,  964, 995,  933, 964,  902, 933, 871, 902,  840, 871,
+  809, 840,  778, 809,  747, 778,  716, 747,  685, 716, 654, 685,  623, 654,
+  592, 623,  561, 592,  530, 561,  499, 530,  468, 499, 437, 468,  406, 437,
+  375, 406,  344, 375,  313, 344,  282, 313,  251, 282, 220, 251,  189, 220,
+  158, 189,  127, 158,  965, 996,  934, 965,  903, 934, 841, 872,  810, 841,
+  779, 810,  717, 748,  686, 717,  655, 686,  593, 624, 562, 593,  531, 562,
+  469, 500,  438, 469,  407, 438,  345, 376,  314, 345, 283, 314,  221, 252,
+  190, 221,  159, 190,  966, 997,  935, 966,  842, 873, 811, 842,  718, 749,
+  687, 718,  594, 625,  563, 594,  470, 501,  439, 470, 346, 377,  315, 346,
+  222, 253,  191, 222,  967, 998,  843, 874,  719, 750, 595, 626,  471, 502,
+  347, 378,  223, 254,  872, 903,  748, 779,  624, 655, 500, 531,  376, 407,
+  252, 283,  904, 935,  873, 904,  780, 811,  749, 780, 656, 687,  625, 656,
+  532, 563,  501, 532,  408, 439,  377, 408,  284, 315, 253, 284,  936, 967,
+  905, 936,  874, 905,  812, 843,  781, 812,  750, 781, 688, 719,  657, 688,
+  626, 657,  564, 595,  533, 564,  502, 533,  440, 471, 409, 440,  378, 409,
+  316, 347,  285, 316,  254, 285,  968, 999,  937, 968, 906, 937,  875, 906,
+  844, 875,  813, 844,  782, 813,  751, 782,  720, 751, 689, 720,  658, 689,
+  627, 658,  596, 627,  565, 596,  534, 565,  503, 534, 472, 503,  441, 472,
+  410, 441,  379, 410,  348, 379,  317, 348,  286, 317, 255, 286,  969, 1000,
+  938, 969,  907, 938,  845, 876,  814, 845,  783, 814, 721, 752,  690, 721,
+  659, 690,  597, 628,  566, 597,  535, 566,  473, 504, 442, 473,  411, 442,
+  349, 380,  318, 349,  287, 318,  970, 1001, 939, 970, 846, 877,  815, 846,
+  722, 753,  691, 722,  598, 629,  567, 598,  474, 505, 443, 474,  350, 381,
+  319, 350,  971, 1002, 847, 878,  723, 754,  599, 630, 475, 506,  351, 382,
+  876, 907,  752, 783,  628, 659,  504, 535,  380, 411, 908, 939,  877, 908,
+  784, 815,  753, 784,  660, 691,  629, 660,  536, 567, 505, 536,  412, 443,
+  381, 412,  940, 971,  909, 940,  878, 909,  816, 847, 785, 816,  754, 785,
+  692, 723,  661, 692,  630, 661,  568, 599,  537, 568, 506, 537,  444, 475,
+  413, 444,  382, 413,  972, 1003, 941, 972,  910, 941, 879, 910,  848, 879,
+  817, 848,  786, 817,  755, 786,  724, 755,  693, 724, 662, 693,  631, 662,
+  600, 631,  569, 600,  538, 569,  507, 538,  476, 507, 445, 476,  414, 445,
+  383, 414,  973, 1004, 942, 973,  911, 942,  849, 880, 818, 849,  787, 818,
+  725, 756,  694, 725,  663, 694,  601, 632,  570, 601, 539, 570,  477, 508,
+  446, 477,  415, 446,  974, 1005, 943, 974,  850, 881, 819, 850,  726, 757,
+  695, 726,  602, 633,  571, 602,  478, 509,  447, 478, 975, 1006, 851, 882,
+  727, 758,  603, 634,  479, 510,  880, 911,  756, 787, 632, 663,  508, 539,
+  912, 943,  881, 912,  788, 819,  757, 788,  664, 695, 633, 664,  540, 571,
+  509, 540,  944, 975,  913, 944,  882, 913,  820, 851, 789, 820,  758, 789,
+  696, 727,  665, 696,  634, 665,  572, 603,  541, 572, 510, 541,  976, 1007,
+  945, 976,  914, 945,  883, 914,  852, 883,  821, 852, 790, 821,  759, 790,
+  728, 759,  697, 728,  666, 697,  635, 666,  604, 635, 573, 604,  542, 573,
+  511, 542,  977, 1008, 946, 977,  915, 946,  853, 884, 822, 853,  791, 822,
+  729, 760,  698, 729,  667, 698,  605, 636,  574, 605, 543, 574,  978, 1009,
+  947, 978,  854, 885,  823, 854,  730, 761,  699, 730, 606, 637,  575, 606,
+  979, 1010, 855, 886,  731, 762,  607, 638,  884, 915, 760, 791,  636, 667,
+  916, 947,  885, 916,  792, 823,  761, 792,  668, 699, 637, 668,  948, 979,
+  917, 948,  886, 917,  824, 855,  793, 824,  762, 793, 700, 731,  669, 700,
+  638, 669,  980, 1011, 949, 980,  918, 949,  887, 918, 856, 887,  825, 856,
+  794, 825,  763, 794,  732, 763,  701, 732,  670, 701, 639, 670,  981, 1012,
+  950, 981,  919, 950,  857, 888,  826, 857,  795, 826, 733, 764,  702, 733,
+  671, 702,  982, 1013, 951, 982,  858, 889,  827, 858, 734, 765,  703, 734,
+  983, 1014, 859, 890,  735, 766,  888, 919,  764, 795, 920, 951,  889, 920,
+  796, 827,  765, 796,  952, 983,  921, 952,  890, 921, 828, 859,  797, 828,
+  766, 797,  984, 1015, 953, 984,  922, 953,  891, 922, 860, 891,  829, 860,
+  798, 829,  767, 798,  985, 1016, 954, 985,  923, 954, 861, 892,  830, 861,
+  799, 830,  986, 1017, 955, 986,  862, 893,  831, 862, 987, 1018, 863, 894,
+  892, 923,  924, 955,  893, 924,  956, 987,  925, 956, 894, 925,  988, 1019,
+  957, 988,  926, 957,  895, 926,  989, 1020, 958, 989, 927, 958,  990, 1021,
+  959, 990,  991, 1022, 0,   0,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x4[16]) = {
@@ -545,183 +526,205 @@ DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_4x4[16]) = {
 };
 
 DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_8x8[64]) = {
-  0, 3, 8, 15, 22, 32, 40, 47, 1, 5, 11, 18, 26, 34, 44, 51,
-  2, 7, 13, 20, 28, 38, 46, 54, 4, 10, 16, 24, 31, 41, 50, 56,
-  6, 12, 21, 27, 35, 43, 52, 58, 9, 17, 25, 33, 39, 48, 55, 60,
+  0,  3,  8,  15, 22, 32, 40, 47, 1,  5,  11, 18, 26, 34, 44, 51,
+  2,  7,  13, 20, 28, 38, 46, 54, 4,  10, 16, 24, 31, 41, 50, 56,
+  6,  12, 21, 27, 35, 43, 52, 58, 9,  17, 25, 33, 39, 48, 55, 60,
   14, 23, 30, 37, 45, 53, 59, 62, 19, 29, 36, 42, 49, 57, 61, 63,
 };
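 // Consistency sketch (illustrative): each vp10_*_iscan_* table below is the
 // inverse permutation of its forward *_scan_* table, mapping a raster
 // position to its index in scan order, so scan[iscan[pos]] == pos for every
 // position in the block. A hypothetical self-check (the forward-table name
 // is assumed here):
 //
 //   static int scan_iscan_are_inverse(const int16_t *scan,
 //                                     const int16_t *iscan, int n) {
 //     int pos;
 //     for (pos = 0; pos < n; ++pos)
 //       if (scan[iscan[pos]] != pos) return 0;
 //     return 1;
 //   }
 //   // e.g. scan_iscan_are_inverse(col_scan_8x8, vp10_col_iscan_8x8, 64)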
 
 DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_8x8[64]) = {
-  0, 1, 2, 5, 8, 12, 19, 24, 3, 4, 7, 10, 15, 20, 30, 39,
-  6, 9, 13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52,
+  0,  1,  2,  5,  8,  12, 19, 24, 3,  4,  7,  10, 15, 20, 30, 39,
+  6,  9,  13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52,
   18, 22, 25, 31, 35, 41, 50, 57, 26, 29, 33, 38, 43, 49, 55, 59,
   32, 36, 42, 47, 51, 54, 60, 61, 40, 45, 48, 53, 56, 58, 62, 63,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x8[64]) = {
-  0, 2, 5, 9, 14, 22, 31, 37, 1, 4, 8, 13, 19, 26, 38, 44,
-  3, 6, 10, 17, 24, 30, 42, 49, 7, 11, 15, 21, 29, 36, 47, 53,
+  0,  2,  5,  9,  14, 22, 31, 37, 1,  4,  8,  13, 19, 26, 38, 44,
+  3,  6,  10, 17, 24, 30, 42, 49, 7,  11, 15, 21, 29, 36, 47, 53,
   12, 16, 20, 27, 34, 43, 52, 57, 18, 23, 28, 35, 41, 48, 56, 60,
   25, 32, 39, 45, 50, 55, 59, 62, 33, 40, 46, 51, 54, 58, 61, 63,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_16x16[256]) = {
-  0, 4, 11, 20, 31, 43, 59, 75, 85, 109, 130, 150, 165, 181, 195, 198,
-  1, 6, 14, 23, 34, 47, 64, 81, 95, 114, 135, 153, 171, 188, 201, 212,
-  2, 8, 16, 25, 38, 52, 67, 83, 101, 116, 136, 157, 172, 190, 205, 216,
-  3, 10, 18, 29, 41, 55, 71, 89, 103, 119, 141, 159, 176, 194, 208, 218,
-  5, 12, 21, 32, 45, 58, 74, 93, 104, 123, 144, 164, 179, 196, 210, 223,
-  7, 15, 26, 37, 49, 63, 78, 96, 112, 129, 146, 166, 182, 200, 215, 228,
-  9, 19, 28, 39, 54, 69, 86, 102, 117, 132, 151, 170, 187, 206, 220, 230,
-  13, 24, 35, 46, 60, 73, 91, 108, 122, 137, 154, 174, 189, 207, 224, 235,
-  17, 30, 40, 53, 66, 82, 98, 115, 126, 142, 161, 180, 197, 213, 227, 237,
-  22, 36, 48, 62, 76, 92, 105, 120, 133, 147, 167, 186, 203, 219, 232, 240,
-  27, 44, 56, 70, 84, 99, 113, 127, 140, 156, 175, 193, 209, 226, 236, 244,
-  33, 51, 68, 79, 94, 110, 125, 138, 149, 162, 184, 202, 217, 229, 241, 247,
-  42, 61, 77, 90, 106, 121, 134, 148, 160, 173, 191, 211, 225, 238, 245, 251,
-  50, 72, 87, 100, 118, 128, 145, 158, 168, 183, 204, 222, 233, 242, 249, 253,
-  57, 80, 97, 111, 131, 143, 155, 169, 178, 192, 214, 231, 239, 246, 250, 254,
+  0,  4,  11,  20,  31,  43,  59,  75,  85,  109, 130, 150, 165, 181, 195, 198,
+  1,  6,  14,  23,  34,  47,  64,  81,  95,  114, 135, 153, 171, 188, 201, 212,
+  2,  8,  16,  25,  38,  52,  67,  83,  101, 116, 136, 157, 172, 190, 205, 216,
+  3,  10, 18,  29,  41,  55,  71,  89,  103, 119, 141, 159, 176, 194, 208, 218,
+  5,  12, 21,  32,  45,  58,  74,  93,  104, 123, 144, 164, 179, 196, 210, 223,
+  7,  15, 26,  37,  49,  63,  78,  96,  112, 129, 146, 166, 182, 200, 215, 228,
+  9,  19, 28,  39,  54,  69,  86,  102, 117, 132, 151, 170, 187, 206, 220, 230,
+  13, 24, 35,  46,  60,  73,  91,  108, 122, 137, 154, 174, 189, 207, 224, 235,
+  17, 30, 40,  53,  66,  82,  98,  115, 126, 142, 161, 180, 197, 213, 227, 237,
+  22, 36, 48,  62,  76,  92,  105, 120, 133, 147, 167, 186, 203, 219, 232, 240,
+  27, 44, 56,  70,  84,  99,  113, 127, 140, 156, 175, 193, 209, 226, 236, 244,
+  33, 51, 68,  79,  94,  110, 125, 138, 149, 162, 184, 202, 217, 229, 241, 247,
+  42, 61, 77,  90,  106, 121, 134, 148, 160, 173, 191, 211, 225, 238, 245, 251,
+  50, 72, 87,  100, 118, 128, 145, 158, 168, 183, 204, 222, 233, 242, 249, 253,
+  57, 80, 97,  111, 131, 143, 155, 169, 178, 192, 214, 231, 239, 246, 250, 254,
   65, 88, 107, 124, 139, 152, 163, 177, 185, 199, 221, 234, 243, 248, 252, 255,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_16x16[256]) = {
-  0, 1, 2, 4, 6, 9, 12, 17, 22, 29, 36, 43, 54, 64, 76, 86,
-  3, 5, 7, 11, 15, 19, 25, 32, 38, 48, 59, 68, 84, 99, 115, 130,
-  8, 10, 13, 18, 23, 27, 33, 42, 51, 60, 72, 88, 103, 119, 142, 167,
-  14, 16, 20, 26, 31, 37, 44, 53, 61, 73, 85, 100, 116, 135, 161, 185,
-  21, 24, 30, 35, 40, 47, 55, 65, 74, 81, 94, 112, 133, 154, 179, 205,
-  28, 34, 39, 45, 50, 58, 67, 77, 87, 96, 106, 121, 146, 169, 196, 212,
-  41, 46, 49, 56, 63, 70, 79, 90, 98, 107, 122, 138, 159, 182, 207, 222,
-  52, 57, 62, 69, 75, 83, 93, 102, 110, 120, 134, 150, 176, 195, 215, 226,
-  66, 71, 78, 82, 91, 97, 108, 113, 127, 136, 148, 168, 188, 202, 221, 232,
-  80, 89, 92, 101, 105, 114, 125, 131, 139, 151, 162, 177, 192, 208, 223, 234,
-  95, 104, 109, 117, 123, 128, 143, 144, 155, 165, 175, 190, 206, 219, 233, 239,
-  111, 118, 124, 129, 140, 147, 157, 164, 170, 181, 191, 203, 224, 230, 240,
-  243, 126, 132, 137, 145, 153, 160, 174, 178, 184, 197, 204, 216, 231, 237,
-  244, 246, 141, 149, 156, 166, 172, 180, 189, 199, 200, 210, 220, 228, 238,
-  242, 249, 251, 152, 163, 171, 183, 186, 193, 201, 211, 214, 218, 227, 236,
-  245, 247, 252, 253, 158, 173, 187, 194, 198, 209, 213, 217, 225, 229, 235,
-  241, 248, 250, 254, 255,
+  0,   1,   2,   4,   6,   9,   12,  17,  22,  29,  36,  43,  54,  64,  76,
+  86,  3,   5,   7,   11,  15,  19,  25,  32,  38,  48,  59,  68,  84,  99,
+  115, 130, 8,   10,  13,  18,  23,  27,  33,  42,  51,  60,  72,  88,  103,
+  119, 142, 167, 14,  16,  20,  26,  31,  37,  44,  53,  61,  73,  85,  100,
+  116, 135, 161, 185, 21,  24,  30,  35,  40,  47,  55,  65,  74,  81,  94,
+  112, 133, 154, 179, 205, 28,  34,  39,  45,  50,  58,  67,  77,  87,  96,
+  106, 121, 146, 169, 196, 212, 41,  46,  49,  56,  63,  70,  79,  90,  98,
+  107, 122, 138, 159, 182, 207, 222, 52,  57,  62,  69,  75,  83,  93,  102,
+  110, 120, 134, 150, 176, 195, 215, 226, 66,  71,  78,  82,  91,  97,  108,
+  113, 127, 136, 148, 168, 188, 202, 221, 232, 80,  89,  92,  101, 105, 114,
+  125, 131, 139, 151, 162, 177, 192, 208, 223, 234, 95,  104, 109, 117, 123,
+  128, 143, 144, 155, 165, 175, 190, 206, 219, 233, 239, 111, 118, 124, 129,
+  140, 147, 157, 164, 170, 181, 191, 203, 224, 230, 240, 243, 126, 132, 137,
+  145, 153, 160, 174, 178, 184, 197, 204, 216, 231, 237, 244, 246, 141, 149,
+  156, 166, 172, 180, 189, 199, 200, 210, 220, 228, 238, 242, 249, 251, 152,
+  163, 171, 183, 186, 193, 201, 211, 214, 218, 227, 236, 245, 247, 252, 253,
+  158, 173, 187, 194, 198, 209, 213, 217, 225, 229, 235, 241, 248, 250, 254,
+  255,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x16[256]) = {
-  0, 2, 5, 9, 17, 24, 36, 44, 55, 72, 88, 104, 128, 143, 166, 179,
-  1, 4, 8, 13, 20, 30, 40, 54, 66, 79, 96, 113, 141, 154, 178, 196,
-  3, 7, 11, 18, 25, 33, 46, 57, 71, 86, 101, 119, 148, 164, 186, 201,
-  6, 12, 16, 23, 31, 39, 53, 64, 78, 92, 110, 127, 153, 169, 193, 208,
-  10, 14, 19, 28, 37, 47, 58, 67, 84, 98, 114, 133, 161, 176, 198, 214,
-  15, 21, 26, 34, 43, 52, 65, 77, 91, 106, 120, 140, 165, 185, 205, 221,
-  22, 27, 32, 41, 48, 60, 73, 85, 99, 116, 130, 151, 175, 190, 211, 225,
-  29, 35, 42, 49, 59, 69, 81, 95, 108, 125, 139, 155, 182, 197, 217, 229,
-  38, 45, 51, 61, 68, 80, 93, 105, 118, 134, 150, 168, 191, 207, 223, 234,
-  50, 56, 63, 74, 83, 94, 109, 117, 129, 147, 163, 177, 199, 213, 228, 238,
-  62, 70, 76, 87, 97, 107, 122, 131, 145, 159, 172, 188, 210, 222, 235, 242,
-  75, 82, 90, 102, 112, 124, 138, 146, 157, 173, 187, 202, 219, 230, 240, 245,
-  89, 100, 111, 123, 132, 142, 156, 167, 180, 189, 203, 216, 231, 237, 246, 250,
-  103, 115, 126, 136, 149, 162, 171, 183, 194, 204, 215, 224, 236, 241, 248,
-  252, 121, 135, 144, 158, 170, 181, 192, 200, 209, 218, 227, 233, 243, 244,
-  251, 254, 137, 152, 160, 174, 184, 195, 206, 212, 220, 226, 232, 239, 247,
-  249, 253, 255,
+  0,   2,   5,   9,   17,  24,  36,  44,  55,  72,  88,  104, 128, 143, 166,
+  179, 1,   4,   8,   13,  20,  30,  40,  54,  66,  79,  96,  113, 141, 154,
+  178, 196, 3,   7,   11,  18,  25,  33,  46,  57,  71,  86,  101, 119, 148,
+  164, 186, 201, 6,   12,  16,  23,  31,  39,  53,  64,  78,  92,  110, 127,
+  153, 169, 193, 208, 10,  14,  19,  28,  37,  47,  58,  67,  84,  98,  114,
+  133, 161, 176, 198, 214, 15,  21,  26,  34,  43,  52,  65,  77,  91,  106,
+  120, 140, 165, 185, 205, 221, 22,  27,  32,  41,  48,  60,  73,  85,  99,
+  116, 130, 151, 175, 190, 211, 225, 29,  35,  42,  49,  59,  69,  81,  95,
+  108, 125, 139, 155, 182, 197, 217, 229, 38,  45,  51,  61,  68,  80,  93,
+  105, 118, 134, 150, 168, 191, 207, 223, 234, 50,  56,  63,  74,  83,  94,
+  109, 117, 129, 147, 163, 177, 199, 213, 228, 238, 62,  70,  76,  87,  97,
+  107, 122, 131, 145, 159, 172, 188, 210, 222, 235, 242, 75,  82,  90,  102,
+  112, 124, 138, 146, 157, 173, 187, 202, 219, 230, 240, 245, 89,  100, 111,
+  123, 132, 142, 156, 167, 180, 189, 203, 216, 231, 237, 246, 250, 103, 115,
+  126, 136, 149, 162, 171, 183, 194, 204, 215, 224, 236, 241, 248, 252, 121,
+  135, 144, 158, 170, 181, 192, 200, 209, 218, 227, 233, 243, 244, 251, 254,
+  137, 152, 160, 174, 184, 195, 206, 212, 220, 226, 232, 239, 247, 249, 253,
+  255,
 };
 
 DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x32[1024]) = {
-  0, 2, 5, 10, 17, 25, 38, 47, 62, 83, 101, 121, 145, 170, 193, 204,
-  210, 219, 229, 233, 245, 257, 275, 299, 342, 356, 377, 405, 455, 471, 495,
-  527, 1, 4, 8, 15, 22, 30, 45, 58, 74, 92, 112, 133, 158, 184, 203, 215, 222,
-  228, 234, 237, 256, 274, 298, 317, 355, 376, 404, 426, 470, 494, 526, 551,
-  3, 7, 12, 18, 28, 36, 52, 64, 82, 102, 118, 142, 164, 189, 208, 217, 224,
-  231, 235, 238, 273, 297, 316, 329, 375, 403, 425, 440, 493, 525, 550, 567,
-  6, 11, 16, 23, 31, 43, 60, 73, 90, 109, 126, 150, 173, 196, 211, 220, 226,
-  232, 236, 239, 296, 315, 328, 335, 402, 424, 439, 447, 524, 549, 566, 575,
-  9, 14, 19, 29, 37, 50, 65, 78, 95, 116, 134, 157, 179, 201, 214, 223, 244,
-  255, 272, 295, 341, 354, 374, 401, 454, 469, 492, 523, 582, 596, 617, 645,
-  13, 20, 26, 35, 44, 54, 72, 85, 105, 123, 140, 163, 182, 205, 216, 225,
-  254, 271, 294, 314, 353, 373, 400, 423, 468, 491, 522, 548, 595, 616, 644,
-  666, 21, 27, 33, 42, 53, 63, 80, 94, 113, 132, 151, 172, 190, 209, 218, 227,
-  270, 293, 313, 327, 372, 399, 422, 438, 490, 521, 547, 565, 615, 643, 665,
-  680, 24, 32, 39, 48, 57, 71, 88, 104, 120, 139, 159, 178, 197, 212, 221, 230,
-  292, 312, 326, 334, 398, 421, 437, 446, 520, 546, 564, 574, 642, 664, 679,
-  687, 34, 40, 46, 56, 68, 81, 96, 111, 130, 147, 167, 186, 243, 253, 269, 291,
-  340, 352, 371, 397, 453, 467, 489, 519, 581, 594, 614, 641, 693, 705, 723,
-  747, 41, 49, 55, 67, 77, 91, 107, 124, 138, 161, 177, 194, 252, 268, 290,
-  311, 351, 370, 396, 420, 466, 488, 518, 545, 593, 613, 640, 663, 704, 722,
-  746, 765, 51, 59, 66, 76, 89, 99, 119, 131, 149, 168, 181, 200, 267, 289,
-  310, 325, 369, 395, 419, 436, 487, 517, 544, 563, 612, 639, 662, 678, 721,
-  745, 764, 777, 61, 69, 75, 87, 100, 114, 129, 144, 162, 180, 191, 207, 288,
-  309, 324, 333, 394, 418, 435, 445, 516, 543, 562, 573, 638, 661, 677, 686,
-  744, 763, 776, 783, 70, 79, 86, 97, 108, 122, 137, 155, 242, 251, 266, 287,
-  339, 350, 368, 393, 452, 465, 486, 515, 580, 592, 611, 637, 692, 703, 720,
-  743, 788, 798, 813, 833, 84, 93, 103, 110, 125, 141, 154, 171, 250, 265, 286,
-  308, 349, 367, 392, 417, 464, 485, 514, 542, 591, 610, 636, 660, 702, 719,
-  742, 762, 797, 812, 832, 848, 98, 106, 115, 127, 143, 156, 169, 185, 264,
-  285, 307, 323, 366, 391, 416, 434, 484, 513, 541, 561, 609, 635, 659, 676,
-  718, 741, 761, 775, 811, 831, 847, 858, 117, 128, 136, 148, 160, 175, 188,
-  198, 284, 306, 322, 332, 390, 415, 433, 444, 512, 540, 560, 572, 634, 658,
-  675, 685, 740, 760, 774, 782, 830, 846, 857, 863, 135, 146, 152, 165, 241,
-  249, 263, 283, 338, 348, 365, 389, 451, 463, 483, 511, 579, 590, 608, 633,
-  691, 701, 717, 739, 787, 796, 810, 829, 867, 875, 887, 903, 153, 166, 174,
-  183, 248, 262, 282, 305, 347, 364, 388, 414, 462, 482, 510, 539, 589, 607,
-  632, 657, 700, 716, 738, 759, 795, 809, 828, 845, 874, 886, 902, 915, 176,
-  187, 195, 202, 261, 281, 304, 321, 363, 387, 413, 432, 481, 509, 538, 559,
-  606, 631, 656, 674, 715, 737, 758, 773, 808, 827, 844, 856, 885, 901, 914,
-  923, 192, 199, 206, 213, 280, 303, 320, 331, 386, 412, 431, 443, 508, 537,
-  558, 571, 630, 655, 673, 684, 736, 757, 772, 781, 826, 843, 855, 862, 900,
-  913, 922, 927, 240, 247, 260, 279, 337, 346, 362, 385, 450, 461, 480, 507,
-  578, 588, 605, 629, 690, 699, 714, 735, 786, 794, 807, 825, 866, 873, 884,
-  899, 930, 936, 945, 957, 246, 259, 278, 302, 345, 361, 384, 411, 460, 479,
-  506, 536, 587, 604, 628, 654, 698, 713, 734, 756, 793, 806, 824, 842, 872,
-  883, 898, 912, 935, 944, 956, 966, 258, 277, 301, 319, 360, 383, 410, 430,
-  478, 505, 535, 557, 603, 627, 653, 672, 712, 733, 755, 771, 805, 823, 841,
-  854, 882, 897, 911, 921, 943, 955, 965, 972, 276, 300, 318, 330, 382, 409,
-  429, 442, 504, 534, 556, 570, 626, 652, 671, 683, 732, 754, 770, 780, 822,
-  840, 853, 861, 896, 910, 920, 926, 954, 964, 971, 975, 336, 344, 359, 381,
-  449, 459, 477, 503, 577, 586, 602, 625, 689, 697, 711, 731, 785, 792, 804,
-  821, 865, 871, 881, 895, 929, 934, 942, 953, 977, 981, 987, 995, 343, 358,
-  380, 408, 458, 476, 502, 533, 585, 601, 624, 651, 696, 710, 730, 753, 791,
-  803, 820, 839, 870, 880, 894, 909, 933, 941, 952, 963, 980, 986, 994, 1001,
-  357, 379, 407, 428, 475, 501, 532, 555, 600, 623, 650, 670, 709, 729, 752,
-  769, 802, 819, 838, 852, 879, 893, 908, 919, 940, 951, 962, 970, 985, 993,
-  1000, 1005, 378, 406, 427, 441, 500, 531, 554, 569, 622, 649, 669, 682, 728,
-  751, 768, 779, 818, 837, 851, 860, 892, 907, 918, 925, 950, 961, 969, 974,
-  992, 999, 1004, 1007, 448, 457, 474, 499, 576, 584, 599, 621, 688, 695, 708,
-  727, 784, 790, 801, 817, 864, 869, 878, 891, 928, 932, 939, 949, 976, 979,
-  984, 991, 1008, 1010, 1013, 1017, 456, 473, 498, 530, 583, 598, 620, 648,
-  694, 707, 726, 750, 789, 800, 816, 836, 868, 877, 890, 906, 931, 938, 948,
-  960, 978, 983, 990, 998, 1009, 1012, 1016, 1020, 472, 497, 529, 553, 597,
-  619, 647, 668, 706, 725, 749, 767, 799, 815, 835, 850, 876, 889, 905, 917,
-  937, 947, 959, 968, 982, 989, 997, 1003, 1011, 1015, 1019, 1022, 496, 528,
-  552, 568, 618, 646, 667, 681, 724, 748, 766, 778, 814, 834, 849, 859, 888,
-  904, 916, 924, 946, 958, 967, 973, 988, 996, 1002, 1006, 1014, 1018, 1021,
-  1023,
+  0,    2,    5,    10,   17,   25,   38,   47,   62,   83,   101,  121,  145,
+  170,  193,  204,  210,  219,  229,  233,  245,  257,  275,  299,  342,  356,
+  377,  405,  455,  471,  495,  527,  1,    4,    8,    15,   22,   30,   45,
+  58,   74,   92,   112,  133,  158,  184,  203,  215,  222,  228,  234,  237,
+  256,  274,  298,  317,  355,  376,  404,  426,  470,  494,  526,  551,  3,
+  7,    12,   18,   28,   36,   52,   64,   82,   102,  118,  142,  164,  189,
+  208,  217,  224,  231,  235,  238,  273,  297,  316,  329,  375,  403,  425,
+  440,  493,  525,  550,  567,  6,    11,   16,   23,   31,   43,   60,   73,
+  90,   109,  126,  150,  173,  196,  211,  220,  226,  232,  236,  239,  296,
+  315,  328,  335,  402,  424,  439,  447,  524,  549,  566,  575,  9,    14,
+  19,   29,   37,   50,   65,   78,   95,   116,  134,  157,  179,  201,  214,
+  223,  244,  255,  272,  295,  341,  354,  374,  401,  454,  469,  492,  523,
+  582,  596,  617,  645,  13,   20,   26,   35,   44,   54,   72,   85,   105,
+  123,  140,  163,  182,  205,  216,  225,  254,  271,  294,  314,  353,  373,
+  400,  423,  468,  491,  522,  548,  595,  616,  644,  666,  21,   27,   33,
+  42,   53,   63,   80,   94,   113,  132,  151,  172,  190,  209,  218,  227,
+  270,  293,  313,  327,  372,  399,  422,  438,  490,  521,  547,  565,  615,
+  643,  665,  680,  24,   32,   39,   48,   57,   71,   88,   104,  120,  139,
+  159,  178,  197,  212,  221,  230,  292,  312,  326,  334,  398,  421,  437,
+  446,  520,  546,  564,  574,  642,  664,  679,  687,  34,   40,   46,   56,
+  68,   81,   96,   111,  130,  147,  167,  186,  243,  253,  269,  291,  340,
+  352,  371,  397,  453,  467,  489,  519,  581,  594,  614,  641,  693,  705,
+  723,  747,  41,   49,   55,   67,   77,   91,   107,  124,  138,  161,  177,
+  194,  252,  268,  290,  311,  351,  370,  396,  420,  466,  488,  518,  545,
+  593,  613,  640,  663,  704,  722,  746,  765,  51,   59,   66,   76,   89,
+  99,   119,  131,  149,  168,  181,  200,  267,  289,  310,  325,  369,  395,
+  419,  436,  487,  517,  544,  563,  612,  639,  662,  678,  721,  745,  764,
+  777,  61,   69,   75,   87,   100,  114,  129,  144,  162,  180,  191,  207,
+  288,  309,  324,  333,  394,  418,  435,  445,  516,  543,  562,  573,  638,
+  661,  677,  686,  744,  763,  776,  783,  70,   79,   86,   97,   108,  122,
+  137,  155,  242,  251,  266,  287,  339,  350,  368,  393,  452,  465,  486,
+  515,  580,  592,  611,  637,  692,  703,  720,  743,  788,  798,  813,  833,
+  84,   93,   103,  110,  125,  141,  154,  171,  250,  265,  286,  308,  349,
+  367,  392,  417,  464,  485,  514,  542,  591,  610,  636,  660,  702,  719,
+  742,  762,  797,  812,  832,  848,  98,   106,  115,  127,  143,  156,  169,
+  185,  264,  285,  307,  323,  366,  391,  416,  434,  484,  513,  541,  561,
+  609,  635,  659,  676,  718,  741,  761,  775,  811,  831,  847,  858,  117,
+  128,  136,  148,  160,  175,  188,  198,  284,  306,  322,  332,  390,  415,
+  433,  444,  512,  540,  560,  572,  634,  658,  675,  685,  740,  760,  774,
+  782,  830,  846,  857,  863,  135,  146,  152,  165,  241,  249,  263,  283,
+  338,  348,  365,  389,  451,  463,  483,  511,  579,  590,  608,  633,  691,
+  701,  717,  739,  787,  796,  810,  829,  867,  875,  887,  903,  153,  166,
+  174,  183,  248,  262,  282,  305,  347,  364,  388,  414,  462,  482,  510,
+  539,  589,  607,  632,  657,  700,  716,  738,  759,  795,  809,  828,  845,
+  874,  886,  902,  915,  176,  187,  195,  202,  261,  281,  304,  321,  363,
+  387,  413,  432,  481,  509,  538,  559,  606,  631,  656,  674,  715,  737,
+  758,  773,  808,  827,  844,  856,  885,  901,  914,  923,  192,  199,  206,
+  213,  280,  303,  320,  331,  386,  412,  431,  443,  508,  537,  558,  571,
+  630,  655,  673,  684,  736,  757,  772,  781,  826,  843,  855,  862,  900,
+  913,  922,  927,  240,  247,  260,  279,  337,  346,  362,  385,  450,  461,
+  480,  507,  578,  588,  605,  629,  690,  699,  714,  735,  786,  794,  807,
+  825,  866,  873,  884,  899,  930,  936,  945,  957,  246,  259,  278,  302,
+  345,  361,  384,  411,  460,  479,  506,  536,  587,  604,  628,  654,  698,
+  713,  734,  756,  793,  806,  824,  842,  872,  883,  898,  912,  935,  944,
+  956,  966,  258,  277,  301,  319,  360,  383,  410,  430,  478,  505,  535,
+  557,  603,  627,  653,  672,  712,  733,  755,  771,  805,  823,  841,  854,
+  882,  897,  911,  921,  943,  955,  965,  972,  276,  300,  318,  330,  382,
+  409,  429,  442,  504,  534,  556,  570,  626,  652,  671,  683,  732,  754,
+  770,  780,  822,  840,  853,  861,  896,  910,  920,  926,  954,  964,  971,
+  975,  336,  344,  359,  381,  449,  459,  477,  503,  577,  586,  602,  625,
+  689,  697,  711,  731,  785,  792,  804,  821,  865,  871,  881,  895,  929,
+  934,  942,  953,  977,  981,  987,  995,  343,  358,  380,  408,  458,  476,
+  502,  533,  585,  601,  624,  651,  696,  710,  730,  753,  791,  803,  820,
+  839,  870,  880,  894,  909,  933,  941,  952,  963,  980,  986,  994,  1001,
+  357,  379,  407,  428,  475,  501,  532,  555,  600,  623,  650,  670,  709,
+  729,  752,  769,  802,  819,  838,  852,  879,  893,  908,  919,  940,  951,
+  962,  970,  985,  993,  1000, 1005, 378,  406,  427,  441,  500,  531,  554,
+  569,  622,  649,  669,  682,  728,  751,  768,  779,  818,  837,  851,  860,
+  892,  907,  918,  925,  950,  961,  969,  974,  992,  999,  1004, 1007, 448,
+  457,  474,  499,  576,  584,  599,  621,  688,  695,  708,  727,  784,  790,
+  801,  817,  864,  869,  878,  891,  928,  932,  939,  949,  976,  979,  984,
+  991,  1008, 1010, 1013, 1017, 456,  473,  498,  530,  583,  598,  620,  648,
+  694,  707,  726,  750,  789,  800,  816,  836,  868,  877,  890,  906,  931,
+  938,  948,  960,  978,  983,  990,  998,  1009, 1012, 1016, 1020, 472,  497,
+  529,  553,  597,  619,  647,  668,  706,  725,  749,  767,  799,  815,  835,
+  850,  876,  889,  905,  917,  937,  947,  959,  968,  982,  989,  997,  1003,
+  1011, 1015, 1019, 1022, 496,  528,  552,  568,  618,  646,  667,  681,  724,
+  748,  766,  778,  814,  834,  849,  859,  888,  904,  916,  924,  946,  958,
+  967,  973,  988,  996,  1002, 1006, 1014, 1018, 1021, 1023,
 };
 
 const scan_order vp10_default_scan_orders[TX_SIZES] = {
-  {default_scan_4x4,   vp10_default_iscan_4x4,   default_scan_4x4_neighbors},
-  {default_scan_8x8,   vp10_default_iscan_8x8,   default_scan_8x8_neighbors},
-  {default_scan_16x16, vp10_default_iscan_16x16, default_scan_16x16_neighbors},
-  {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors},
+  { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
+  { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
+  { default_scan_16x16, vp10_default_iscan_16x16,
+    default_scan_16x16_neighbors },
+  { default_scan_32x32, vp10_default_iscan_32x32,
+    default_scan_32x32_neighbors },
 };
 
 const scan_order vp10_scan_orders[TX_SIZES][TX_TYPES] = {
-  {  // TX_4X4
-    {default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
-    {row_scan_4x4,     vp10_row_iscan_4x4,     row_scan_4x4_neighbors},
-    {col_scan_4x4,     vp10_col_iscan_4x4,     col_scan_4x4_neighbors},
-    {default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors}
-  }, {  // TX_8X8
-    {default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
-    {row_scan_8x8,     vp10_row_iscan_8x8,     row_scan_8x8_neighbors},
-    {col_scan_8x8,     vp10_col_iscan_8x8,     col_scan_8x8_neighbors},
-    {default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors}
-  }, {  // TX_16X16
-    {default_scan_16x16, vp10_default_iscan_16x16, default_scan_16x16_neighbors},
-    {row_scan_16x16,     vp10_row_iscan_16x16,     row_scan_16x16_neighbors},
-    {col_scan_16x16,     vp10_col_iscan_16x16,     col_scan_16x16_neighbors},
-    {default_scan_16x16, vp10_default_iscan_16x16, default_scan_16x16_neighbors}
-  }, {  // TX_32X32
-    {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors},
-    {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors},
-    {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors},
-    {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors},
+  { // TX_4X4
+    { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
+    { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
+    { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
+    { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors } },
+  { // TX_8X8
+    { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
+    { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
+    { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
+    { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors } },
+  { // TX_16X16
+    { default_scan_16x16, vp10_default_iscan_16x16,
+      default_scan_16x16_neighbors },
+    { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
+    { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
+    { default_scan_16x16, vp10_default_iscan_16x16,
+      default_scan_16x16_neighbors } },
+  {
+      // TX_32X32
+      { default_scan_32x32, vp10_default_iscan_32x32,
+        default_scan_32x32_neighbors },
+      { default_scan_32x32, vp10_default_iscan_32x32,
+        default_scan_32x32_neighbors },
+      { default_scan_32x32, vp10_default_iscan_32x32,
+        default_scan_32x32_neighbors },
+      { default_scan_32x32, vp10_default_iscan_32x32,
+        default_scan_32x32_neighbors },
   }
 };
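
Each scan_order entry above bundles three tables: a forward scan (the raster position visited at step i), an inverse scan (the step at which each raster position is visited), and a neighbor table used for coefficient-context derivation. The forward and inverse tables are, by construction, inverse permutations; a minimal sanity-check sketch (the helper below is hypothetical, not part of this tree):

  #include <assert.h>
  #include <stdint.h>

  /* Hypothetical check: if step i visits raster position scan[i], then
   * iscan[scan[i]] must map back to i for every coefficient. */
  static void check_scan_inverse(const int16_t *scan, const int16_t *iscan,
                                 int num_coeffs) {
    int i;
    for (i = 0; i < num_coeffs; ++i) assert(iscan[scan[i]] == i);
  }

  /* Usage with the tables above, e.g.:
   *   check_scan_inverse(default_scan_4x4, vp10_default_iscan_4x4, 16); */
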
diff --git a/vp10/common/scan.h b/vp10/common/scan.h
index f5a020f1e744bae35be5ffe1b4dfff7305190763..acff121a858a46785526652de7a801feddada6a4 100644
--- a/vp10/common/scan.h
+++ b/vp10/common/scan.h
@@ -35,7 +35,8 @@ extern const scan_order vp10_scan_orders[TX_SIZES][TX_TYPES];
 static INLINE int get_coef_context(const int16_t *neighbors,
                                    const uint8_t *token_cache, int c) {
   return (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
-          token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1;
+          token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >>
+         1;
 }
 
 static INLINE const scan_order *get_scan(TX_SIZE tx_size, TX_TYPE tx_type) {
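
get_coef_context above is a rounded average: it looks up the token-cache values at the two neighbor positions recorded for coefficient c and computes (1 + a + b) >> 1. With neighbor tokens 1 and 2, for example, the context is (1 + 1 + 2) >> 1 = 2. A self-contained sketch of the same arithmetic (toy tables, not real scan data):

  #include <stdint.h>
  #include <stdio.h>

  #define MAX_NEIGHBORS 2 /* two neighbor positions per coefficient */

  static int coef_context(const int16_t *neighbors, const uint8_t *token_cache,
                          int c) {
    return (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
            token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >>
           1;
  }

  int main(void) {
    const int16_t neighbors[] = { 0, 0, 0, 1 }; /* toy layout: coeff 1 sees 0,1 */
    const uint8_t token_cache[] = { 1, 2 };
    printf("%d\n", coef_context(neighbors, token_cache, 1)); /* prints 2 */
    return 0;
  }
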
diff --git a/vp10/common/seg_common.c b/vp10/common/seg_common.c
index 1bf09b9a0f97542f0c5f7ca242758dab06988f47..7e5dcc2bb81bb8ecd366d553b98fe86aaa48ab27 100644
--- a/vp10/common/seg_common.c
+++ b/vp10/common/seg_common.c
@@ -17,8 +17,8 @@
 
 static const int seg_feature_data_signed[SEG_LVL_MAX] = { 1, 1, 0, 0 };
 
-static const int seg_feature_data_max[SEG_LVL_MAX] = {
-  MAXQ, MAX_LOOP_FILTER, 3, 0 };
+static const int seg_feature_data_max[SEG_LVL_MAX] = { MAXQ, MAX_LOOP_FILTER, 3,
+                                                       0 };
 
 // These functions provide access to new segment level features.
 // Eventually these functions may be "optimized out" but for the moment,
@@ -31,7 +31,7 @@ void vp10_clearall_segfeatures(struct segmentation *seg) {
 }
 
 void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
-                           SEG_LVL_FEATURES feature_id) {
+                            SEG_LVL_FEATURES feature_id) {
   seg->feature_mask[segment_id] |= 1 << feature_id;
 }
 
@@ -44,7 +44,7 @@ int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
 }
 
 void vp10_set_segdata(struct segmentation *seg, int segment_id,
-                     SEG_LVL_FEATURES feature_id, int seg_data) {
+                      SEG_LVL_FEATURES feature_id, int seg_data) {
   assert(seg_data <= seg_feature_data_max[feature_id]);
   if (seg_data < 0) {
     assert(seg_feature_data_signed[feature_id]);
@@ -55,9 +55,7 @@ void vp10_set_segdata(struct segmentation *seg, int segment_id,
 }
 
 const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
-  2,  4,  6,  8, 10, 12,
-  0, -1, -2, -3, -4, -5, -6, -7
+  2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7
 };
 
-
 // TBD? Functions to read and write segment data with range / validity checking
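
vp10_segment_tree above follows the usual vpx_tree_index convention: a non-positive entry is a leaf holding the negated symbol (here, segment ids 0..7), a positive entry is the index of the next left/right pair, so every segment id sits three bits deep. A hedged sketch of the conventional tree walk (read_bit() is a hypothetical stand-in for the bool decoder's probability-weighted bit read):

  #include <stdint.h>

  typedef int16_t vpx_tree_index;

  extern int read_bit(void); /* stand-in for the entropy decoder */

  static int read_segment_id(const vpx_tree_index *tree) {
    vpx_tree_index i = 0;
    do {
      i = tree[i + read_bit()]; /* step left (0) or right (1) */
    } while (i > 0);            /* positive: internal node; else leaf */
    return -i;                  /* bits 1,1,1 on the tree above yield 7 */
  }
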
diff --git a/vp10/common/seg_common.h b/vp10/common/seg_common.h
index cd38e8ee0d1070c72f83c5c848a9bef3ae0e4f39..4f8b80e801430a4fa03ffd5d0a686d20165cb0a5 100644
--- a/vp10/common/seg_common.h
+++ b/vp10/common/seg_common.h
@@ -17,24 +17,23 @@
 extern "C" {
 #endif
 
-#define SEGMENT_DELTADATA   0
-#define SEGMENT_ABSDATA     1
+#define SEGMENT_DELTADATA 0
+#define SEGMENT_ABSDATA 1
 
-#define MAX_SEGMENTS     8
-#define SEG_TREE_PROBS   (MAX_SEGMENTS-1)
+#define MAX_SEGMENTS 8
+#define SEG_TREE_PROBS (MAX_SEGMENTS - 1)
 
 #define PREDICTION_PROBS 3
 
 // Segment level features.
 typedef enum {
-  SEG_LVL_ALT_Q = 0,               // Use alternate Quantizer ....
-  SEG_LVL_ALT_LF = 1,              // Use alternate loop filter value...
-  SEG_LVL_REF_FRAME = 2,           // Optional Segment reference frame
-  SEG_LVL_SKIP = 3,                // Optional Segment (0,0) + skip mode
-  SEG_LVL_MAX = 4                  // Number of features supported
+  SEG_LVL_ALT_Q = 0,      // Use alternate Quantizer ....
+  SEG_LVL_ALT_LF = 1,     // Use alternate loop filter value...
+  SEG_LVL_REF_FRAME = 2,  // Optional Segment reference frame
+  SEG_LVL_SKIP = 3,       // Optional Segment (0,0) + skip mode
+  SEG_LVL_MAX = 4         // Number of features supported
 } SEG_LVL_FEATURES;
 
-
 struct segmentation {
   uint8_t enabled;
   uint8_t update_map;
@@ -54,24 +53,20 @@ struct segmentation_probs {
 static INLINE int segfeature_active(const struct segmentation *seg,
                                     int segment_id,
                                     SEG_LVL_FEATURES feature_id) {
-  return seg->enabled &&
-         (seg->feature_mask[segment_id] & (1 << feature_id));
+  return seg->enabled && (seg->feature_mask[segment_id] & (1 << feature_id));
 }
 
 void vp10_clearall_segfeatures(struct segmentation *seg);
 
-void vp10_enable_segfeature(struct segmentation *seg,
-                           int segment_id,
-                           SEG_LVL_FEATURES feature_id);
+void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
+                            SEG_LVL_FEATURES feature_id);
 
 int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
 
 int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
 
-void vp10_set_segdata(struct segmentation *seg,
-                     int segment_id,
-                     SEG_LVL_FEATURES feature_id,
-                     int seg_data);
+void vp10_set_segdata(struct segmentation *seg, int segment_id,
+                      SEG_LVL_FEATURES feature_id, int seg_data);
 
 static INLINE int get_segdata(const struct segmentation *seg, int segment_id,
                               SEG_LVL_FEATURES feature_id) {
@@ -85,4 +80,3 @@ extern const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
 #endif
 
 #endif  // VP10_COMMON_SEG_COMMON_H_
-
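
The feature plumbing in this header is a per-segment bitmask: vp10_enable_segfeature sets bit feature_id in feature_mask[segment_id] and segfeature_active tests it, gated on seg->enabled. A toy round-trip of just that mask logic (struct fields trimmed to the two involved):

  #include <stdint.h>
  #include <stdio.h>

  struct toy_seg {
    uint8_t enabled;
    unsigned feature_mask[8]; /* MAX_SEGMENTS */
  };

  int main(void) {
    struct toy_seg seg = { 1, { 0 } };
    seg.feature_mask[3] |= 1 << 2; /* SEG_LVL_REF_FRAME on segment 3 */
    printf("%d\n", seg.enabled && (seg.feature_mask[3] & (1 << 2))); /* 1 */
    printf("%d\n", seg.enabled && (seg.feature_mask[3] & (1 << 0))); /* 0 */
    return 0;
  }
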
diff --git a/vp10/common/thread_common.c b/vp10/common/thread_common.c
index bb4c8bb67aa9f46f74cf80dec02d2a88d3683520..3678c3bcbcd38a9d1c56b10086a856679cf74f16 100644
--- a/vp10/common/thread_common.c
+++ b/vp10/common/thread_common.c
@@ -29,8 +29,7 @@ static INLINE void mutex_lock(pthread_mutex_t *const mutex) {
     }
   }
 
-  if (!locked)
-    pthread_mutex_lock(mutex);
+  if (!locked) pthread_mutex_lock(mutex);
 }
 #endif  // CONFIG_MULTITHREAD
 
@@ -64,8 +63,7 @@ static INLINE void sync_write(VP10LfSync *const lf_sync, int r, int c,
 
   if (c < sb_cols - 1) {
     cur = c;
-    if (c % nsync)
-      sig = 0;
+    if (c % nsync) sig = 0;
   } else {
     cur = sb_cols + nsync;
   }
@@ -87,12 +85,10 @@ static INLINE void sync_write(VP10LfSync *const lf_sync, int r, int c,
 }
 
 // Implement row loopfiltering for each thread.
-static INLINE
-void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer,
-                             VP10_COMMON *const cm,
-                             struct macroblockd_plane planes[MAX_MB_PLANE],
-                             int start, int stop, int y_only,
-                             VP10LfSync *const lf_sync) {
+static INLINE void thread_loop_filter_rows(
+    const YV12_BUFFER_CONFIG *const frame_buffer, VP10_COMMON *const cm,
+    struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop,
+    int y_only, VP10LfSync *const lf_sync) {
   const int num_planes = y_only ? 1 : MAX_MB_PLANE;
   const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
   int mi_row, mi_col;
@@ -121,8 +117,7 @@ void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer,
       vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
       // TODO(JBB): Make setup_mask work for non 420.
-      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
-                     &lfm);
+      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
 
       vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
       for (plane = 1; plane < num_planes; ++plane) {
@@ -135,7 +130,7 @@ void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer,
             break;
           case LF_PATH_SLOW:
             vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
-                                          mi_row, mi_col);
+                                           mi_row, mi_col);
             break;
         }
       }
@@ -154,8 +149,7 @@ static int loop_filter_row_worker(VP10LfSync *const lf_sync,
   return 1;
 }
 
-static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame,
-                                VP10_COMMON *cm,
+static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
                                 struct macroblockd_plane planes[MAX_MB_PLANE],
                                 int start, int stop, int y_only,
                                 VPxWorker *workers, int nworkers,
@@ -214,13 +208,11 @@ static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame,
   }
 }
 
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
-                              VP10_COMMON *cm,
-                              struct macroblockd_plane planes[MAX_MB_PLANE],
-                              int frame_filter_level,
-                              int y_only, int partial_frame,
-                              VPxWorker *workers, int num_workers,
-                              VP10LfSync *lf_sync) {
+void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+                               struct macroblockd_plane planes[MAX_MB_PLANE],
+                               int frame_filter_level, int y_only,
+                               int partial_frame, VPxWorker *workers,
+                               int num_workers, VP10LfSync *lf_sync) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
 
   if (!frame_filter_level) return;
@@ -235,8 +227,8 @@ void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
   end_mi_row = start_mi_row + mi_rows_to_filter;
   vp10_loop_filter_frame_init(cm, frame_filter_level);
 
-  loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row,
-                      y_only, workers, num_workers, lf_sync);
+  loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only,
+                      workers, num_workers, lf_sync);
 }
 
 // Set up nsync by width.
@@ -255,7 +247,7 @@ static INLINE int get_sync_range(int width) {
 
 // Allocate memory for lf row synchronization
 void vp10_loop_filter_alloc(VP10LfSync *lf_sync, VP10_COMMON *cm, int rows,
-                           int width, int num_workers) {
+                            int width, int num_workers) {
   lf_sync->rows = rows;
 #if CONFIG_MULTITHREAD
   {
@@ -319,7 +311,7 @@ void vp10_loop_filter_dealloc(VP10LfSync *lf_sync) {
 
 // Accumulate frame counts.
 void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
-                                 int is_dec) {
+                                  int is_dec) {
   int i, j, k, l, m;
 
   for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
@@ -355,11 +347,11 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
             for (m = 0; m < COEFF_CONTEXTS; m++)
               cm->counts.eob_branch[i][j][k][l][m] +=
                   counts->eob_branch[i][j][k][l][m];
-                // In the encoder, cm->counts.coef is only updated at frame
-                // level, so not need to accumulate it here.
-                // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++)
-                //   cm->counts.coef[i][j][k][l][m][n] +=
-                //       counts->coef[i][j][k][l][m][n];
+    // In the encoder, cm->counts.coef is only updated at frame
+    // level, so there is no need to accumulate it here.
+    // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++)
+    //   cm->counts.coef[i][j][k][l][m][n] +=
+    //       counts->coef[i][j][k][l][m][n];
   }
 
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
@@ -381,11 +373,10 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
   for (i = 0; i < REF_CONTEXTS; i++)
     for (j = 0; j < 2; j++)
       for (k = 0; k < 2; k++)
-      cm->counts.single_ref[i][j][k] += counts->single_ref[i][j][k];
+        cm->counts.single_ref[i][j][k] += counts->single_ref[i][j][k];
 
   for (i = 0; i < REF_CONTEXTS; i++)
-    for (j = 0; j < 2; j++)
-      cm->counts.comp_ref[i][j] += counts->comp_ref[i][j];
+    for (j = 0; j < 2; j++) cm->counts.comp_ref[i][j] += counts->comp_ref[i][j];
 
   for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
     for (j = 0; j < TX_SIZES; j++)
@@ -402,8 +393,7 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
     cm->counts.tx.tx_totals[i] += counts->tx.tx_totals[i];
 
   for (i = 0; i < SKIP_CONTEXTS; i++)
-    for (j = 0; j < 2; j++)
-      cm->counts.skip[i][j] += counts->skip[i][j];
+    for (j = 0; j < 2; j++) cm->counts.skip[i][j] += counts->skip[i][j];
 
   for (i = 0; i < MV_JOINTS; i++)
     cm->counts.mv.joints[i] += counts->mv.joints[i];
@@ -418,8 +408,7 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
       comps->hp[i] += comps_t->hp[i];
     }
 
-    for (i = 0; i < MV_CLASSES; i++)
-      comps->classes[i] += comps_t->classes[i];
+    for (i = 0; i < MV_CLASSES; i++) comps->classes[i] += comps_t->classes[i];
 
     for (i = 0; i < CLASS0_SIZE; i++) {
       comps->class0[i] += comps_t->class0[i];
@@ -428,11 +417,9 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
     }
 
     for (i = 0; i < MV_OFFSET_BITS; i++)
-      for (j = 0; j < 2; j++)
-        comps->bits[i][j] += comps_t->bits[i][j];
+      for (j = 0; j < 2; j++) comps->bits[i][j] += comps_t->bits[i][j];
 
-    for (i = 0; i < MV_FP_SIZE; i++)
-      comps->fp[i] += comps_t->fp[i];
+    for (i = 0; i < MV_FP_SIZE; i++) comps->fp[i] += comps_t->fp[i];
   }
 
   for (i = 0; i < EXT_TX_SIZES; i++) {
@@ -448,8 +435,7 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
 
 #if CONFIG_MISC_FIXES
   for (i = 0; i < PREDICTION_PROBS; i++)
-    for (j = 0; j < 2; j++)
-      cm->counts.seg.pred[i][j] += counts->seg.pred[i][j];
+    for (j = 0; j < 2; j++) cm->counts.seg.pred[i][j] += counts->seg.pred[i][j];
 
   for (i = 0; i < MAX_SEGMENTS; i++) {
     cm->counts.seg.tree_total[i] += counts->seg.tree_total[i];
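
The row loop filter above parallelizes across superblock rows: row r may not filter column c until row r - 1 has advanced at least nsync superblocks past c, because the filter taps cross the row boundary. sync_write publishes a row's progress (cur_sb_col, signaled every nsync columns) and the matching read side waits on it. A rough sketch of that dependency check only; the real code blocks on a pthread condition variable rather than spinning, and the exact comparison lives in sync_read, which is outside this hunk:

  /* Sketch: proceed at (r, c) once the row above has filtered c + nsync
   * superblock columns. cur_sb_col[] holds per-row progress. */
  static void wait_for_row_above(const volatile int *cur_sb_col, int r, int c,
                                 int nsync) {
    if (r == 0) return; /* top row has no dependency */
    while (cur_sb_col[r - 1] < c + nsync) {
      /* spin-wait stand-in for the condition-variable wait */
    }
  }
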
diff --git a/vp10/common/thread_common.h b/vp10/common/thread_common.h
index 908065e92f9608878e95e3d0790634b439a891f6..1ceb680e1f61205cd584cef9c3d68a2e22cc4178 100644
--- a/vp10/common/thread_common.h
+++ b/vp10/common/thread_common.h
@@ -47,16 +47,14 @@ void vp10_loop_filter_alloc(VP10LfSync *lf_sync, struct VP10Common *cm,
 void vp10_loop_filter_dealloc(VP10LfSync *lf_sync);
 
 // Multi-threaded loopfilter that uses the tile threads.
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
-                              struct VP10Common *cm,
-                              struct macroblockd_plane planes[MAX_MB_PLANE],
-                              int frame_filter_level,
-                              int y_only, int partial_frame,
-                              VPxWorker *workers, int num_workers,
-                              VP10LfSync *lf_sync);
+void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
+                               struct macroblockd_plane planes[MAX_MB_PLANE],
+                               int frame_filter_level, int y_only,
+                               int partial_frame, VPxWorker *workers,
+                               int num_workers, VP10LfSync *lf_sync);
 
 void vp10_accumulate_frame_counts(struct VP10Common *cm,
-                                 struct FRAME_COUNTS *counts, int is_dec);
+                                  struct FRAME_COUNTS *counts, int is_dec);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/common/tile_common.c b/vp10/common/tile_common.c
index 4d92b4c6bf3a8779e4b00eaa13922252ee83505a..e38a704a1bd913b92ffca5a3e06bce99a3ae0cbc 100644
--- a/vp10/common/tile_common.c
+++ b/vp10/common/tile_common.c
@@ -38,20 +38,18 @@ void vp10_tile_init(TileInfo *tile, const VP10_COMMON *cm, int row, int col) {
 
 static int get_min_log2_tile_cols(const int sb64_cols) {
   int min_log2 = 0;
-  while ((MAX_TILE_WIDTH_B64 << min_log2) < sb64_cols)
-    ++min_log2;
+  while ((MAX_TILE_WIDTH_B64 << min_log2) < sb64_cols) ++min_log2;
   return min_log2;
 }
 
 static int get_max_log2_tile_cols(const int sb64_cols) {
   int max_log2 = 1;
-  while ((sb64_cols >> max_log2) >= MIN_TILE_WIDTH_B64)
-    ++max_log2;
+  while ((sb64_cols >> max_log2) >= MIN_TILE_WIDTH_B64) ++max_log2;
   return max_log2 - 1;
 }
 
-void vp10_get_tile_n_bits(int mi_cols,
-                         int *min_log2_tile_cols, int *max_log2_tile_cols) {
+void vp10_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
+                          int *max_log2_tile_cols) {
   const int sb64_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2;
   *min_log2_tile_cols = get_min_log2_tile_cols(sb64_cols);
   *max_log2_tile_cols = get_max_log2_tile_cols(sb64_cols);
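
vp10_get_tile_n_bits brackets log2(tile columns): the minimum forces every tile under MAX_TILE_WIDTH_B64 superblocks wide, while the maximum keeps every tile at least MIN_TILE_WIDTH_B64 wide. Assuming libvpx's usual constants (64 and 4), a 1920-pixel-wide frame gives sb64_cols = 30, hence min_log2 = 0 and max_log2 = 2, i.e. one to four tile columns. A worked check:

  #include <stdio.h>

  /* Assumed constants from tile_common.h (not shown in this hunk). */
  #define MIN_TILE_WIDTH_B64 4
  #define MAX_TILE_WIDTH_B64 64

  int main(void) {
    const int sb64_cols = 30; /* 1920 px -> 240 mi_cols -> 30 sb64 columns */
    int min_log2 = 0, max_log2 = 1;
    while ((MAX_TILE_WIDTH_B64 << min_log2) < sb64_cols) ++min_log2;
    while ((sb64_cols >> max_log2) >= MIN_TILE_WIDTH_B64) ++max_log2;
    printf("min=%d max=%d\n", min_log2, max_log2 - 1); /* min=0 max=2 */
    return 0;
  }
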
diff --git a/vp10/common/tile_common.h b/vp10/common/tile_common.h
index 09cf060d8a644dac90c4bdc09889a160f727b5b5..6591e864b3d3655995ad7ae9f9b8c4d0f69c96cc 100644
--- a/vp10/common/tile_common.h
+++ b/vp10/common/tile_common.h
@@ -24,14 +24,14 @@ typedef struct TileInfo {
 
 // initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on
 // 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)'
-void vp10_tile_init(TileInfo *tile, const struct VP10Common *cm,
-                   int row, int col);
+void vp10_tile_init(TileInfo *tile, const struct VP10Common *cm, int row,
+                    int col);
 
 void vp10_tile_set_row(TileInfo *tile, const struct VP10Common *cm, int row);
 void vp10_tile_set_col(TileInfo *tile, const struct VP10Common *cm, int col);
 
-void vp10_get_tile_n_bits(int mi_cols,
-                         int *min_log2_tile_cols, int *max_log2_tile_cols);
+void vp10_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
+                          int *max_log2_tile_cols);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/common/vp10_fwd_txfm.c b/vp10/common/vp10_fwd_txfm.c
index 805f5590ec7badf308d056f0527694d79e531018..8ec231602c2e9f077ae091f19ea502b2dab91ac6 100644
--- a/vp10/common/vp10_fwd_txfm.c
+++ b/vp10/common/vp10_fwd_txfm.c
@@ -71,8 +71,7 @@ void vp10_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   {
     int i, j;
     for (i = 0; i < 4; ++i) {
-      for (j = 0; j < 4; ++j)
-        output[j + i * 4] = (output[j + i * 4] + 1) >> 2;
+      for (j = 0; j < 4; ++j) output[j + i * 4] = (output[j + i * 4] + 1) >> 2;
     }
   }
 }
@@ -81,15 +80,14 @@ void vp10_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 4; ++r)
-    for (c = 0; c < 4; ++c)
-      sum += input[r * stride + c];
+    for (c = 0; c < 4; ++c) sum += input[r * stride + c];
 
   output[0] = sum << 1;
   output[1] = 0;
 }
 
-void vp10_fdct8x8_c(const int16_t *input,
-    tran_low_t *final_output, int stride) {
+void vp10_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+                    int stride) {
   int i, j;
   tran_low_t intermediate[64];
   int pass;
@@ -134,8 +132,8 @@ void vp10_fdct8x8_c(const int16_t *input,
       x3 = s0 - s3;
       t0 = (x0 + x1) * cospi_16_64;
       t1 = (x0 - x1) * cospi_16_64;
-      t2 =  x2 * cospi_24_64 + x3 *  cospi_8_64;
-      t3 = -x2 * cospi_8_64  + x3 * cospi_24_64;
+      t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+      t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
       output[0] = (tran_low_t)fdct_round_shift(t0);
       output[2] = (tran_low_t)fdct_round_shift(t2);
       output[4] = (tran_low_t)fdct_round_shift(t1);
@@ -154,24 +152,23 @@ void vp10_fdct8x8_c(const int16_t *input,
       x3 = s7 + t3;
 
       // Stage 4
-      t0 = x0 * cospi_28_64 + x3 *   cospi_4_64;
-      t1 = x1 * cospi_12_64 + x2 *  cospi_20_64;
+      t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+      t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
       t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
-      t3 = x3 * cospi_28_64 + x0 *  -cospi_4_64;
+      t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
       output[1] = (tran_low_t)fdct_round_shift(t0);
       output[3] = (tran_low_t)fdct_round_shift(t2);
       output[5] = (tran_low_t)fdct_round_shift(t1);
       output[7] = (tran_low_t)fdct_round_shift(t3);
       output += 8;
     }
-    in  = intermediate;
+    in = intermediate;
     output = final_output;
   }
 
   // Rows
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      final_output[j + i * 8] /= 2;
+    for (j = 0; j < 8; ++j) final_output[j + i * 8] /= 2;
   }
 }
 
@@ -179,8 +176,7 @@ void vp10_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 8; ++r)
-    for (c = 0; c < 8; ++c)
-      sum += input[r * stride + c];
+    for (c = 0; c < 8; ++c) sum += input[r * stride + c];
 
   output[0] = sum;
   output[1] = 0;
@@ -216,11 +212,11 @@ void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         input[3] = (in_pass0[3 * stride] + in_pass0[12 * stride]) * 4;
         input[4] = (in_pass0[4 * stride] + in_pass0[11 * stride]) * 4;
         input[5] = (in_pass0[5 * stride] + in_pass0[10 * stride]) * 4;
-        input[6] = (in_pass0[6 * stride] + in_pass0[ 9 * stride]) * 4;
-        input[7] = (in_pass0[7 * stride] + in_pass0[ 8 * stride]) * 4;
+        input[6] = (in_pass0[6 * stride] + in_pass0[9 * stride]) * 4;
+        input[7] = (in_pass0[7 * stride] + in_pass0[8 * stride]) * 4;
         // Calculate input for the next 8 results.
-        step1[0] = (in_pass0[7 * stride] - in_pass0[ 8 * stride]) * 4;
-        step1[1] = (in_pass0[6 * stride] - in_pass0[ 9 * stride]) * 4;
+        step1[0] = (in_pass0[7 * stride] - in_pass0[8 * stride]) * 4;
+        step1[1] = (in_pass0[6 * stride] - in_pass0[9 * stride]) * 4;
         step1[2] = (in_pass0[5 * stride] - in_pass0[10 * stride]) * 4;
         step1[3] = (in_pass0[4 * stride] - in_pass0[11 * stride]) * 4;
         step1[4] = (in_pass0[3 * stride] - in_pass0[12 * stride]) * 4;
@@ -235,11 +231,11 @@ void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         input[3] = ((in[3 * 16] + 1) >> 2) + ((in[12 * 16] + 1) >> 2);
         input[4] = ((in[4 * 16] + 1) >> 2) + ((in[11 * 16] + 1) >> 2);
         input[5] = ((in[5 * 16] + 1) >> 2) + ((in[10 * 16] + 1) >> 2);
-        input[6] = ((in[6 * 16] + 1) >> 2) + ((in[ 9 * 16] + 1) >> 2);
-        input[7] = ((in[7 * 16] + 1) >> 2) + ((in[ 8 * 16] + 1) >> 2);
+        input[6] = ((in[6 * 16] + 1) >> 2) + ((in[9 * 16] + 1) >> 2);
+        input[7] = ((in[7 * 16] + 1) >> 2) + ((in[8 * 16] + 1) >> 2);
         // Calculate input for the next 8 results.
-        step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[ 8 * 16] + 1) >> 2);
-        step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[ 9 * 16] + 1) >> 2);
+        step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[8 * 16] + 1) >> 2);
+        step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[9 * 16] + 1) >> 2);
         step1[2] = ((in[5 * 16] + 1) >> 2) - ((in[10 * 16] + 1) >> 2);
         step1[3] = ((in[4 * 16] + 1) >> 2) - ((in[11 * 16] + 1) >> 2);
         step1[4] = ((in[3 * 16] + 1) >> 2) - ((in[12 * 16] + 1) >> 2);
@@ -270,7 +266,7 @@ void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         x3 = s0 - s3;
         t0 = (x0 + x1) * cospi_16_64;
         t1 = (x0 - x1) * cospi_16_64;
-        t2 = x3 * cospi_8_64  + x2 * cospi_24_64;
+        t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
         t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
         out[0] = (tran_low_t)fdct_round_shift(t0);
         out[4] = (tran_low_t)fdct_round_shift(t2);
@@ -290,10 +286,10 @@ void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         x3 = s7 + t3;
 
         // Stage 4
-        t0 = x0 * cospi_28_64 + x3 *   cospi_4_64;
-        t1 = x1 * cospi_12_64 + x2 *  cospi_20_64;
+        t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+        t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
         t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
-        t3 = x3 * cospi_28_64 + x0 *  -cospi_4_64;
+        t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
         out[2] = (tran_low_t)fdct_round_shift(t0);
         out[6] = (tran_low_t)fdct_round_shift(t2);
         out[10] = (tran_low_t)fdct_round_shift(t1);
@@ -320,12 +316,12 @@ void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         step3[6] = step1[6] + step2[5];
         step3[7] = step1[7] + step2[4];
         // step 4
-        temp1 = step3[1] *  -cospi_8_64 + step3[6] * cospi_24_64;
-        temp2 = step3[2] * cospi_24_64 + step3[5] *  cospi_8_64;
+        temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
+        temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
         step2[1] = fdct_round_shift(temp1);
         step2[2] = fdct_round_shift(temp2);
         temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
-        temp2 = step3[1] * cospi_24_64 + step3[6] *  cospi_8_64;
+        temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
         step2[5] = fdct_round_shift(temp1);
         step2[6] = fdct_round_shift(temp2);
         // step 5
@@ -338,20 +334,20 @@ void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         step1[6] = step3[7] - step2[6];
         step1[7] = step3[7] + step2[6];
         // step 6
-        temp1 = step1[0] * cospi_30_64 + step1[7] *  cospi_2_64;
+        temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
         temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
         out[1] = (tran_low_t)fdct_round_shift(temp1);
         out[9] = (tran_low_t)fdct_round_shift(temp2);
         temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
-        temp2 = step1[3] *  cospi_6_64 + step1[4] * cospi_26_64;
+        temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
         out[5] = (tran_low_t)fdct_round_shift(temp1);
         out[13] = (tran_low_t)fdct_round_shift(temp2);
-        temp1 = step1[3] * -cospi_26_64 + step1[4] *  cospi_6_64;
+        temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
         temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
         out[3] = (tran_low_t)fdct_round_shift(temp1);
         out[11] = (tran_low_t)fdct_round_shift(temp2);
         temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
-        temp2 = step1[0] *  -cospi_2_64 + step1[7] * cospi_30_64;
+        temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
         out[7] = (tran_low_t)fdct_round_shift(temp1);
         out[15] = (tran_low_t)fdct_round_shift(temp2);
       }
@@ -370,8 +366,7 @@ void vp10_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 16; ++r)
-    for (c = 0; c < 16; ++c)
-      sum += input[r * stride + c];
+    for (c = 0; c < 16; ++c) sum += input[r * stride + c];
 
   output[0] = sum >> 1;
   output[1] = 0;
@@ -678,36 +673,36 @@ void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
   step[31] = output[31] + output[30];
 
   // Final stage --- outputs indices are bit-reversed.
-  output[0]  = step[0];
+  output[0] = step[0];
   output[16] = step[1];
-  output[8]  = step[2];
+  output[8] = step[2];
   output[24] = step[3];
-  output[4]  = step[4];
+  output[4] = step[4];
   output[20] = step[5];
   output[12] = step[6];
   output[28] = step[7];
-  output[2]  = step[8];
+  output[2] = step[8];
   output[18] = step[9];
   output[10] = step[10];
   output[26] = step[11];
-  output[6]  = step[12];
+  output[6] = step[12];
   output[22] = step[13];
   output[14] = step[14];
   output[30] = step[15];
 
-  output[1]  = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
+  output[1] = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
   output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64);
-  output[9]  = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
+  output[9] = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
   output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64);
-  output[5]  = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
+  output[5] = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
   output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64);
   output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64);
   output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64);
-  output[3]  = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
+  output[3] = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
   output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64);
   output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64);
   output[27] = dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64);
-  output[7]  = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
+  output[7] = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
   output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * -cospi_9_64);
   output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64);
   output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
@@ -720,8 +715,7 @@ void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   // Columns
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = input[j * stride + i] * 4;
+    for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
     vp10_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
@@ -730,8 +724,7 @@ void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   // Rows
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = output[j + i * 32];
+    for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
     vp10_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
@@ -749,8 +742,7 @@ void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   // Columns
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = input[j * stride + i] * 4;
+    for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
     vp10_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       // TODO(cd): see quality impact of only doing
@@ -762,11 +754,9 @@ void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   // Rows
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = output[j + i * 32];
+    for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
     vp10_fdct32(temp_in, temp_out, 1);
-    for (j = 0; j < 32; ++j)
-      out[j + i * 32] = (tran_low_t)temp_out[j];
+    for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
 
@@ -774,8 +764,7 @@ void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 32; ++r)
-    for (c = 0; c < 32; ++c)
-      sum += input[r * stride + c];
+    for (c = 0; c < 32; ++c) sum += input[r * stride + c];
 
   output[0] = sum >> 3;
   output[1] = 0;
@@ -783,42 +772,42 @@ void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
 
 #if CONFIG_VPX_HIGHBITDEPTH
 void vp10_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
-                          int stride) {
+                           int stride) {
   vp10_fdct4x4_c(input, output, stride);
 }
 
 void vp10_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
-                          int stride) {
+                           int stride) {
   vp10_fdct8x8_c(input, final_output, stride);
 }
 
 void vp10_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
-                            int stride) {
+                             int stride) {
   vp10_fdct8x8_1_c(input, final_output, stride);
 }
 
 void vp10_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
-                            int stride) {
+                             int stride) {
   vp10_fdct16x16_c(input, output, stride);
 }
 
 void vp10_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
-                              int stride) {
+                               int stride) {
   vp10_fdct16x16_1_c(input, output, stride);
 }
 
-void vp10_highbd_fdct32x32_c(const int16_t *input,
-    tran_low_t *out, int stride) {
+void vp10_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
+                             int stride) {
   vp10_fdct32x32_c(input, out, stride);
 }
 
 void vp10_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
-                               int stride) {
+                                int stride) {
   vp10_fdct32x32_rd_c(input, out, stride);
 }
 
-void vp10_highbd_fdct32x32_1_c(const int16_t *input,
-    tran_low_t *out, int stride) {
+void vp10_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+                               int stride) {
   vp10_fdct32x32_1_c(input, out, stride);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
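
Throughout these transforms, cospi_N_64 denotes round(2^14 * cos(N * pi / 64)) and fdct_round_shift() removes the 2^14 scale with rounding (DCT_CONST_BITS is 14 in vpx_dsp's txfm_common.h). As a worked instance: with x0 = x1 = 100, t0 = (x0 + x1) * cospi_16_64 = 200 * 11585, and fdct_round_shift(t0) = (2317000 + 8192) >> 14 = 141, close to 200 / sqrt(2). A sketch under those assumed definitions:

  #include <stdio.h>

  typedef long long tran_high_t; /* stand-in; the real typedef is build-dependent */

  #define DCT_CONST_BITS 14
  #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

  static tran_high_t fdct_round_shift(tran_high_t input) {
    return ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
  }

  int main(void) {
    const tran_high_t cospi_16_64 = 11585; /* round(2^14 * cos(pi/4)) */
    printf("%lld\n", fdct_round_shift(200 * cospi_16_64)); /* prints 141 */
    return 0;
  }
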
diff --git a/vp10/common/vp10_inv_txfm.c b/vp10/common/vp10_inv_txfm.c
index cdfa3c1029dd938655e877c6229bcc84b62938fc..cd5eea3d8ee994f231a29d67b1c6e33a21f43d2d 100644
--- a/vp10/common/vp10_inv_txfm.c
+++ b/vp10/common/vp10_inv_txfm.c
@@ -15,8 +15,8 @@
 #include "vp10/common/vp10_inv_txfm.h"
 
 void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
-/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
-   0.5 shifts per pixel. */
+  /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
+     0.5 shifts per pixel. */
   int i;
   tran_low_t output[16];
   tran_high_t a1, b1, c1, d1, e1;
@@ -66,8 +66,7 @@ void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   }
 }
 
-void vp10_iwht4x4_1_add_c(const tran_low_t *in,
-                          uint8_t *dest,
+void vp10_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
                           int dest_stride) {
   int i;
   tran_high_t a1, e1;
@@ -129,8 +128,7 @@ void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 
   // Columns
   for (i = 0; i < 4; ++i) {
-    for (j = 0; j < 4; ++j)
-      temp_in[j] = out[j * 4 + i];
+    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
     vp10_idct4_c(temp_in, temp_out);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -140,7 +138,7 @@ void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 }
 
 void vp10_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
-                         int dest_stride) {
+                          int dest_stride) {
   int i;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8);
@@ -225,8 +223,7 @@ void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 
   // Then transform columns
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     vp10_idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -242,8 +239,7 @@ void vp10_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8);
   a1 = ROUND_POWER_OF_TWO(out, 5);
   for (j = 0; j < 8; ++j) {
-    for (i = 0; i < 8; ++i)
-      dest[i] = clip_pixel_add(dest[i], a1);
+    for (i = 0; i < 8; ++i) dest[i] = clip_pixel_add(dest[i], a1);
     dest += stride;
   }
 }
@@ -298,20 +294,20 @@ void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t x7 = input[6];
 
   if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
-    output[0] = output[1] = output[2] = output[3] = output[4]
-              = output[5] = output[6] = output[7] = 0;
+    output[0] = output[1] = output[2] = output[3] = output[4] = output[5] =
+        output[6] = output[7] = 0;
     return;
   }
 
   // stage 1
-  s0 = (int)(cospi_2_64  * x0 + cospi_30_64 * x1);
-  s1 = (int)(cospi_30_64 * x0 - cospi_2_64  * x1);
+  s0 = (int)(cospi_2_64 * x0 + cospi_30_64 * x1);
+  s1 = (int)(cospi_30_64 * x0 - cospi_2_64 * x1);
   s2 = (int)(cospi_10_64 * x2 + cospi_22_64 * x3);
   s3 = (int)(cospi_22_64 * x2 - cospi_10_64 * x3);
   s4 = (int)(cospi_18_64 * x4 + cospi_14_64 * x5);
   s5 = (int)(cospi_14_64 * x4 - cospi_18_64 * x5);
-  s6 = (int)(cospi_26_64 * x6 + cospi_6_64  * x7);
-  s7 = (int)(cospi_6_64  * x6 - cospi_26_64 * x7);
+  s6 = (int)(cospi_26_64 * x6 + cospi_6_64 * x7);
+  s7 = (int)(cospi_6_64 * x6 - cospi_26_64 * x7);
 
   x0 = WRAPLOW(dct_const_round_shift(s0 + s4), 8);
   x1 = WRAPLOW(dct_const_round_shift(s1 + s5), 8);
@@ -378,8 +374,7 @@ void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 
   // Then transform columns
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     vp10_idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -393,22 +388,22 @@ void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t temp1, temp2;
 
   // stage 1
-  step1[0] = input[0/2];
-  step1[1] = input[16/2];
-  step1[2] = input[8/2];
-  step1[3] = input[24/2];
-  step1[4] = input[4/2];
-  step1[5] = input[20/2];
-  step1[6] = input[12/2];
-  step1[7] = input[28/2];
-  step1[8] = input[2/2];
-  step1[9] = input[18/2];
-  step1[10] = input[10/2];
-  step1[11] = input[26/2];
-  step1[12] = input[6/2];
-  step1[13] = input[22/2];
-  step1[14] = input[14/2];
-  step1[15] = input[30/2];
+  step1[0] = input[0 / 2];
+  step1[1] = input[16 / 2];
+  step1[2] = input[8 / 2];
+  step1[3] = input[24 / 2];
+  step1[4] = input[4 / 2];
+  step1[5] = input[20 / 2];
+  step1[6] = input[12 / 2];
+  step1[7] = input[28 / 2];
+  step1[8] = input[2 / 2];
+  step1[9] = input[18 / 2];
+  step1[10] = input[10 / 2];
+  step1[11] = input[26 / 2];
+  step1[12] = input[6 / 2];
+  step1[13] = input[22 / 2];
+  step1[14] = input[14 / 2];
+  step1[15] = input[30 / 2];
 
   // stage 2
   step2[0] = step1[0];
@@ -554,7 +549,7 @@ void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) {
 }
 
 void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
-                             int stride) {
+                              int stride) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j;
@@ -569,8 +564,7 @@ void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Then transform columns
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j * 16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     vp10_idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -600,21 +594,20 @@ void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t x14 = input[1];
   tran_high_t x15 = input[14];
 
-  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
-           | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
-    output[0] = output[1] = output[2] = output[3] = output[4]
-              = output[5] = output[6] = output[7] = output[8]
-              = output[9] = output[10] = output[11] = output[12]
-              = output[13] = output[14] = output[15] = 0;
+  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+        x13 | x14 | x15)) {
+    output[0] = output[1] = output[2] = output[3] = output[4] = output[5] =
+        output[6] = output[7] = output[8] = output[9] = output[10] =
+            output[11] = output[12] = output[13] = output[14] = output[15] = 0;
     return;
   }
 
   // stage 1
-  s0 = x0 * cospi_1_64  + x1 * cospi_31_64;
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
   s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
-  s2 = x2 * cospi_5_64  + x3 * cospi_27_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
   s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
-  s4 = x4 * cospi_9_64  + x5 * cospi_23_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
   s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
   s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
   s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -623,9 +616,9 @@ void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
   s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
   s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
-  s13 = x12 * cospi_7_64  - x13 * cospi_25_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
   s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
-  s15 = x14 * cospi_3_64  - x15 * cospi_29_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
 
   x0 = WRAPLOW(dct_const_round_shift(s0 + s8), 8);
   x1 = WRAPLOW(dct_const_round_shift(s1 + s9), 8);
@@ -653,14 +646,14 @@ void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   s5 = x5;
   s6 = x6;
   s7 = x7;
-  s8 =    x8 * cospi_4_64   + x9 * cospi_28_64;
-  s9 =    x8 * cospi_28_64  - x9 * cospi_4_64;
-  s10 =   x10 * cospi_20_64 + x11 * cospi_12_64;
-  s11 =   x10 * cospi_12_64 - x11 * cospi_20_64;
-  s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
-  s13 =   x12 * cospi_4_64  + x13 * cospi_28_64;
-  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
-  s15 =   x14 * cospi_20_64 + x15 * cospi_12_64;
+  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
 
   x0 = WRAPLOW(s0 + s4, 8);
   x1 = WRAPLOW(s1 + s5, 8);
@@ -684,18 +677,18 @@ void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 = x4 * cospi_8_64  + x5 * cospi_24_64;
+  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
   s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
-  s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
-  s7 =   x6 * cospi_8_64  + x7 * cospi_24_64;
+  s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
   s8 = x8;
   s9 = x9;
   s10 = x10;
   s11 = x11;
-  s12 = x12 * cospi_8_64  + x13 * cospi_24_64;
+  s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
   s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
-  s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
-  s15 =   x14 * cospi_8_64  + x15 * cospi_24_64;
+  s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+  s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
 
   x0 = WRAPLOW(check_range(s0 + s2), 8);
   x1 = WRAPLOW(check_range(s1 + s3), 8);
@@ -715,13 +708,13 @@ void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   x15 = WRAPLOW(dct_const_round_shift(s13 - s15), 8);
 
   // stage 4
-  s2 = (- cospi_16_64) * (x2 + x3);
+  s2 = (-cospi_16_64) * (x2 + x3);
   s3 = cospi_16_64 * (x2 - x3);
   s6 = cospi_16_64 * (x6 + x7);
-  s7 = cospi_16_64 * (- x6 + x7);
+  s7 = cospi_16_64 * (-x6 + x7);
   s10 = cospi_16_64 * (x10 + x11);
-  s11 = cospi_16_64 * (- x10 + x11);
-  s14 = (- cospi_16_64) * (x14 + x15);
+  s11 = cospi_16_64 * (-x10 + x11);
+  s14 = (-cospi_16_64) * (x14 + x15);
   s15 = cospi_16_64 * (x14 - x15);
 
   x2 = WRAPLOW(dct_const_round_shift(s2), 8);
@@ -752,7 +745,7 @@ void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
 }
 
 void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
-                            int stride) {
+                             int stride) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -768,19 +761,16 @@ void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Then transform columns
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j*16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     vp10_idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
-      dest[j * stride + i] = clip_pixel_add(
-          dest[j * stride + i],
-          ROUND_POWER_OF_TWO(temp_out[j], 6));
+      dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
+                                            ROUND_POWER_OF_TWO(temp_out[j], 6));
     }
   }
 }
 
-void vp10_idct16x16_1_add_c(const tran_low_t *input,
-                            uint8_t *dest,
+void vp10_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
                             int stride) {
   int i, j;
   tran_high_t a1;
@@ -788,8 +778,7 @@ void vp10_idct16x16_1_add_c(const tran_low_t *input,
   out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8);
   a1 = ROUND_POWER_OF_TWO(out, 6);
   for (j = 0; j < 16; ++j) {
-    for (i = 0; i < 16; ++i)
-      dest[i] = clip_pixel_add(dest[i], a1);
+    for (i = 0; i < 16; ++i) dest[i] = clip_pixel_add(dest[i], a1);
     dest += stride;
   }
 }
@@ -1162,7 +1151,7 @@ void vp10_idct32_c(const tran_low_t *input, tran_low_t *output) {
 }
 
 void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
-                              int stride) {
+                               int stride) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
   int i, j;
@@ -1171,8 +1160,7 @@ void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
   // Rows
   for (i = 0; i < 32; ++i) {
     int16_t zero_coeff[16];
-    for (j = 0; j < 16; ++j)
-      zero_coeff[j] = input[2 * j] | input[2 * j + 1];
+    for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1];
     for (j = 0; j < 8; ++j)
       zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
     for (j = 0; j < 4; ++j)
@@ -1190,8 +1178,7 @@ void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     vp10_idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -1201,8 +1188,8 @@ void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
 }
 
 void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
-                            int stride) {
-  tran_low_t out[32 * 32] = {0};
+                             int stride) {
+  tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
   tran_low_t temp_in[32], temp_out[32];
@@ -1217,19 +1204,16 @@ void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     vp10_idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
-      dest[j * stride + i] = clip_pixel_add(
-          dest[j * stride + i],
-          ROUND_POWER_OF_TWO(temp_out[j], 6));
+      dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
+                                            ROUND_POWER_OF_TWO(temp_out[j], 6));
     }
   }
 }
 
-void vp10_idct32x32_1_add_c(const tran_low_t *input,
-                            uint8_t *dest,
+void vp10_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
                             int stride) {
   int i, j;
   tran_high_t a1;
@@ -1239,15 +1223,14 @@ void vp10_idct32x32_1_add_c(const tran_low_t *input,
   a1 = ROUND_POWER_OF_TWO(out, 6);
 
   for (j = 0; j < 32; ++j) {
-    for (i = 0; i < 32; ++i)
-      dest[i] = clip_pixel_add(dest[i], a1);
+    for (i = 0; i < 32; ++i) dest[i] = clip_pixel_add(dest[i], a1);
     dest += stride;
   }
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
 void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int bd) {
+                                  int stride, int bd) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
   int i;
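
For reference, the "3.5 adds, 0.5 shifts per pixel" figure follows from the
1-D kernel this function applies to each row and then each column: 7
add/subtracts plus one shift per 4 samples, doubled over the two passes. A
hedged sketch of that kernel, following the non-highbd iwht4x4 in this tree
(tran_high_t assumed from the same headers, WRAPLOW omitted):

    /* Sketch: 1-D inverse Walsh-Hadamard butterfly. */
    static void sk_iwht4_1d(tran_high_t io[4]) {
      tran_high_t a1 = io[0], c1 = io[1], d1 = io[2], b1 = io[3], e1;
      a1 += c1;
      d1 -= b1;
      e1 = (a1 - d1) >> 1;
      b1 = e1 - b1;
      c1 = e1 - c1;
      a1 -= b1;
      d1 += c1;
      io[0] = a1; io[1] = b1; io[2] = c1; io[3] = d1;
    }
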
@@ -1301,14 +1284,14 @@ void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
-                                int dest_stride, int bd) {
+                                 int dest_stride, int bd) {
   int i;
   tran_high_t a1, e1;
   tran_low_t tmp[4];
   const tran_low_t *ip = in;
   tran_low_t *op = tmp;
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
-  (void) bd;
+  (void)bd;
 
   a1 = ip[0] >> UNIT_QUANT_SHIFT;
   e1 = a1 >> 1;
@@ -1320,14 +1303,14 @@ void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
   for (i = 0; i < 4; i++) {
     e1 = ip[0] >> 1;
     a1 = ip[0] - e1;
-    dest[dest_stride * 0] = highbd_clip_pixel_add(
-        dest[dest_stride * 0], a1, bd);
-    dest[dest_stride * 1] = highbd_clip_pixel_add(
-        dest[dest_stride * 1], e1, bd);
-    dest[dest_stride * 2] = highbd_clip_pixel_add(
-        dest[dest_stride * 2], e1, bd);
-    dest[dest_stride * 3] = highbd_clip_pixel_add(
-        dest[dest_stride * 3], e1, bd);
+    dest[dest_stride * 0] =
+        highbd_clip_pixel_add(dest[dest_stride * 0], a1, bd);
+    dest[dest_stride * 1] =
+        highbd_clip_pixel_add(dest[dest_stride * 1], e1, bd);
+    dest[dest_stride * 2] =
+        highbd_clip_pixel_add(dest[dest_stride * 2], e1, bd);
+    dest[dest_stride * 3] =
+        highbd_clip_pixel_add(dest[dest_stride * 3], e1, bd);
     ip++;
     dest++;
   }
@@ -1336,7 +1319,7 @@ void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
 void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
-  (void) bd;
+  (void)bd;
   // stage 1
   temp1 = (input[0] + input[2]) * cospi_16_64;
   temp2 = (input[0] - input[2]) * cospi_16_64;
@@ -1355,7 +1338,7 @@ void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
 }
 
 void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int bd) {
+                                  int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -1371,8 +1354,7 @@ void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Columns
   for (i = 0; i < 4; ++i) {
-    for (j = 0; j < 4; ++j)
-      temp_in[j] = out[j * 4 + i];
+    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
     vp10_highbd_idct4_c(temp_in, temp_out, bd);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1382,11 +1364,11 @@ void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                int dest_stride, int bd) {
+                                 int dest_stride, int bd) {
   int i;
   tran_high_t a1;
-  tran_low_t out = WRAPLOW(
-      highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+  tran_low_t out =
+      WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
 
   out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
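
Both multiplies in this DC path round through highbd_dct_const_round_shift,
the usual add-half-then-shift by DCT_CONST_BITS (14 in this tree). A worked
example under that assumption:

    /* Sketch: dct_const_round_shift with DCT_CONST_BITS == 14 (assumed). */
    static tran_low_t sk_round_shift(tran_high_t x) {
      return (tran_low_t)((x + (1 << 13)) >> 14);
    }
    /* e.g. dc == 100: 100 * 11585 == 1158500,
     * and (1158500 + 8192) >> 14 == 71. */
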
@@ -1447,7 +1429,7 @@ void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
 }
 
 void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int bd) {
+                                  int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j;
@@ -1463,8 +1445,7 @@ void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     vp10_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1474,17 +1455,16 @@ void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                int stride, int bd) {
+                                 int stride, int bd) {
   int i, j;
   tran_high_t a1;
-  tran_low_t out = WRAPLOW(
-      highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+  tran_low_t out =
+      WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
   out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
   a1 = ROUND_POWER_OF_TWO(out, 5);
   for (j = 0; j < 8; ++j) {
-    for (i = 0; i < 8; ++i)
-      dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+    for (i = 0; i < 8; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
     dest += stride;
   }
 }
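
Note the final shift here is 5, versus 6 in the 16x16 and 32x32 paths (and 4
for 4x4): each inverse undoes the size-dependent scaling of its matching
forward transform. ROUND_POWER_OF_TWO itself is the ordinary round-to-nearest
right shift; a minimal sketch, assuming the vpx_ports definition:

    /* Sketch: round-to-nearest right shift, as ROUND_POWER_OF_TWO in vpx. */
    #define SK_ROUND_POWER_OF_TWO(value, n) \
      (((value) + (1 << ((n)-1))) >> (n))
    /* e.g. SK_ROUND_POWER_OF_TWO(out, 5) == (out + 16) >> 5 */
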
@@ -1496,7 +1476,7 @@ void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t x1 = input[1];
   tran_low_t x2 = input[2];
   tran_low_t x3 = input[3];
-  (void) bd;
+  (void)bd;
 
   if (!(x0 | x1 | x2 | x3)) {
     memset(output, 0, 4 * sizeof(*output));
@@ -1538,7 +1518,7 @@ void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t x5 = input[4];
   tran_low_t x6 = input[1];
   tran_low_t x7 = input[6];
-  (void) bd;
+  (void)bd;
 
   if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
     memset(output, 0, 8 * sizeof(*output));
@@ -1546,14 +1526,14 @@ void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   }
 
   // stage 1
-  s0 = cospi_2_64  * x0 + cospi_30_64 * x1;
-  s1 = cospi_30_64 * x0 - cospi_2_64  * x1;
+  s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+  s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
   s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
   s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
   s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
   s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
-  s6 = cospi_26_64 * x6 + cospi_6_64  * x7;
-  s7 = cospi_6_64  * x6 - cospi_26_64 * x7;
+  s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+  s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
 
   x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s4, bd), bd);
   x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s5, bd), bd);
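
The cospi_N_64 constants threaded through these stages are cos(N*pi/64) in
Q14, i.e. round(16384 * cos(N*pi/64)) under the convention assumed here
(cospi_16_64 == 11585, cospi_2_64 == 16305, ...). A quick self-check:

    /* Sketch: regenerate the assumed cospi table for comparison. */
    #include <math.h>
    #include <stdio.h>
    int main(void) {
      int n;
      for (n = 1; n < 32; ++n)
        printf("cospi_%d_64 ~= %.0f\n", n,
               floor(cos(n * M_PI / 64) * 16384 + 0.5));
      return 0;
    }
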
@@ -1569,10 +1549,10 @@ void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 =  cospi_8_64  * x4 + cospi_24_64 * x5;
-  s5 =  cospi_24_64 * x4 - cospi_8_64  * x5;
-  s6 = -cospi_24_64 * x6 + cospi_8_64  * x7;
-  s7 =  cospi_8_64  * x6 + cospi_24_64 * x7;
+  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
 
   x0 = WRAPLOW(s0 + s2, bd);
   x1 = WRAPLOW(s1 + s3, bd);
@@ -1605,7 +1585,7 @@ void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
 }
 
 void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
-                                 int stride, int bd) {
+                                  int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -1621,8 +1601,7 @@ void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
   }
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     vp10_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1634,25 +1613,25 @@ void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
 void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
-  (void) bd;
+  (void)bd;
 
   // stage 1
-  step1[0] = input[0/2];
-  step1[1] = input[16/2];
-  step1[2] = input[8/2];
-  step1[3] = input[24/2];
-  step1[4] = input[4/2];
-  step1[5] = input[20/2];
-  step1[6] = input[12/2];
-  step1[7] = input[28/2];
-  step1[8] = input[2/2];
-  step1[9] = input[18/2];
-  step1[10] = input[10/2];
-  step1[11] = input[26/2];
-  step1[12] = input[6/2];
-  step1[13] = input[22/2];
-  step1[14] = input[14/2];
-  step1[15] = input[30/2];
+  step1[0] = input[0 / 2];
+  step1[1] = input[16 / 2];
+  step1[2] = input[8 / 2];
+  step1[3] = input[24 / 2];
+  step1[4] = input[4 / 2];
+  step1[5] = input[20 / 2];
+  step1[6] = input[12 / 2];
+  step1[7] = input[28 / 2];
+  step1[8] = input[2 / 2];
+  step1[9] = input[18 / 2];
+  step1[10] = input[10 / 2];
+  step1[11] = input[26 / 2];
+  step1[12] = input[6 / 2];
+  step1[13] = input[22 / 2];
+  step1[14] = input[14 / 2];
+  step1[15] = input[30 / 2];
 
   // stage 2
   step2[0] = step1[0];
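
The input[N / 2] indices in stage 1 above are a fixed permutation, written as
halved even indices to mirror the 32-point transform's coefficient ordering.
Spelled out as an equivalent table (a readability rewrite, not the tree's
code):

    /* Sketch: stage 1 of the 16-point IDCT as an explicit permutation. */
    static const int kSkIdct16Stage1[16] = { 0, 8, 4, 12, 2, 10, 6, 14,
                                             1, 9, 5, 13, 3, 11, 7, 15 };
    /* for (k = 0; k < 16; ++k) step1[k] = input[kSkIdct16Stage1[k]]; */
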
@@ -1798,7 +1777,7 @@ void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
 }
 
 void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int bd) {
+                                     int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j;
@@ -1814,20 +1793,16 @@ void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j * 16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     vp10_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
-          dest[j * stride + i],
-          ROUND_POWER_OF_TWO(temp_out[j], 6),
-          bd);
+          dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
     }
   }
 }
 
-void vp10_highbd_iadst16_c(const tran_low_t *input,
-                           tran_low_t *output,
+void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
                            int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
   tran_high_t s9, s10, s11, s12, s13, s14, s15;
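
Every NxN *_add function in this file has the same shape: the 2-D inverse
transform is separable, so it runs as a 1-D pass over the rows into a scratch
block, a 1-D pass over that block's columns, then a rounded add into the
prediction. A minimal sketch of the shared skeleton, with txfm standing in
for any of the idct/iadst kernels and the other helpers assumed from this
tree:

    /* Sketch: the row/column skeleton shared by the *_add functions. */
    typedef void (*sk_transform_1d)(const tran_low_t *in, tran_low_t *out);
    static void sk_inv_txfm2d_add(const tran_low_t *input, uint8_t *dest,
                                  int stride, int n, int shift,
                                  sk_transform_1d txfm) {
      tran_low_t out[32 * 32], tmp_in[32], tmp_out[32];
      int i, j;
      for (i = 0; i < n; ++i) txfm(input + n * i, out + n * i);   /* rows */
      for (i = 0; i < n; ++i) {                                /* columns */
        for (j = 0; j < n; ++j) tmp_in[j] = out[j * n + i];
        txfm(tmp_in, tmp_out);
        for (j = 0; j < n; ++j)
          dest[j * stride + i] = clip_pixel_add(
              dest[j * stride + i], ROUND_POWER_OF_TWO(tmp_out[j], shift));
      }
    }
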
@@ -1848,20 +1823,20 @@ void vp10_highbd_iadst16_c(const tran_low_t *input,
   tran_low_t x13 = input[12];
   tran_low_t x14 = input[1];
   tran_low_t x15 = input[14];
-  (void) bd;
+  (void)bd;
 
-  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
-           | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
+  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+        x13 | x14 | x15)) {
     memset(output, 0, 16 * sizeof(*output));
     return;
   }
 
   // stage 1
-  s0 = x0 * cospi_1_64  + x1 * cospi_31_64;
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
   s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
-  s2 = x2 * cospi_5_64  + x3 * cospi_27_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
   s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
-  s4 = x4 * cospi_9_64  + x5 * cospi_23_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
   s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
   s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
   s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -1870,9 +1845,9 @@ void vp10_highbd_iadst16_c(const tran_low_t *input,
   s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
   s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
   s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
-  s13 = x12 * cospi_7_64  - x13 * cospi_25_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
   s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
-  s15 = x14 * cospi_3_64  - x15 * cospi_29_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
 
   x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s8, bd), bd);
   x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s9, bd), bd);
@@ -1882,8 +1857,8 @@ void vp10_highbd_iadst16_c(const tran_low_t *input,
   x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s13, bd), bd);
   x6 = WRAPLOW(highbd_dct_const_round_shift(s6 + s14, bd), bd);
   x7 = WRAPLOW(highbd_dct_const_round_shift(s7 + s15, bd), bd);
-  x8  = WRAPLOW(highbd_dct_const_round_shift(s0 - s8, bd), bd);
-  x9  = WRAPLOW(highbd_dct_const_round_shift(s1 - s9, bd), bd);
+  x8 = WRAPLOW(highbd_dct_const_round_shift(s0 - s8, bd), bd);
+  x9 = WRAPLOW(highbd_dct_const_round_shift(s1 - s9, bd), bd);
   x10 = WRAPLOW(highbd_dct_const_round_shift(s2 - s10, bd), bd);
   x11 = WRAPLOW(highbd_dct_const_round_shift(s3 - s11, bd), bd);
   x12 = WRAPLOW(highbd_dct_const_round_shift(s4 - s12, bd), bd);
@@ -1962,13 +1937,13 @@ void vp10_highbd_iadst16_c(const tran_low_t *input,
   x15 = WRAPLOW(highbd_dct_const_round_shift(s13 - s15, bd), bd);
 
   // stage 4
-  s2 = (- cospi_16_64) * (x2 + x3);
+  s2 = (-cospi_16_64) * (x2 + x3);
   s3 = cospi_16_64 * (x2 - x3);
   s6 = cospi_16_64 * (x6 + x7);
   s7 = cospi_16_64 * (-x6 + x7);
   s10 = cospi_16_64 * (x10 + x11);
   s11 = cospi_16_64 * (-x10 + x11);
-  s14 = (- cospi_16_64) * (x14 + x15);
+  s14 = (-cospi_16_64) * (x14 + x15);
   s15 = cospi_16_64 * (x14 - x15);
 
   x2 = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd);
@@ -1999,7 +1974,7 @@ void vp10_highbd_iadst16_c(const tran_low_t *input,
 }
 
 void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int bd) {
+                                    int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -2016,8 +1991,7 @@ void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j*16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     vp10_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2027,27 +2001,26 @@ void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+                                   int stride, int bd) {
   int i, j;
   tran_high_t a1;
-  tran_low_t out = WRAPLOW(
-      highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+  tran_low_t out =
+      WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
 
   out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
   a1 = ROUND_POWER_OF_TWO(out, 6);
   for (j = 0; j < 16; ++j) {
-    for (i = 0; i < 16; ++i)
-      dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+    for (i = 0; i < 16; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
     dest += stride;
   }
 }
 
-static void highbd_idct32_c(const tran_low_t *input,
-                            tran_low_t *output, int bd) {
+static void highbd_idct32_c(const tran_low_t *input, tran_low_t *output,
+                            int bd) {
   tran_low_t step1[32], step2[32];
   tran_high_t temp1, temp2;
-  (void) bd;
+  (void)bd;
 
   // stage 1
   step1[0] = input[0];
@@ -2413,7 +2386,7 @@ static void highbd_idct32_c(const tran_low_t *input,
 }
 
 void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
-                                     int stride, int bd) {
+                                      int stride, int bd) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
   int i, j;
@@ -2423,8 +2396,7 @@ void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
   // Rows
   for (i = 0; i < 32; ++i) {
     tran_low_t zero_coeff[16];
-    for (j = 0; j < 16; ++j)
-      zero_coeff[j] = input[2 * j] | input[2 * j + 1];
+    for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1];
     for (j = 0; j < 8; ++j)
       zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
     for (j = 0; j < 4; ++j)
@@ -2442,8 +2414,7 @@ void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     highbd_idct32_c(temp_in, temp_out, bd);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2453,8 +2424,8 @@ void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
-                                   int stride, int bd) {
-  tran_low_t out[32 * 32] = {0};
+                                    int stride, int bd) {
+  tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
   tran_low_t temp_in[32], temp_out[32];
@@ -2469,8 +2440,7 @@ void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
   }
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     highbd_idct32_c(temp_in, temp_out, bd);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2480,19 +2450,18 @@ void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bd) {
+                                   int stride, int bd) {
   int i, j;
   int a1;
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
 
-  tran_low_t out = WRAPLOW(
-      highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+  tran_low_t out =
+      WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
   out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
   a1 = ROUND_POWER_OF_TWO(out, 6);
 
   for (j = 0; j < 32; ++j) {
-    for (i = 0; i < 32; ++i)
-      dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+    for (i = 0; i < 32; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
     dest += stride;
   }
 }
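
Every store in this file funnels through clip_pixel_add or
highbd_clip_pixel_add, which clamp the residual-plus-prediction sum to the
valid pixel range before narrowing. A sketch of the pair, assuming the usual
vpx definitions (clamp to [0, 255] and [0, (1 << bd) - 1]):

    /* Sketch: the add-and-clamp helpers assumed throughout this file. */
    static uint8_t sk_clip_pixel_add(uint8_t dest, tran_high_t trans) {
      const tran_high_t v = dest + trans;
      return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }
    static uint16_t sk_highbd_clip_pixel_add(uint16_t dest,
                                             tran_high_t trans, int bd) {
      const tran_high_t mx = (1 << bd) - 1;
      const tran_high_t v = dest + trans;
      return (uint16_t)(v < 0 ? 0 : (v > mx ? mx : v));
    }
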
diff --git a/vp10/common/vp10_inv_txfm.h b/vp10/common/vp10_inv_txfm.h
index 7f71dd5131559456a80111a78ed084b8bd953202..ec981e8de2f8c889d9bfc807988a254a0dd08e83 100644
--- a/vp10/common/vp10_inv_txfm.h
+++ b/vp10/common/vp10_inv_txfm.h
@@ -41,8 +41,7 @@ static INLINE tran_low_t dct_const_round_shift(tran_high_t input) {
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static INLINE tran_low_t highbd_check_range(tran_high_t input,
-                                            int bd) {
+static INLINE tran_low_t highbd_check_range(tran_high_t input, int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
   // For valid highbitdepth streams, intermediate stage coefficients will
   // stay within the ranges:
@@ -53,9 +52,9 @@ static INLINE tran_low_t highbd_check_range(tran_high_t input,
   const int32_t int_min = -int_max - 1;
   assert(int_min <= input);
   assert(input <= int_max);
-  (void) int_min;
+  (void)int_min;
 #endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
-  (void) bd;
+  (void)bd;
   return (tran_low_t)input;
 }
 
diff --git a/vp10/common/vp10_rtcd.c b/vp10/common/vp10_rtcd.c
index 36b294ae8a6d550f1627bb454521985ae4e4e8ff..0fd8ab0ea6276e9f7703bf3792e2d93e6226de8d 100644
--- a/vp10/common/vp10_rtcd.c
+++ b/vp10/common/vp10_rtcd.c
@@ -13,7 +13,7 @@
 #include "vpx_ports/vpx_once.h"
 
 void vp10_rtcd() {
-    // TODO(JBB): Remove this once, by insuring that both the encoder and
-    // decoder setup functions are protected by once();
-    once(setup_rtcd_internal);
+  // TODO(JBB): Remove this once, by ensuring that both the encoder and
+  // decoder setup functions are protected by once();
+  once(setup_rtcd_internal);
 }
diff --git a/vp10/common/x86/idct_intrin_sse2.c b/vp10/common/x86/idct_intrin_sse2.c
index a2c674b802fb5a2cb409441b553fe544abd93077..792e2f9a18f63e469a747f5db53727224a8c6900 100644
--- a/vp10/common/x86/idct_intrin_sse2.c
+++ b/vp10/common/x86/idct_intrin_sse2.c
@@ -38,9 +38,7 @@ void vp10_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
       iadst4_sse2(in);
       iadst4_sse2(in);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 
   // Final round and shift
@@ -110,9 +108,7 @@ void vp10_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
       iadst8_sse2(in);
       iadst8_sse2(in);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 
   // Final rounding and shift
@@ -169,9 +165,7 @@ void vp10_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
       iadst16_sse2(in0, in1);
       iadst16_sse2(in0, in1);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 
   write_buffer_8x16(dest, in0, stride);
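
The switch statements compacted in this file dispatch on VP10's hybrid
transform type: each tx_type selects a 1-D kernel per direction, and the two
same-named calls in each case are simply the row pass and the column pass. A
sketch of the mapping as a dispatch table, using this tree's C kernel names;
the (cols, rows) pairing below is the assumed VP9/VP10 convention:

    /* Sketch: tx_type -> (column, row) 1-D inverse kernels (assumed). */
    typedef void (*sk_transform_1d)(const tran_low_t *, tran_low_t *);
    typedef struct { sk_transform_1d cols, rows; } sk_transform_2d;
    static const sk_transform_2d SK_IHT_4[] = {
      { vp10_idct4_c, vp10_idct4_c },   /* DCT_DCT   */
      { vp10_iadst4_c, vp10_idct4_c },  /* ADST_DCT  */
      { vp10_idct4_c, vp10_iadst4_c },  /* DCT_ADST  */
      { vp10_iadst4_c, vp10_iadst4_c }, /* ADST_ADST */
    };
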
diff --git a/vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h b/vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h
index 77f633e9168f53708240ccebf4b833c9f930f54a..eea9f97090703e90d8954c7a873f82531337ea94 100644
--- a/vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h
+++ b/vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h
@@ -23,42 +23,37 @@
 #define SUB_EPI16 _mm_subs_epi16
 #if FDCT32x32_HIGH_PRECISION
 void vp10_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
-    int i, j;
-    for (i = 0; i < 32; ++i) {
-      tran_high_t temp_in[32], temp_out[32];
-      for (j = 0; j < 32; ++j)
-        temp_in[j] = intermediate[j * 32 + i];
-      vp10_fdct32(temp_in, temp_out, 0);
-      for (j = 0; j < 32; ++j)
-        out[j + i * 32] =
-            (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
-    }
+  int i, j;
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
+    vp10_fdct32(temp_in, temp_out, 0);
+    for (j = 0; j < 32; ++j)
+      out[j + i * 32] =
+          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
+  }
 }
-  #define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_c
-  #define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rows_c
 #else
 void vp10_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
-    int i, j;
-    for (i = 0; i < 32; ++i) {
-      tran_high_t temp_in[32], temp_out[32];
-      for (j = 0; j < 32; ++j)
-        temp_in[j] = intermediate[j * 32 + i];
-      vp10_fdct32(temp_in, temp_out, 1);
-      for (j = 0; j < 32; ++j)
-        out[j + i * 32] = (tran_low_t)temp_out[j];
-    }
+  int i, j;
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
+    vp10_fdct32(temp_in, temp_out, 1);
+    for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
+  }
 }
-  #define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_rd_c
-  #define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rd_rows_c
 #endif  // FDCT32x32_HIGH_PRECISION
 #else
 #define ADD_EPI16 _mm_add_epi16
 #define SUB_EPI16 _mm_sub_epi16
 #endif  // DCT_HIGH_BIT_DEPTH
 
-
-void FDCT32x32_2D(const int16_t *input,
-                  tran_low_t *output_org, int stride) {
+void FDCT32x32_2D(const int16_t *input, tran_low_t *output_org, int stride) {
   // Calculate pre-multiplied strides
   const int str1 = stride;
   const int str2 = 2 * stride;
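
The *_rows_c fallbacks above keep the exact scalar rounding the SIMD path
reproduces later: (x + 1 + (x < 0)) >> 2 is x/4 rounded to nearest with ties
broken toward zero. A spot check of that reading, reusing the tree's
tran_low_t/tran_high_t:

    /* Sketch: the pass-2 rounding used by vp10_fdct32x32_rows_c.
     *   x =  2 -> (2 + 1) >> 2  ==  0   ( 0.5 rounds toward zero)
     *   x =  3 -> (3 + 1) >> 2  ==  1
     *   x = -2 -> (-2 + 2) >> 2 ==  0   (-0.5 rounds toward zero)
     *   x = -3 -> (-1) >> 2     == -1   (arithmetic shift floors) */
    static tran_low_t sk_round2(tran_high_t x) {
      return (tran_low_t)((x + 1 + (x < 0)) >> 2);
    }
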
@@ -71,42 +66,42 @@ void FDCT32x32_2D(const int16_t *input,
   //    by constructing the 32 bit constant corresponding to that pair.
   const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64,   cospi_24_64);
+  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
   const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
-  const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64,  cospi_8_64);
-  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64,  cospi_20_64);
-  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64,  cospi_12_64);
-  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64,   cospi_28_64);
-  const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64,  cospi_4_64);
+  const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64);
+  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64);
   const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
   const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
-  const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64,  cospi_2_64);
-  const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64,  cospi_18_64);
-  const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64,  cospi_10_64);
-  const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64,   cospi_26_64);
-  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64,  cospi_6_64);
-  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64,  cospi_22_64);
-  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64,  cospi_14_64);
-  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64,   cospi_30_64);
-  const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64,  cospi_1_64);
-  const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64,  cospi_17_64);
-  const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64,  cospi_9_64);
-  const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64,   cospi_25_64);
-  const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64,  cospi_7_64);
-  const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64,   cospi_23_64);
-  const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64,  cospi_15_64);
-  const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64,   cospi_31_64);
-  const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64,  cospi_5_64);
-  const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64,  cospi_21_64);
-  const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64,  cospi_13_64);
-  const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64,   cospi_29_64);
-  const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64,  cospi_3_64);
-  const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64,  cospi_19_64);
-  const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64,  cospi_11_64);
-  const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64,   cospi_27_64);
+  const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64);
+  const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64);
+  const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64);
+  const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64);
+  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
+  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
+  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
+  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
+  const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, cospi_1_64);
+  const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64);
+  const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64, cospi_9_64);
+  const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64);
+  const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64);
+  const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64);
+  const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, cospi_15_64);
+  const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64);
+  const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64);
+  const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64);
+  const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64);
+  const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64);
+  const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64);
+  const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64);
+  const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64);
+  const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64);
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i kZero = _mm_set1_epi16(0);
-  const __m128i kOne  = _mm_set1_epi16(1);
+  const __m128i kOne = _mm_set1_epi16(1);
   // Do the two transform/transpose passes
   int pass;
 #if DCT_HIGH_BIT_DEPTH
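
The constant table just re-aligned above exists to feed _mm_madd_epi16:
pair_set_epi16(a, b) replicates the 16-bit pair (a, b) across a register, so
one madd against inputs interleaved as (x, y) yields a*x + b*y in every
32-bit lane, which is the fixed-point butterfly used throughout this
transform. A minimal sketch, assuming pair_set_epi16 and DCT_CONST_BITS == 14
from the vpx SSE2 headers:

    #include <emmintrin.h>
    /* Sketch: low half of one butterfly; k holds replicated (a, b) pairs. */
    static __m128i sk_butterfly_lo(__m128i x, __m128i y, __m128i k) {
      const __m128i t = _mm_unpacklo_epi16(x, y); /* x0,y0,x1,y1,...      */
      const __m128i p = _mm_madd_epi16(t, k);     /* a*x + b*y per lane   */
      const __m128i r = _mm_add_epi32(p, _mm_set1_epi32(1 << 13));
      return _mm_srai_epi32(r, 14);               /* dct_const_round_shift */
    }
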
@@ -124,125 +119,125 @@ void FDCT32x32_2D(const int16_t *input,
       // Note: even though all the loads below are aligned, using the aligned
       //       intrinsic make the code slightly slower.
       if (0 == pass) {
-        const int16_t *in  = &input[column_start];
+        const int16_t *in = &input[column_start];
         // step1[i] =  (in[ 0 * stride] + in[(32 -  1) * stride]) << 2;
         // Note: the next four blocks could be in a loop. That would help the
         //       instruction cache but is actually slower.
         {
-          const int16_t *ina =  in +  0 * str1;
-          const int16_t *inb =  in + 31 * str1;
-          __m128i *step1a = &step1[ 0];
+          const int16_t *ina = in + 0 * str1;
+          const int16_t *inb = in + 31 * str1;
+          __m128i *step1a = &step1[0];
           __m128i *step1b = &step1[31];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[0] = _mm_add_epi16(ina0, inb0);
+          step1a[1] = _mm_add_epi16(ina1, inb1);
+          step1a[2] = _mm_add_epi16(ina2, inb2);
+          step1a[3] = _mm_add_epi16(ina3, inb3);
           step1b[-3] = _mm_sub_epi16(ina3, inb3);
           step1b[-2] = _mm_sub_epi16(ina2, inb2);
           step1b[-1] = _mm_sub_epi16(ina1, inb1);
           step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
         }
         {
-          const int16_t *ina =  in +  4 * str1;
-          const int16_t *inb =  in + 27 * str1;
-          __m128i *step1a = &step1[ 4];
+          const int16_t *ina = in + 4 * str1;
+          const int16_t *inb = in + 27 * str1;
+          __m128i *step1a = &step1[4];
           __m128i *step1b = &step1[27];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[0] = _mm_add_epi16(ina0, inb0);
+          step1a[1] = _mm_add_epi16(ina1, inb1);
+          step1a[2] = _mm_add_epi16(ina2, inb2);
+          step1a[3] = _mm_add_epi16(ina3, inb3);
           step1b[-3] = _mm_sub_epi16(ina3, inb3);
           step1b[-2] = _mm_sub_epi16(ina2, inb2);
           step1b[-1] = _mm_sub_epi16(ina1, inb1);
           step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
         }
         {
-          const int16_t *ina =  in +  8 * str1;
-          const int16_t *inb =  in + 23 * str1;
-          __m128i *step1a = &step1[ 8];
+          const int16_t *ina = in + 8 * str1;
+          const int16_t *inb = in + 23 * str1;
+          __m128i *step1a = &step1[8];
           __m128i *step1b = &step1[23];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[0] = _mm_add_epi16(ina0, inb0);
+          step1a[1] = _mm_add_epi16(ina1, inb1);
+          step1a[2] = _mm_add_epi16(ina2, inb2);
+          step1a[3] = _mm_add_epi16(ina3, inb3);
           step1b[-3] = _mm_sub_epi16(ina3, inb3);
           step1b[-2] = _mm_sub_epi16(ina2, inb2);
           step1b[-1] = _mm_sub_epi16(ina1, inb1);
           step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
         }
         {
-          const int16_t *ina =  in + 12 * str1;
-          const int16_t *inb =  in + 19 * str1;
+          const int16_t *ina = in + 12 * str1;
+          const int16_t *inb = in + 19 * str1;
           __m128i *step1a = &step1[12];
           __m128i *step1b = &step1[19];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[0] = _mm_add_epi16(ina0, inb0);
+          step1a[1] = _mm_add_epi16(ina1, inb1);
+          step1a[2] = _mm_add_epi16(ina2, inb2);
+          step1a[3] = _mm_add_epi16(ina3, inb3);
           step1b[-3] = _mm_sub_epi16(ina3, inb3);
           step1b[-2] = _mm_sub_epi16(ina2, inb2);
           step1b[-1] = _mm_sub_epi16(ina1, inb1);
           step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
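
A scalar reading of the pass-0 blocks above: each one loads mirrored row
pairs, forms the 32-point transform's first butterfly stage (sum and
difference of in[i] and in[31 - i], per the comment near the top of the
pass), and pre-scales by 4 so the later right shifts lose less precision.
Per column, the equivalent is:

    /* Sketch: scalar equivalent of the four pass-0 load blocks. */
    static void sk_fdct32_stage1(const int16_t *in, int str1,
                                 tran_high_t step1[32]) {
      int i;
      for (i = 0; i < 16; ++i) {
        step1[i] = (tran_high_t)(in[i * str1] + in[(31 - i) * str1]) << 2;
        step1[31 - i] =
            (tran_high_t)(in[i * str1] - in[(31 - i) * str1]) << 2;
      }
    }
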
@@ -257,14 +252,14 @@ void FDCT32x32_2D(const int16_t *input,
         // Note: the next four blocks could be in a loop. That would help the
         //       instruction cache but is actually slower.
         {
-          __m128i in00  = _mm_loadu_si128((const __m128i *)(in +  0 * 32));
-          __m128i in01  = _mm_loadu_si128((const __m128i *)(in +  1 * 32));
-          __m128i in02  = _mm_loadu_si128((const __m128i *)(in +  2 * 32));
-          __m128i in03  = _mm_loadu_si128((const __m128i *)(in +  3 * 32));
-          __m128i in28  = _mm_loadu_si128((const __m128i *)(in + 28 * 32));
-          __m128i in29  = _mm_loadu_si128((const __m128i *)(in + 29 * 32));
-          __m128i in30  = _mm_loadu_si128((const __m128i *)(in + 30 * 32));
-          __m128i in31  = _mm_loadu_si128((const __m128i *)(in + 31 * 32));
+          __m128i in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 32));
+          __m128i in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 32));
+          __m128i in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 32));
+          __m128i in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 32));
+          __m128i in28 = _mm_loadu_si128((const __m128i *)(in + 28 * 32));
+          __m128i in29 = _mm_loadu_si128((const __m128i *)(in + 29 * 32));
+          __m128i in30 = _mm_loadu_si128((const __m128i *)(in + 30 * 32));
+          __m128i in31 = _mm_loadu_si128((const __m128i *)(in + 31 * 32));
           step1[0] = ADD_EPI16(in00, in31);
           step1[1] = ADD_EPI16(in01, in30);
           step1[2] = ADD_EPI16(in02, in29);
@@ -284,14 +279,14 @@ void FDCT32x32_2D(const int16_t *input,
 #endif  // DCT_HIGH_BIT_DEPTH
         }
         {
-          __m128i in04  = _mm_loadu_si128((const __m128i *)(in +  4 * 32));
-          __m128i in05  = _mm_loadu_si128((const __m128i *)(in +  5 * 32));
-          __m128i in06  = _mm_loadu_si128((const __m128i *)(in +  6 * 32));
-          __m128i in07  = _mm_loadu_si128((const __m128i *)(in +  7 * 32));
-          __m128i in24  = _mm_loadu_si128((const __m128i *)(in + 24 * 32));
-          __m128i in25  = _mm_loadu_si128((const __m128i *)(in + 25 * 32));
-          __m128i in26  = _mm_loadu_si128((const __m128i *)(in + 26 * 32));
-          __m128i in27  = _mm_loadu_si128((const __m128i *)(in + 27 * 32));
+          __m128i in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 32));
+          __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 32));
+          __m128i in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 32));
+          __m128i in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 32));
+          __m128i in24 = _mm_loadu_si128((const __m128i *)(in + 24 * 32));
+          __m128i in25 = _mm_loadu_si128((const __m128i *)(in + 25 * 32));
+          __m128i in26 = _mm_loadu_si128((const __m128i *)(in + 26 * 32));
+          __m128i in27 = _mm_loadu_si128((const __m128i *)(in + 27 * 32));
           step1[4] = ADD_EPI16(in04, in27);
           step1[5] = ADD_EPI16(in05, in26);
           step1[6] = ADD_EPI16(in06, in25);
@@ -311,14 +306,14 @@ void FDCT32x32_2D(const int16_t *input,
 #endif  // DCT_HIGH_BIT_DEPTH
         }
         {
-          __m128i in08  = _mm_loadu_si128((const __m128i *)(in +  8 * 32));
-          __m128i in09  = _mm_loadu_si128((const __m128i *)(in +  9 * 32));
-          __m128i in10  = _mm_loadu_si128((const __m128i *)(in + 10 * 32));
-          __m128i in11  = _mm_loadu_si128((const __m128i *)(in + 11 * 32));
-          __m128i in20  = _mm_loadu_si128((const __m128i *)(in + 20 * 32));
-          __m128i in21  = _mm_loadu_si128((const __m128i *)(in + 21 * 32));
-          __m128i in22  = _mm_loadu_si128((const __m128i *)(in + 22 * 32));
-          __m128i in23  = _mm_loadu_si128((const __m128i *)(in + 23 * 32));
+          __m128i in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 32));
+          __m128i in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 32));
+          __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32));
+          __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32));
+          __m128i in20 = _mm_loadu_si128((const __m128i *)(in + 20 * 32));
+          __m128i in21 = _mm_loadu_si128((const __m128i *)(in + 21 * 32));
+          __m128i in22 = _mm_loadu_si128((const __m128i *)(in + 22 * 32));
+          __m128i in23 = _mm_loadu_si128((const __m128i *)(in + 23 * 32));
           step1[8] = ADD_EPI16(in08, in23);
           step1[9] = ADD_EPI16(in09, in22);
           step1[10] = ADD_EPI16(in10, in21);
@@ -338,14 +333,14 @@ void FDCT32x32_2D(const int16_t *input,
 #endif  // DCT_HIGH_BIT_DEPTH
         }
         {
-          __m128i in12  = _mm_loadu_si128((const __m128i *)(in + 12 * 32));
-          __m128i in13  = _mm_loadu_si128((const __m128i *)(in + 13 * 32));
-          __m128i in14  = _mm_loadu_si128((const __m128i *)(in + 14 * 32));
-          __m128i in15  = _mm_loadu_si128((const __m128i *)(in + 15 * 32));
-          __m128i in16  = _mm_loadu_si128((const __m128i *)(in + 16 * 32));
-          __m128i in17  = _mm_loadu_si128((const __m128i *)(in + 17 * 32));
-          __m128i in18  = _mm_loadu_si128((const __m128i *)(in + 18 * 32));
-          __m128i in19  = _mm_loadu_si128((const __m128i *)(in + 19 * 32));
+          __m128i in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 32));
+          __m128i in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 32));
+          __m128i in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 32));
+          __m128i in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 32));
+          __m128i in16 = _mm_loadu_si128((const __m128i *)(in + 16 * 32));
+          __m128i in17 = _mm_loadu_si128((const __m128i *)(in + 17 * 32));
+          __m128i in18 = _mm_loadu_si128((const __m128i *)(in + 18 * 32));
+          __m128i in19 = _mm_loadu_si128((const __m128i *)(in + 19 * 32));
           step1[12] = ADD_EPI16(in12, in19);
           step1[13] = ADD_EPI16(in13, in18);
           step1[14] = ADD_EPI16(in14, in17);
@@ -373,10 +368,10 @@ void FDCT32x32_2D(const int16_t *input,
         step2[3] = ADD_EPI16(step1[3], step1[12]);
         step2[4] = ADD_EPI16(step1[4], step1[11]);
         step2[5] = ADD_EPI16(step1[5], step1[10]);
-        step2[6] = ADD_EPI16(step1[6], step1[ 9]);
-        step2[7] = ADD_EPI16(step1[7], step1[ 8]);
-        step2[8] = SUB_EPI16(step1[7], step1[ 8]);
-        step2[9] = SUB_EPI16(step1[6], step1[ 9]);
+        step2[6] = ADD_EPI16(step1[6], step1[9]);
+        step2[7] = ADD_EPI16(step1[7], step1[8]);
+        step2[8] = SUB_EPI16(step1[7], step1[8]);
+        step2[9] = SUB_EPI16(step1[6], step1[9]);
         step2[10] = SUB_EPI16(step1[5], step1[10]);
         step2[11] = SUB_EPI16(step1[4], step1[11]);
         step2[12] = SUB_EPI16(step1[3], step1[12]);
@@ -385,9 +380,8 @@ void FDCT32x32_2D(const int16_t *input,
         step2[15] = SUB_EPI16(step1[0], step1[15]);
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x16(
-            &step2[0], &step2[1], &step2[2], &step2[3],
-            &step2[4], &step2[5], &step2[6], &step2[7],
-            &step2[8], &step2[9], &step2[10], &step2[11],
+            &step2[0], &step2[1], &step2[2], &step2[3], &step2[4], &step2[5],
+            &step2[6], &step2[7], &step2[8], &step2[9], &step2[10], &step2[11],
             &step2[12], &step2[13], &step2[14], &step2[15]);
         if (overflow) {
           if (pass == 0)
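
Under DCT_HIGH_BIT_DEPTH, ADD_EPI16/SUB_EPI16 are the saturating
_mm_adds_epi16/_mm_subs_epi16, so any lane that lands on INT16_MAX or
INT16_MIN marks a possible 16-bit overflow; the check_epi16_overflow_*
helpers test for exactly that, and on a hit the whole block is redone in the
wider C fallback, as above. A sketch of the detection idea (assumed to match
vpx's fwd_txfm_sse2.h):

    #include <emmintrin.h>
    /* Sketch: flag any saturated 16-bit lane in reg. */
    static int sk_check_overflow(__m128i reg) {
      const __m128i kMax = _mm_set1_epi16(0x7fff);
      const __m128i kMin = _mm_set1_epi16((int16_t)0x8000);
      const __m128i hit = _mm_or_si128(_mm_cmpeq_epi16(reg, kMax),
                                       _mm_cmpeq_epi16(reg, kMin));
      return _mm_movemask_epi8(hit) != 0; /* any hit -> retry in C */
    }
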
@@ -483,16 +477,16 @@ void FDCT32x32_2D(const int16_t *input,
       // dump the magnitude by half, hence the intermediate values are within
       // the range of 16 bits.
       if (1 == pass) {
-        __m128i s3_00_0 = _mm_cmplt_epi16(step2[ 0], kZero);
-        __m128i s3_01_0 = _mm_cmplt_epi16(step2[ 1], kZero);
-        __m128i s3_02_0 = _mm_cmplt_epi16(step2[ 2], kZero);
-        __m128i s3_03_0 = _mm_cmplt_epi16(step2[ 3], kZero);
-        __m128i s3_04_0 = _mm_cmplt_epi16(step2[ 4], kZero);
-        __m128i s3_05_0 = _mm_cmplt_epi16(step2[ 5], kZero);
-        __m128i s3_06_0 = _mm_cmplt_epi16(step2[ 6], kZero);
-        __m128i s3_07_0 = _mm_cmplt_epi16(step2[ 7], kZero);
-        __m128i s2_08_0 = _mm_cmplt_epi16(step2[ 8], kZero);
-        __m128i s2_09_0 = _mm_cmplt_epi16(step2[ 9], kZero);
+        __m128i s3_00_0 = _mm_cmplt_epi16(step2[0], kZero);
+        __m128i s3_01_0 = _mm_cmplt_epi16(step2[1], kZero);
+        __m128i s3_02_0 = _mm_cmplt_epi16(step2[2], kZero);
+        __m128i s3_03_0 = _mm_cmplt_epi16(step2[3], kZero);
+        __m128i s3_04_0 = _mm_cmplt_epi16(step2[4], kZero);
+        __m128i s3_05_0 = _mm_cmplt_epi16(step2[5], kZero);
+        __m128i s3_06_0 = _mm_cmplt_epi16(step2[6], kZero);
+        __m128i s3_07_0 = _mm_cmplt_epi16(step2[7], kZero);
+        __m128i s2_08_0 = _mm_cmplt_epi16(step2[8], kZero);
+        __m128i s2_09_0 = _mm_cmplt_epi16(step2[9], kZero);
         __m128i s3_10_0 = _mm_cmplt_epi16(step2[10], kZero);
         __m128i s3_11_0 = _mm_cmplt_epi16(step2[11], kZero);
         __m128i s3_12_0 = _mm_cmplt_epi16(step2[12], kZero);
@@ -516,16 +510,16 @@ void FDCT32x32_2D(const int16_t *input,
         __m128i s3_30_0 = _mm_cmplt_epi16(step1[30], kZero);
         __m128i s3_31_0 = _mm_cmplt_epi16(step1[31], kZero);
 
-        step2[0] = SUB_EPI16(step2[ 0], s3_00_0);
-        step2[1] = SUB_EPI16(step2[ 1], s3_01_0);
-        step2[2] = SUB_EPI16(step2[ 2], s3_02_0);
-        step2[3] = SUB_EPI16(step2[ 3], s3_03_0);
-        step2[4] = SUB_EPI16(step2[ 4], s3_04_0);
-        step2[5] = SUB_EPI16(step2[ 5], s3_05_0);
-        step2[6] = SUB_EPI16(step2[ 6], s3_06_0);
-        step2[7] = SUB_EPI16(step2[ 7], s3_07_0);
-        step2[8] = SUB_EPI16(step2[ 8], s2_08_0);
-        step2[9] = SUB_EPI16(step2[ 9], s2_09_0);
+        step2[0] = SUB_EPI16(step2[0], s3_00_0);
+        step2[1] = SUB_EPI16(step2[1], s3_01_0);
+        step2[2] = SUB_EPI16(step2[2], s3_02_0);
+        step2[3] = SUB_EPI16(step2[3], s3_03_0);
+        step2[4] = SUB_EPI16(step2[4], s3_04_0);
+        step2[5] = SUB_EPI16(step2[5], s3_05_0);
+        step2[6] = SUB_EPI16(step2[6], s3_06_0);
+        step2[7] = SUB_EPI16(step2[7], s3_07_0);
+        step2[8] = SUB_EPI16(step2[8], s2_08_0);
+        step2[9] = SUB_EPI16(step2[9], s2_09_0);
         step2[10] = SUB_EPI16(step2[10], s3_10_0);
         step2[11] = SUB_EPI16(step2[11], s3_11_0);
         step2[12] = SUB_EPI16(step2[12], s3_12_0);
@@ -550,29 +544,27 @@ void FDCT32x32_2D(const int16_t *input,
         step1[31] = SUB_EPI16(step1[31], s3_31_0);
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x32(
-            &step2[0], &step2[1], &step2[2], &step2[3],
-            &step2[4], &step2[5], &step2[6], &step2[7],
-            &step2[8], &step2[9], &step2[10], &step2[11],
-            &step2[12], &step2[13], &step2[14], &step2[15],
-            &step1[16], &step1[17], &step1[18], &step1[19],
-            &step2[20], &step2[21], &step2[22], &step2[23],
-            &step2[24], &step2[25], &step2[26], &step2[27],
-            &step1[28], &step1[29], &step1[30], &step1[31]);
+            &step2[0], &step2[1], &step2[2], &step2[3], &step2[4], &step2[5],
+            &step2[6], &step2[7], &step2[8], &step2[9], &step2[10], &step2[11],
+            &step2[12], &step2[13], &step2[14], &step2[15], &step1[16],
+            &step1[17], &step1[18], &step1[19], &step2[20], &step2[21],
+            &step2[22], &step2[23], &step2[24], &step2[25], &step2[26],
+            &step2[27], &step1[28], &step1[29], &step1[30], &step1[31]);
         if (overflow) {
           HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
-        step2[0] = _mm_add_epi16(step2[ 0], kOne);
-        step2[1] = _mm_add_epi16(step2[ 1], kOne);
-        step2[2] = _mm_add_epi16(step2[ 2], kOne);
-        step2[3] = _mm_add_epi16(step2[ 3], kOne);
-        step2[4] = _mm_add_epi16(step2[ 4], kOne);
-        step2[5] = _mm_add_epi16(step2[ 5], kOne);
-        step2[6] = _mm_add_epi16(step2[ 6], kOne);
-        step2[7] = _mm_add_epi16(step2[ 7], kOne);
-        step2[8] = _mm_add_epi16(step2[ 8], kOne);
-        step2[9] = _mm_add_epi16(step2[ 9], kOne);
+        step2[0] = _mm_add_epi16(step2[0], kOne);
+        step2[1] = _mm_add_epi16(step2[1], kOne);
+        step2[2] = _mm_add_epi16(step2[2], kOne);
+        step2[3] = _mm_add_epi16(step2[3], kOne);
+        step2[4] = _mm_add_epi16(step2[4], kOne);
+        step2[5] = _mm_add_epi16(step2[5], kOne);
+        step2[6] = _mm_add_epi16(step2[6], kOne);
+        step2[7] = _mm_add_epi16(step2[7], kOne);
+        step2[8] = _mm_add_epi16(step2[8], kOne);
+        step2[9] = _mm_add_epi16(step2[9], kOne);
         step2[10] = _mm_add_epi16(step2[10], kOne);
         step2[11] = _mm_add_epi16(step2[11], kOne);
         step2[12] = _mm_add_epi16(step2[12], kOne);
@@ -596,16 +588,16 @@ void FDCT32x32_2D(const int16_t *input,
         step1[30] = _mm_add_epi16(step1[30], kOne);
         step1[31] = _mm_add_epi16(step1[31], kOne);
 
-        step2[0] = _mm_srai_epi16(step2[ 0], 2);
-        step2[1] = _mm_srai_epi16(step2[ 1], 2);
-        step2[2] = _mm_srai_epi16(step2[ 2], 2);
-        step2[3] = _mm_srai_epi16(step2[ 3], 2);
-        step2[4] = _mm_srai_epi16(step2[ 4], 2);
-        step2[5] = _mm_srai_epi16(step2[ 5], 2);
-        step2[6] = _mm_srai_epi16(step2[ 6], 2);
-        step2[7] = _mm_srai_epi16(step2[ 7], 2);
-        step2[8] = _mm_srai_epi16(step2[ 8], 2);
-        step2[9] = _mm_srai_epi16(step2[ 9], 2);
+        step2[0] = _mm_srai_epi16(step2[0], 2);
+        step2[1] = _mm_srai_epi16(step2[1], 2);
+        step2[2] = _mm_srai_epi16(step2[2], 2);
+        step2[3] = _mm_srai_epi16(step2[3], 2);
+        step2[4] = _mm_srai_epi16(step2[4], 2);
+        step2[5] = _mm_srai_epi16(step2[5], 2);
+        step2[6] = _mm_srai_epi16(step2[6], 2);
+        step2[7] = _mm_srai_epi16(step2[7], 2);
+        step2[8] = _mm_srai_epi16(step2[8], 2);
+        step2[9] = _mm_srai_epi16(step2[9], 2);
         step2[10] = _mm_srai_epi16(step2[10], 2);
         step2[11] = _mm_srai_epi16(step2[11], 2);
         step2[12] = _mm_srai_epi16(step2[12], 2);
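
The cmplt/sub/add/srai sequence in this pass-1 block is the vector form of
the scalar rounding in the *_rows_c fallbacks: _mm_cmplt_epi16 yields -1 in
negative lanes, subtracting that mask adds 1 there, then +1 and an arithmetic
shift by 2 complete (x + 1 + (x < 0)) >> 2 per lane. Condensed to one
register:

    #include <emmintrin.h>
    /* Sketch: per-lane (x + 1 + (x < 0)) >> 2, as in the block above. */
    static __m128i sk_round2_epi16(__m128i x) {
      const __m128i kZero = _mm_setzero_si128();
      const __m128i kOne = _mm_set1_epi16(1);
      const __m128i neg = _mm_cmplt_epi16(x, kZero); /* 0xffff if x < 0 */
      x = _mm_sub_epi16(x, neg);                     /* x += (x < 0)    */
      x = _mm_add_epi16(x, kOne);                    /* x += 1          */
      return _mm_srai_epi16(x, 2);                   /* >> 2            */
    }
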
@@ -634,821 +626,884 @@ void FDCT32x32_2D(const int16_t *input,
 #if FDCT32x32_HIGH_PRECISION
       if (pass == 0) {
 #endif
-      // Stage 3
-      {
-        step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]);
-        step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]);
-        step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]);
-        step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]);
-        step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]);
-        step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]);
-        step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]);
-        step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]);
+        // Stage 3
+        {
+          step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]);
+          step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]);
+          step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]);
+          step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]);
+          step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]);
+          step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]);
+          step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]);
+          step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2],
-                                           &step3[3], &step3[4], &step3[5],
-                                           &step3[6], &step3[7]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2],
+                                             &step3[3], &step3[4], &step3[5],
+                                             &step3[6], &step3[7]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
-        const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
-        const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
-        const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
-        const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
-        const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
-        const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
-        const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
-        const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
-        const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
-        const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
-        const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
-        const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
-        const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
-        const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
-        const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
-        const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
-        const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
-        const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
-        // Combine
-        step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7);
-        step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7);
-        step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
-        step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step3[10], &step3[11],
-                                           &step3[12], &step3[13]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step3[16] = ADD_EPI16(step2[23], step1[16]);
-        step3[17] = ADD_EPI16(step2[22], step1[17]);
-        step3[18] = ADD_EPI16(step2[21], step1[18]);
-        step3[19] = ADD_EPI16(step2[20], step1[19]);
-        step3[20] = SUB_EPI16(step1[19], step2[20]);
-        step3[21] = SUB_EPI16(step1[18], step2[21]);
-        step3[22] = SUB_EPI16(step1[17], step2[22]);
-        step3[23] = SUB_EPI16(step1[16], step2[23]);
-        step3[24] = SUB_EPI16(step1[31], step2[24]);
-        step3[25] = SUB_EPI16(step1[30], step2[25]);
-        step3[26] = SUB_EPI16(step1[29], step2[26]);
-        step3[27] = SUB_EPI16(step1[28], step2[27]);
-        step3[28] = ADD_EPI16(step2[27], step1[28]);
-        step3[29] = ADD_EPI16(step2[26], step1[29]);
-        step3[30] = ADD_EPI16(step2[25], step1[30]);
-        step3[31] = ADD_EPI16(step2[24], step1[31]);
+        {
+          const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
+          const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
+          const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
+          const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
+          const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
+          const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
+          const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
+          const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
+          const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
+          const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
+          const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
+          const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
+          // dct_const_round_shift
+          const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
+          const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
+          const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
+          const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
+          const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
+          const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
+          const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
+          const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
+          // Combine
+          step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7);
+          step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7);
+          step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
+          step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step3[16], &step3[17], &step3[18], &step3[19],
-            &step3[20], &step3[21], &step3[22], &step3[23],
-            &step3[24], &step3[25], &step3[26], &step3[27],
-            &step3[28], &step3[29], &step3[30], &step3[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow = check_epi16_overflow_x4(&step3[10], &step3[11], &step3[12],
+                                             &step3[13]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        {
+          step3[16] = ADD_EPI16(step2[23], step1[16]);
+          step3[17] = ADD_EPI16(step2[22], step1[17]);
+          step3[18] = ADD_EPI16(step2[21], step1[18]);
+          step3[19] = ADD_EPI16(step2[20], step1[19]);
+          step3[20] = SUB_EPI16(step1[19], step2[20]);
+          step3[21] = SUB_EPI16(step1[18], step2[21]);
+          step3[22] = SUB_EPI16(step1[17], step2[22]);
+          step3[23] = SUB_EPI16(step1[16], step2[23]);
+          step3[24] = SUB_EPI16(step1[31], step2[24]);
+          step3[25] = SUB_EPI16(step1[30], step2[25]);
+          step3[26] = SUB_EPI16(step1[29], step2[26]);
+          step3[27] = SUB_EPI16(step1[28], step2[27]);
+          step3[28] = ADD_EPI16(step2[27], step1[28]);
+          step3[29] = ADD_EPI16(step2[26], step1[29]);
+          step3[30] = ADD_EPI16(step2[25], step1[30]);
+          step3[31] = ADD_EPI16(step2[24], step1[31]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x16(
+              &step3[16], &step3[17], &step3[18], &step3[19], &step3[20],
+              &step3[21], &step3[22], &step3[23], &step3[24], &step3[25],
+              &step3[26], &step3[27], &step3[28], &step3[29], &step3[30],
+              &step3[31]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
+        }
 
-      // Stage 4
-      {
-        step1[0] = ADD_EPI16(step3[ 3], step3[ 0]);
-        step1[1] = ADD_EPI16(step3[ 2], step3[ 1]);
-        step1[2] = SUB_EPI16(step3[ 1], step3[ 2]);
-        step1[3] = SUB_EPI16(step3[ 0], step3[ 3]);
-        step1[8] = ADD_EPI16(step3[11], step2[ 8]);
-        step1[9] = ADD_EPI16(step3[10], step2[ 9]);
-        step1[10] = SUB_EPI16(step2[ 9], step3[10]);
-        step1[11] = SUB_EPI16(step2[ 8], step3[11]);
-        step1[12] = SUB_EPI16(step2[15], step3[12]);
-        step1[13] = SUB_EPI16(step2[14], step3[13]);
-        step1[14] = ADD_EPI16(step3[13], step2[14]);
-        step1[15] = ADD_EPI16(step3[12], step2[15]);
+        // Stage 4
+        {
+          step1[0] = ADD_EPI16(step3[3], step3[0]);
+          step1[1] = ADD_EPI16(step3[2], step3[1]);
+          step1[2] = SUB_EPI16(step3[1], step3[2]);
+          step1[3] = SUB_EPI16(step3[0], step3[3]);
+          step1[8] = ADD_EPI16(step3[11], step2[8]);
+          step1[9] = ADD_EPI16(step3[10], step2[9]);
+          step1[10] = SUB_EPI16(step2[9], step3[10]);
+          step1[11] = SUB_EPI16(step2[8], step3[11]);
+          step1[12] = SUB_EPI16(step2[15], step3[12]);
+          step1[13] = SUB_EPI16(step2[14], step3[13]);
+          step1[14] = ADD_EPI16(step3[13], step2[14]);
+          step1[15] = ADD_EPI16(step3[12], step2[15]);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step1[0], &step1[1], &step1[2], &step1[3],
-            &step1[4], &step1[5], &step1[6], &step1[7],
-            &step1[8], &step1[9], &step1[10], &step1[11],
-            &step1[12], &step1[13], &step1[14], &step1[15]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x16(
+              &step1[0], &step1[1], &step1[2], &step1[3], &step1[4], &step1[5],
+              &step1[6], &step1[7], &step1[8], &step1[9], &step1[10],
+              &step1[11], &step1[12], &step1[13], &step1[14], &step1[15]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]);
-        const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]);
-        const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16);
-        const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16);
-        const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16);
-        const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS);
-        const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS);
-        const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS);
-        const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS);
-        // Combine
-        step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
-        step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x2(&step1[5], &step1[6]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]);
-        const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]);
-        const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]);
-        const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]);
-        const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]);
-        const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]);
-        const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]);
-        const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]);
-        const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24);
-        const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24);
-        const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24);
-        const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24);
-        const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08);
-        const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08);
-        const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08);
-        const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08);
-        const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24);
-        const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24);
-        const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24);
-        const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24);
-        const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08);
-        const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08);
-        const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08);
-        const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS);
-        const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS);
-        const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS);
-        const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS);
-        const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS);
-        const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS);
-        const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS);
-        const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS);
-        const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS);
-        const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS);
-        const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS);
-        const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS);
-        const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS);
-        const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS);
-        const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS);
-        const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS);
-        // Combine
-        step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7);
-        step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7);
-        step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7);
-        step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7);
-        step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7);
-        step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7);
-        step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
-        step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
+        {
+          const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]);
+          const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]);
+          const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16);
+          const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16);
+          const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16);
+          const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16);
+          // dct_const_round_shift
+          const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS);
+          const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS);
+          const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS);
+          const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS);
+          // Combine
+          step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
+          step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20],
-                                           &step1[21], &step1[26], &step1[27],
-                                           &step1[28], &step1[29]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x2(&step1[5], &step1[6]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Stage 5
-      {
-        step2[4] = ADD_EPI16(step1[5], step3[4]);
-        step2[5] = SUB_EPI16(step3[4], step1[5]);
-        step2[6] = SUB_EPI16(step3[7], step1[6]);
-        step2[7] = ADD_EPI16(step1[6], step3[7]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step2[4], &step2[5],
-                                           &step2[6], &step2[7]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]);
-        const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]);
-        const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]);
-        const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]);
-        const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16);
-        const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16);
-        const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16);
-        const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16);
-        const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08);
-        const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08);
-        const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24);
-        const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24);
-        // dct_const_round_shift
-        const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS);
-        const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS);
-        const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS);
-        const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS);
-        const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS);
-        const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS);
-        const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS);
-        const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS);
-        // Combine
-        out[ 0] = _mm_packs_epi32(out_00_6, out_00_7);
-        out[16] = _mm_packs_epi32(out_16_6, out_16_7);
-        out[ 8] = _mm_packs_epi32(out_08_6, out_08_7);
-        out[24] = _mm_packs_epi32(out_24_6, out_24_7);
+        {
+          const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]);
+          const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]);
+          const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]);
+          const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]);
+          const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]);
+          const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]);
+          const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]);
+          const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]);
+          const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24);
+          const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24);
+          const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24);
+          const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24);
+          const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08);
+          const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08);
+          const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08);
+          const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08);
+          const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24);
+          const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24);
+          const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24);
+          const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24);
+          const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08);
+          const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08);
+          const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08);
+          const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08);
+          // dct_const_round_shift
+          const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS);
+          const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS);
+          const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS);
+          const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS);
+          const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS);
+          const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS);
+          const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS);
+          const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS);
+          const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS);
+          const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS);
+          const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS);
+          const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS);
+          const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS);
+          const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS);
+          const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS);
+          const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS);
+          // Combine
+          step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7);
+          step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7);
+          step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7);
+          step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7);
+          step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7);
+          step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7);
+          step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
+          step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&out[0], &out[16],
-                                           &out[8], &out[24]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20],
+                                             &step1[21], &step1[26], &step1[27],
+                                             &step1[28], &step1[29]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[ 9], step1[14]);
-        const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]);
-        const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]);
-        const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]);
-        const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24);
-        const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24);
-        const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08);
-        const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08);
-        const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24);
-        const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24);
-        const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08);
-        const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS);
-        const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS);
-        const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS);
-        const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS);
-        const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS);
-        const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS);
-        const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS);
-        const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS);
-        // Combine
-        step2[ 9] = _mm_packs_epi32(s2_09_6, s2_09_7);
-        step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7);
-        step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
-        step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step2[9], &step2[10],
-                                           &step2[13], &step2[14]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step2[16] = ADD_EPI16(step1[19], step3[16]);
-        step2[17] = ADD_EPI16(step1[18], step3[17]);
-        step2[18] = SUB_EPI16(step3[17], step1[18]);
-        step2[19] = SUB_EPI16(step3[16], step1[19]);
-        step2[20] = SUB_EPI16(step3[23], step1[20]);
-        step2[21] = SUB_EPI16(step3[22], step1[21]);
-        step2[22] = ADD_EPI16(step1[21], step3[22]);
-        step2[23] = ADD_EPI16(step1[20], step3[23]);
-        step2[24] = ADD_EPI16(step1[27], step3[24]);
-        step2[25] = ADD_EPI16(step1[26], step3[25]);
-        step2[26] = SUB_EPI16(step3[25], step1[26]);
-        step2[27] = SUB_EPI16(step3[24], step1[27]);
-        step2[28] = SUB_EPI16(step3[31], step1[28]);
-        step2[29] = SUB_EPI16(step3[30], step1[29]);
-        step2[30] = ADD_EPI16(step1[29], step3[30]);
-        step2[31] = ADD_EPI16(step1[28], step3[31]);
+        // Stage 5
+        {
+          step2[4] = ADD_EPI16(step1[5], step3[4]);
+          step2[5] = SUB_EPI16(step3[4], step1[5]);
+          step2[6] = SUB_EPI16(step3[7], step1[6]);
+          step2[7] = ADD_EPI16(step1[6], step3[7]);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step2[16], &step2[17], &step2[18], &step2[19],
-            &step2[20], &step2[21], &step2[22], &step2[23],
-            &step2[24], &step2[25], &step2[26], &step2[27],
-            &step2[28], &step2[29], &step2[30], &step2[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x4(&step2[4], &step2[5], &step2[6],
+                                             &step2[7]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Stage 6
-      {
-        const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
-        const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
-        const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
-        const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
-        const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
-        const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
-        const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
-        const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
-        const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04);
-        const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04);
-        const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20);
-        const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20);
-        const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12);
-        const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12);
-        const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28);
-        const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28);
-        // dct_const_round_shift
-        const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS);
-        const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS);
-        const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS);
-        const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS);
-        const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS);
-        const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS);
-        const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS);
-        const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS);
-        // Combine
-        out[4] = _mm_packs_epi32(out_04_6, out_04_7);
-        out[20] = _mm_packs_epi32(out_20_6, out_20_7);
-        out[12] = _mm_packs_epi32(out_12_6, out_12_7);
-        out[28] = _mm_packs_epi32(out_28_6, out_28_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&out[4], &out[20],
-                                           &out[12], &out[28]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step3[8] = ADD_EPI16(step2[ 9], step1[ 8]);
-        step3[9] = SUB_EPI16(step1[ 8], step2[ 9]);
-        step3[10] = SUB_EPI16(step1[11], step2[10]);
-        step3[11] = ADD_EPI16(step2[10], step1[11]);
-        step3[12] = ADD_EPI16(step2[13], step1[12]);
-        step3[13] = SUB_EPI16(step1[12], step2[13]);
-        step3[14] = SUB_EPI16(step1[15], step2[14]);
-        step3[15] = ADD_EPI16(step2[14], step1[15]);
+        {
+          const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]);
+          const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]);
+          const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]);
+          const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]);
+          const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16);
+          const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16);
+          const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16);
+          const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16);
+          const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08);
+          const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08);
+          const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24);
+          const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24);
+          // dct_const_round_shift
+          const __m128i out_00_4 =
+              _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_00_5 =
+              _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_16_4 =
+              _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_16_5 =
+              _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_08_4 =
+              _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_08_5 =
+              _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_24_4 =
+              _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_24_5 =
+              _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS);
+          const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS);
+          const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS);
+          const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS);
+          const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS);
+          const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS);
+          const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS);
+          const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS);
+          // Combine
+          out[0] = _mm_packs_epi32(out_00_6, out_00_7);
+          out[16] = _mm_packs_epi32(out_16_6, out_16_7);
+          out[8] = _mm_packs_epi32(out_08_6, out_08_7);
+          out[24] = _mm_packs_epi32(out_24_6, out_24_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10],
-                                           &step3[11], &step3[12], &step3[13],
-                                           &step3[14], &step3[15]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow =
+              check_epi16_overflow_x4(&out[0], &out[16], &out[8], &out[24]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
-        const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
-        const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
-        const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
-        const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
-        const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
-        const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
-        const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
-        const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
-        const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
-        const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
-        const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
-        const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
-        const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
-        const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
-        const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
-        const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
-        const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
-        const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
-        const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
-        const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
-        const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
-        const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
-        const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
-        // dct_const_round_shift
-        const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
-        const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
-        const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
-        const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
-        const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
-        const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
-        const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
-        const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
-        const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
-        const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
-        const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
-        const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
-        const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
-        const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
-        const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
-        const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
-        // Combine
-        step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
-        step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
-        step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
-        step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
-        // Combine
-        step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
-        step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
-        step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
-        step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21],
-                                           &step3[22], &step3[25], &step3[26],
-                                           &step3[29], &step3[30]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
+        {
+          const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[9], step1[14]);
+          const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[9], step1[14]);
+          const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]);
+          const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]);
+          const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24);
+          const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24);
+          const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08);
+          const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08);
+          const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24);
+          const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24);
+          const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08);
+          const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08);
+          // dct_const_round_shift
+          const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
+          const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
+          const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
+          const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
+          const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
+          const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
+          const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
+          const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
+          const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS);
+          const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS);
+          const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS);
+          const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS);
+          const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS);
+          const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS);
+          const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS);
+          const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS);
+          // Combine
+          step2[9] = _mm_packs_epi32(s2_09_6, s2_09_7);
+          step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7);
+          step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
+          step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x4(&step2[9], &step2[10], &step2[13],
+                                             &step2[14]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Stage 7
-      {
-        const __m128i out_02_0 = _mm_unpacklo_epi16(step3[ 8], step3[15]);
-        const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]);
-        const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]);
-        const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]);
-        const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
-        const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
-        const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
-        const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
-        const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
-        const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
-        const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
-        const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
-        const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
-        const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
-        const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
-        const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
-        const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
-        const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
-        const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
-        const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
-        const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
-        const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
-        const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
-        const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
-        // dct_const_round_shift
-        const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
-        const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
-        const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
-        const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
-        const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
-        const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
-        const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
-        const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
-        const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
-        const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
-        const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
-        const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
-        const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
-        const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
-        const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
-        const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
-        // Combine
-        out[ 2] = _mm_packs_epi32(out_02_6, out_02_7);
-        out[18] = _mm_packs_epi32(out_18_6, out_18_7);
-        out[10] = _mm_packs_epi32(out_10_6, out_10_7);
-        out[26] = _mm_packs_epi32(out_26_6, out_26_7);
-        out[ 6] = _mm_packs_epi32(out_06_6, out_06_7);
-        out[22] = _mm_packs_epi32(out_22_6, out_22_7);
-        out[14] = _mm_packs_epi32(out_14_6, out_14_7);
-        out[30] = _mm_packs_epi32(out_30_6, out_30_7);
+        }
+        {
+          step2[16] = ADD_EPI16(step1[19], step3[16]);
+          step2[17] = ADD_EPI16(step1[18], step3[17]);
+          step2[18] = SUB_EPI16(step3[17], step1[18]);
+          step2[19] = SUB_EPI16(step3[16], step1[19]);
+          step2[20] = SUB_EPI16(step3[23], step1[20]);
+          step2[21] = SUB_EPI16(step3[22], step1[21]);
+          step2[22] = ADD_EPI16(step1[21], step3[22]);
+          step2[23] = ADD_EPI16(step1[20], step3[23]);
+          step2[24] = ADD_EPI16(step1[27], step3[24]);
+          step2[25] = ADD_EPI16(step1[26], step3[25]);
+          step2[26] = SUB_EPI16(step3[25], step1[26]);
+          step2[27] = SUB_EPI16(step3[24], step1[27]);
+          step2[28] = SUB_EPI16(step3[31], step1[28]);
+          step2[29] = SUB_EPI16(step3[30], step1[29]);
+          step2[30] = ADD_EPI16(step1[29], step3[30]);
+          step2[31] = ADD_EPI16(step1[28], step3[31]);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
-                                           &out[26], &out[6], &out[22],
-                                           &out[14], &out[30]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow = check_epi16_overflow_x16(
+              &step2[16], &step2[17], &step2[18], &step2[19], &step2[20],
+              &step2[21], &step2[22], &step2[23], &step2[24], &step2[25],
+              &step2[26], &step2[27], &step2[28], &step2[29], &step2[30],
+              &step2[31]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        // Stage 6
+        {
+          const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+          const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+          const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+          const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+          const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+          const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+          const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+          const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+          const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04);
+          const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04);
+          const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20);
+          const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20);
+          const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12);
+          const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12);
+          const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28);
+          const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28);
+          // dct_const_round_shift
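+          // Each 32-bit lane becomes (x + (1 << (DCT_CONST_BITS - 1))) >>
+          // DCT_CONST_BITS, the vector form of the scalar
+          // dct_const_round_shift().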
+          const __m128i out_04_4 =
+              _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_04_5 =
+              _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_20_4 =
+              _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_20_5 =
+              _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_12_4 =
+              _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_12_5 =
+              _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_28_4 =
+              _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_28_5 =
+              _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS);
+          const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS);
+          const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS);
+          const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS);
+          const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS);
+          const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS);
+          const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS);
+          const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS);
+          // Combine
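+          // _mm_packs_epi32() saturates the two rounded 32-bit halves back
+          // into a single vector of eight 16-bit coefficients.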
+          out[4] = _mm_packs_epi32(out_04_6, out_04_7);
+          out[20] = _mm_packs_epi32(out_20_6, out_20_7);
+          out[12] = _mm_packs_epi32(out_12_6, out_12_7);
+          out[28] = _mm_packs_epi32(out_28_6, out_28_7);
+#if DCT_HIGH_BIT_DEPTH
+          overflow =
+              check_epi16_overflow_x4(&out[4], &out[20], &out[12], &out[28]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step1[16] = ADD_EPI16(step3[17], step2[16]);
-        step1[17] = SUB_EPI16(step2[16], step3[17]);
-        step1[18] = SUB_EPI16(step2[19], step3[18]);
-        step1[19] = ADD_EPI16(step3[18], step2[19]);
-        step1[20] = ADD_EPI16(step3[21], step2[20]);
-        step1[21] = SUB_EPI16(step2[20], step3[21]);
-        step1[22] = SUB_EPI16(step2[23], step3[22]);
-        step1[23] = ADD_EPI16(step3[22], step2[23]);
-        step1[24] = ADD_EPI16(step3[25], step2[24]);
-        step1[25] = SUB_EPI16(step2[24], step3[25]);
-        step1[26] = SUB_EPI16(step2[27], step3[26]);
-        step1[27] = ADD_EPI16(step3[26], step2[27]);
-        step1[28] = ADD_EPI16(step3[29], step2[28]);
-        step1[29] = SUB_EPI16(step2[28], step3[29]);
-        step1[30] = SUB_EPI16(step2[31], step3[30]);
-        step1[31] = ADD_EPI16(step3[30], step2[31]);
+        }
+        {
+          step3[8] = ADD_EPI16(step2[9], step1[8]);
+          step3[9] = SUB_EPI16(step1[8], step2[9]);
+          step3[10] = SUB_EPI16(step1[11], step2[10]);
+          step3[11] = ADD_EPI16(step2[10], step1[11]);
+          step3[12] = ADD_EPI16(step2[13], step1[12]);
+          step3[13] = SUB_EPI16(step1[12], step2[13]);
+          step3[14] = SUB_EPI16(step1[15], step2[14]);
+          step3[15] = ADD_EPI16(step2[14], step1[15]);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step1[16], &step1[17], &step1[18], &step1[19],
-            &step1[20], &step1[21], &step1[22], &step1[23],
-            &step1[24], &step1[25], &step1[26], &step1[27],
-            &step1[28], &step1[29], &step1[30], &step1[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10],
+                                             &step3[11], &step3[12], &step3[13],
+                                             &step3[14], &step3[15]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        {
+          const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
+          const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
+          const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
+          const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
+          const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
+          const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
+          const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
+          const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
+          const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
+          const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
+          const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
+          const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
+          const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
+          const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
+          const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
+          const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
+          const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
+          const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
+          const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
+          const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
+          const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
+          const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
+          const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
+          const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
+          // dct_const_round_shift
+          const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
+          const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
+          const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
+          const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
+          const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
+          const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
+          const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
+          const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
+          const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
+          const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
+          const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
+          const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
+          const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
+          const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
+          const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
+          const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
+          // Combine
+          step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
+          step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
+          step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
+          step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
+          // Combine
+          step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
+          step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
+          step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
+          step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21],
+                                             &step3[22], &step3[25], &step3[26],
+                                             &step3[29], &step3[30]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Final stage --- outputs indices are bit-reversed.
-      {
-        const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
-        const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
-        const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
-        const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
-        const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
-        const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
-        const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
-        const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
-        const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
-        const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
-        const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
-        const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
-        const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
-        const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
-        const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
-        const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
-        const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
-        const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
-        const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
-        const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
-        const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
-        const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
-        const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
-        const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
-        // dct_const_round_shift
-        const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
-        const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
-        const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
-        const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
-        const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
-        const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
-        const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
-        const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
-        const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
-        const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
-        const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
-        const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
-        const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
-        const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
-        const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
-        const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
-        // Combine
-        out[ 1] = _mm_packs_epi32(out_01_6, out_01_7);
-        out[17] = _mm_packs_epi32(out_17_6, out_17_7);
-        out[ 9] = _mm_packs_epi32(out_09_6, out_09_7);
-        out[25] = _mm_packs_epi32(out_25_6, out_25_7);
-        out[ 7] = _mm_packs_epi32(out_07_6, out_07_7);
-        out[23] = _mm_packs_epi32(out_23_6, out_23_7);
-        out[15] = _mm_packs_epi32(out_15_6, out_15_7);
-        out[31] = _mm_packs_epi32(out_31_6, out_31_7);
+        }
+        // Stage 7
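+        // Stage 7 produces outputs 2/18/10/26/6/22/14/30 from step3[8..15]
+        // and assembles step1[16..31] for the final butterflies.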
+        {
+          const __m128i out_02_0 = _mm_unpacklo_epi16(step3[8], step3[15]);
+          const __m128i out_02_1 = _mm_unpackhi_epi16(step3[8], step3[15]);
+          const __m128i out_18_0 = _mm_unpacklo_epi16(step3[9], step3[14]);
+          const __m128i out_18_1 = _mm_unpackhi_epi16(step3[9], step3[14]);
+          const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
+          const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
+          const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
+          const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
+          const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
+          const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
+          const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
+          const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
+          const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
+          const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
+          const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
+          const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
+          const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
+          const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
+          const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
+          const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
+          const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
+          const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
+          const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
+          const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
+          // dct_const_round_shift
+          const __m128i out_02_4 =
+              _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_02_5 =
+              _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_18_4 =
+              _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_18_5 =
+              _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_10_4 =
+              _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_10_5 =
+              _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_26_4 =
+              _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_26_5 =
+              _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_06_4 =
+              _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_06_5 =
+              _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_22_4 =
+              _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_22_5 =
+              _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_14_4 =
+              _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_14_5 =
+              _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_30_4 =
+              _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_30_5 =
+              _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
+          const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
+          const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
+          const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
+          const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
+          const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
+          const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
+          const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
+          const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
+          const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
+          const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
+          const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
+          const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
+          const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
+          const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
+          const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
+          // Combine
+          out[2] = _mm_packs_epi32(out_02_6, out_02_7);
+          out[18] = _mm_packs_epi32(out_18_6, out_18_7);
+          out[10] = _mm_packs_epi32(out_10_6, out_10_7);
+          out[26] = _mm_packs_epi32(out_26_6, out_26_7);
+          out[6] = _mm_packs_epi32(out_06_6, out_06_7);
+          out[22] = _mm_packs_epi32(out_22_6, out_22_7);
+          out[14] = _mm_packs_epi32(out_14_6, out_14_7);
+          out[30] = _mm_packs_epi32(out_30_6, out_30_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
-                                           &out[25], &out[7], &out[23],
-                                           &out[15], &out[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow =
+              check_epi16_overflow_x8(&out[2], &out[18], &out[10], &out[26],
+                                      &out[6], &out[22], &out[14], &out[30]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        {
+          step1[16] = ADD_EPI16(step3[17], step2[16]);
+          step1[17] = SUB_EPI16(step2[16], step3[17]);
+          step1[18] = SUB_EPI16(step2[19], step3[18]);
+          step1[19] = ADD_EPI16(step3[18], step2[19]);
+          step1[20] = ADD_EPI16(step3[21], step2[20]);
+          step1[21] = SUB_EPI16(step2[20], step3[21]);
+          step1[22] = SUB_EPI16(step2[23], step3[22]);
+          step1[23] = ADD_EPI16(step3[22], step2[23]);
+          step1[24] = ADD_EPI16(step3[25], step2[24]);
+          step1[25] = SUB_EPI16(step2[24], step3[25]);
+          step1[26] = SUB_EPI16(step2[27], step3[26]);
+          step1[27] = ADD_EPI16(step3[26], step2[27]);
+          step1[28] = ADD_EPI16(step3[29], step2[28]);
+          step1[29] = SUB_EPI16(step2[28], step3[29]);
+          step1[30] = SUB_EPI16(step2[31], step3[30]);
+          step1[31] = ADD_EPI16(step3[30], step2[31]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x16(
+              &step1[16], &step1[17], &step1[18], &step1[19], &step1[20],
+              &step1[21], &step1[22], &step1[23], &step1[24], &step1[25],
+              &step1[26], &step1[27], &step1[28], &step1[29], &step1[30],
+              &step1[31]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
-        const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
-        const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
-        const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
-        const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
-        const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
-        const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
-        const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
-        const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
-        const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
-        const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
-        const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
-        const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
-        const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
-        const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
-        const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
-        const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
-        const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
-        const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
-        const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
-        const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
-        const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
-        const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
-        const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
-        // dct_const_round_shift
-        const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
-        const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
-        const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
-        const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
-        const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
-        const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
-        const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
-        const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
-        const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
-        const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
-        const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
-        const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
-        const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
-        const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
-        const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
-        const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
-        // Combine
-        out[ 5] = _mm_packs_epi32(out_05_6, out_05_7);
-        out[21] = _mm_packs_epi32(out_21_6, out_21_7);
-        out[13] = _mm_packs_epi32(out_13_6, out_13_7);
-        out[29] = _mm_packs_epi32(out_29_6, out_29_7);
-        out[ 3] = _mm_packs_epi32(out_03_6, out_03_7);
-        out[19] = _mm_packs_epi32(out_19_6, out_19_7);
-        out[11] = _mm_packs_epi32(out_11_6, out_11_7);
-        out[27] = _mm_packs_epi32(out_27_6, out_27_7);
+        }
+        // Final stage --- output indices are bit-reversed.
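+        // step1[k] pairs with step1[47 - k]; each result lands at the 5-bit
+        // bit-reversal of k: 16 -> out[1], 18 -> out[9], 20 -> out[5], etc.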
+        {
+          const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
+          const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
+          const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
+          const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
+          const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
+          const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
+          const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
+          const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
+          const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
+          const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
+          const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
+          const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
+          const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
+          const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
+          const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
+          const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
+          const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
+          const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
+          const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
+          const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
+          const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
+          const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
+          const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
+          const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
+          // dct_const_round_shift
+          const __m128i out_01_4 =
+              _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_01_5 =
+              _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_17_4 =
+              _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_17_5 =
+              _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_09_4 =
+              _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_09_5 =
+              _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_25_4 =
+              _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_25_5 =
+              _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_07_4 =
+              _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_07_5 =
+              _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_23_4 =
+              _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_23_5 =
+              _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_15_4 =
+              _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_15_5 =
+              _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_31_4 =
+              _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_31_5 =
+              _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
+          const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
+          const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
+          const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
+          const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
+          const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
+          const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
+          const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
+          const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
+          const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
+          const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
+          const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
+          const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
+          const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
+          const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
+          const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
+          // Combine
+          out[1] = _mm_packs_epi32(out_01_6, out_01_7);
+          out[17] = _mm_packs_epi32(out_17_6, out_17_7);
+          out[9] = _mm_packs_epi32(out_09_6, out_09_7);
+          out[25] = _mm_packs_epi32(out_25_6, out_25_7);
+          out[7] = _mm_packs_epi32(out_07_6, out_07_7);
+          out[23] = _mm_packs_epi32(out_23_6, out_23_7);
+          out[15] = _mm_packs_epi32(out_15_6, out_15_7);
+          out[31] = _mm_packs_epi32(out_31_6, out_31_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
-                                           &out[29], &out[3], &out[19],
-                                           &out[11], &out[27]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow =
+              check_epi16_overflow_x8(&out[1], &out[17], &out[9], &out[25],
+                                      &out[7], &out[23], &out[15], &out[31]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        {
+          const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
+          const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
+          const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
+          const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
+          const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
+          const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
+          const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
+          const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
+          const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
+          const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
+          const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
+          const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
+          const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
+          const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
+          const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
+          const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
+          const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
+          const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
+          const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
+          const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
+          const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
+          const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
+          const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
+          const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
+          // dct_const_round_shift
+          const __m128i out_05_4 =
+              _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_05_5 =
+              _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_21_4 =
+              _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_21_5 =
+              _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_13_4 =
+              _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_13_5 =
+              _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_29_4 =
+              _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_29_5 =
+              _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_03_4 =
+              _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_03_5 =
+              _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_19_4 =
+              _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_19_5 =
+              _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_11_4 =
+              _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_11_5 =
+              _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_27_4 =
+              _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_27_5 =
+              _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
+          const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
+          const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
+          const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
+          const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
+          const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
+          const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
+          const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
+          const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
+          const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
+          const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
+          const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
+          const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
+          const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
+          const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
+          const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
+          // Combine
+          out[5] = _mm_packs_epi32(out_05_6, out_05_7);
+          out[21] = _mm_packs_epi32(out_21_6, out_21_7);
+          out[13] = _mm_packs_epi32(out_13_6, out_13_7);
+          out[29] = _mm_packs_epi32(out_29_6, out_29_7);
+          out[3] = _mm_packs_epi32(out_03_6, out_03_7);
+          out[19] = _mm_packs_epi32(out_19_6, out_19_7);
+          out[11] = _mm_packs_epi32(out_11_6, out_11_7);
+          out[27] = _mm_packs_epi32(out_27_6, out_27_7);
+#if DCT_HIGH_BIT_DEPTH
+          overflow =
+              check_epi16_overflow_x8(&out[5], &out[21], &out[13], &out[29],
+                                      &out[3], &out[19], &out[11], &out[27]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
+        }
 #if FDCT32x32_HIGH_PRECISION
       } else {
         __m128i lstep1[64], lstep2[64], lstep3[64];
@@ -1458,32 +1513,32 @@ void FDCT32x32_2D(const int16_t *input,
         // stage 3
         {
          // expanding to 32-bit length prior to addition operations
-          lstep2[ 0] = _mm_unpacklo_epi16(step2[ 0], kZero);
-          lstep2[ 1] = _mm_unpackhi_epi16(step2[ 0], kZero);
-          lstep2[ 2] = _mm_unpacklo_epi16(step2[ 1], kZero);
-          lstep2[ 3] = _mm_unpackhi_epi16(step2[ 1], kZero);
-          lstep2[ 4] = _mm_unpacklo_epi16(step2[ 2], kZero);
-          lstep2[ 5] = _mm_unpackhi_epi16(step2[ 2], kZero);
-          lstep2[ 6] = _mm_unpacklo_epi16(step2[ 3], kZero);
-          lstep2[ 7] = _mm_unpackhi_epi16(step2[ 3], kZero);
-          lstep2[ 8] = _mm_unpacklo_epi16(step2[ 4], kZero);
-          lstep2[ 9] = _mm_unpackhi_epi16(step2[ 4], kZero);
-          lstep2[10] = _mm_unpacklo_epi16(step2[ 5], kZero);
-          lstep2[11] = _mm_unpackhi_epi16(step2[ 5], kZero);
-          lstep2[12] = _mm_unpacklo_epi16(step2[ 6], kZero);
-          lstep2[13] = _mm_unpackhi_epi16(step2[ 6], kZero);
-          lstep2[14] = _mm_unpacklo_epi16(step2[ 7], kZero);
-          lstep2[15] = _mm_unpackhi_epi16(step2[ 7], kZero);
-          lstep2[ 0] = _mm_madd_epi16(lstep2[ 0], kOne);
-          lstep2[ 1] = _mm_madd_epi16(lstep2[ 1], kOne);
-          lstep2[ 2] = _mm_madd_epi16(lstep2[ 2], kOne);
-          lstep2[ 3] = _mm_madd_epi16(lstep2[ 3], kOne);
-          lstep2[ 4] = _mm_madd_epi16(lstep2[ 4], kOne);
-          lstep2[ 5] = _mm_madd_epi16(lstep2[ 5], kOne);
-          lstep2[ 6] = _mm_madd_epi16(lstep2[ 6], kOne);
-          lstep2[ 7] = _mm_madd_epi16(lstep2[ 7], kOne);
-          lstep2[ 8] = _mm_madd_epi16(lstep2[ 8], kOne);
-          lstep2[ 9] = _mm_madd_epi16(lstep2[ 9], kOne);
+          lstep2[0] = _mm_unpacklo_epi16(step2[0], kZero);
+          lstep2[1] = _mm_unpackhi_epi16(step2[0], kZero);
+          lstep2[2] = _mm_unpacklo_epi16(step2[1], kZero);
+          lstep2[3] = _mm_unpackhi_epi16(step2[1], kZero);
+          lstep2[4] = _mm_unpacklo_epi16(step2[2], kZero);
+          lstep2[5] = _mm_unpackhi_epi16(step2[2], kZero);
+          lstep2[6] = _mm_unpacklo_epi16(step2[3], kZero);
+          lstep2[7] = _mm_unpackhi_epi16(step2[3], kZero);
+          lstep2[8] = _mm_unpacklo_epi16(step2[4], kZero);
+          lstep2[9] = _mm_unpackhi_epi16(step2[4], kZero);
+          lstep2[10] = _mm_unpacklo_epi16(step2[5], kZero);
+          lstep2[11] = _mm_unpackhi_epi16(step2[5], kZero);
+          lstep2[12] = _mm_unpacklo_epi16(step2[6], kZero);
+          lstep2[13] = _mm_unpackhi_epi16(step2[6], kZero);
+          lstep2[14] = _mm_unpacklo_epi16(step2[7], kZero);
+          lstep2[15] = _mm_unpackhi_epi16(step2[7], kZero);
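+          // Interleaving with kZero and then _mm_madd_epi16() against kOne
+          // yields x * 1 + 0 * 1 per lane pair, i.e. each int16 value
+          // sign-extended to int32.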
+          lstep2[0] = _mm_madd_epi16(lstep2[0], kOne);
+          lstep2[1] = _mm_madd_epi16(lstep2[1], kOne);
+          lstep2[2] = _mm_madd_epi16(lstep2[2], kOne);
+          lstep2[3] = _mm_madd_epi16(lstep2[3], kOne);
+          lstep2[4] = _mm_madd_epi16(lstep2[4], kOne);
+          lstep2[5] = _mm_madd_epi16(lstep2[5], kOne);
+          lstep2[6] = _mm_madd_epi16(lstep2[6], kOne);
+          lstep2[7] = _mm_madd_epi16(lstep2[7], kOne);
+          lstep2[8] = _mm_madd_epi16(lstep2[8], kOne);
+          lstep2[9] = _mm_madd_epi16(lstep2[9], kOne);
           lstep2[10] = _mm_madd_epi16(lstep2[10], kOne);
           lstep2[11] = _mm_madd_epi16(lstep2[11], kOne);
           lstep2[12] = _mm_madd_epi16(lstep2[12], kOne);
@@ -1491,22 +1546,22 @@ void FDCT32x32_2D(const int16_t *input,
           lstep2[14] = _mm_madd_epi16(lstep2[14], kOne);
           lstep2[15] = _mm_madd_epi16(lstep2[15], kOne);
 
-          lstep3[ 0] = _mm_add_epi32(lstep2[14], lstep2[ 0]);
-          lstep3[ 1] = _mm_add_epi32(lstep2[15], lstep2[ 1]);
-          lstep3[ 2] = _mm_add_epi32(lstep2[12], lstep2[ 2]);
-          lstep3[ 3] = _mm_add_epi32(lstep2[13], lstep2[ 3]);
-          lstep3[ 4] = _mm_add_epi32(lstep2[10], lstep2[ 4]);
-          lstep3[ 5] = _mm_add_epi32(lstep2[11], lstep2[ 5]);
-          lstep3[ 6] = _mm_add_epi32(lstep2[ 8], lstep2[ 6]);
-          lstep3[ 7] = _mm_add_epi32(lstep2[ 9], lstep2[ 7]);
-          lstep3[ 8] = _mm_sub_epi32(lstep2[ 6], lstep2[ 8]);
-          lstep3[ 9] = _mm_sub_epi32(lstep2[ 7], lstep2[ 9]);
-          lstep3[10] = _mm_sub_epi32(lstep2[ 4], lstep2[10]);
-          lstep3[11] = _mm_sub_epi32(lstep2[ 5], lstep2[11]);
-          lstep3[12] = _mm_sub_epi32(lstep2[ 2], lstep2[12]);
-          lstep3[13] = _mm_sub_epi32(lstep2[ 3], lstep2[13]);
-          lstep3[14] = _mm_sub_epi32(lstep2[ 0], lstep2[14]);
-          lstep3[15] = _mm_sub_epi32(lstep2[ 1], lstep2[15]);
+          lstep3[0] = _mm_add_epi32(lstep2[14], lstep2[0]);
+          lstep3[1] = _mm_add_epi32(lstep2[15], lstep2[1]);
+          lstep3[2] = _mm_add_epi32(lstep2[12], lstep2[2]);
+          lstep3[3] = _mm_add_epi32(lstep2[13], lstep2[3]);
+          lstep3[4] = _mm_add_epi32(lstep2[10], lstep2[4]);
+          lstep3[5] = _mm_add_epi32(lstep2[11], lstep2[5]);
+          lstep3[6] = _mm_add_epi32(lstep2[8], lstep2[6]);
+          lstep3[7] = _mm_add_epi32(lstep2[9], lstep2[7]);
+          lstep3[8] = _mm_sub_epi32(lstep2[6], lstep2[8]);
+          lstep3[9] = _mm_sub_epi32(lstep2[7], lstep2[9]);
+          lstep3[10] = _mm_sub_epi32(lstep2[4], lstep2[10]);
+          lstep3[11] = _mm_sub_epi32(lstep2[5], lstep2[11]);
+          lstep3[12] = _mm_sub_epi32(lstep2[2], lstep2[12]);
+          lstep3[13] = _mm_sub_epi32(lstep2[3], lstep2[13]);
+          lstep3[14] = _mm_sub_epi32(lstep2[0], lstep2[14]);
+          lstep3[15] = _mm_sub_epi32(lstep2[1], lstep2[15]);
         }
         {
           const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
@@ -1644,10 +1699,10 @@ void FDCT32x32_2D(const int16_t *input,
         // stage 4
         {
          // expanding to 32-bit length prior to addition operations
-          lstep2[16] = _mm_unpacklo_epi16(step2[ 8], kZero);
-          lstep2[17] = _mm_unpackhi_epi16(step2[ 8], kZero);
-          lstep2[18] = _mm_unpacklo_epi16(step2[ 9], kZero);
-          lstep2[19] = _mm_unpackhi_epi16(step2[ 9], kZero);
+          lstep2[16] = _mm_unpacklo_epi16(step2[8], kZero);
+          lstep2[17] = _mm_unpackhi_epi16(step2[8], kZero);
+          lstep2[18] = _mm_unpacklo_epi16(step2[9], kZero);
+          lstep2[19] = _mm_unpackhi_epi16(step2[9], kZero);
           lstep2[28] = _mm_unpacklo_epi16(step2[14], kZero);
           lstep2[29] = _mm_unpackhi_epi16(step2[14], kZero);
           lstep2[30] = _mm_unpacklo_epi16(step2[15], kZero);
@@ -1661,14 +1716,14 @@ void FDCT32x32_2D(const int16_t *input,
           lstep2[30] = _mm_madd_epi16(lstep2[30], kOne);
           lstep2[31] = _mm_madd_epi16(lstep2[31], kOne);
 
-          lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]);
-          lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]);
-          lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]);
-          lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]);
-          lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]);
-          lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]);
-          lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]);
-          lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]);
+          lstep1[0] = _mm_add_epi32(lstep3[6], lstep3[0]);
+          lstep1[1] = _mm_add_epi32(lstep3[7], lstep3[1]);
+          lstep1[2] = _mm_add_epi32(lstep3[4], lstep3[2]);
+          lstep1[3] = _mm_add_epi32(lstep3[5], lstep3[3]);
+          lstep1[4] = _mm_sub_epi32(lstep3[2], lstep3[4]);
+          lstep1[5] = _mm_sub_epi32(lstep3[3], lstep3[5]);
+          lstep1[6] = _mm_sub_epi32(lstep3[0], lstep3[6]);
+          lstep1[7] = _mm_sub_epi32(lstep3[1], lstep3[7]);
           lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]);
           lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]);
           lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]);
@@ -1687,64 +1742,64 @@ void FDCT32x32_2D(const int16_t *input,
           lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]);
         }
         {
-        // to be continued...
-        //
-        const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
-        const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
-
-        u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]);
-        u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]);
-        u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]);
-        u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]);
-
-        // TODO(jingning): manually inline k_madd_epi32_ to further hide
-        // instruction latency.
-        v[0] = k_madd_epi32(u[0], k32_p16_m16);
-        v[1] = k_madd_epi32(u[1], k32_p16_m16);
-        v[2] = k_madd_epi32(u[2], k32_p16_m16);
-        v[3] = k_madd_epi32(u[3], k32_p16_m16);
-        v[4] = k_madd_epi32(u[0], k32_p16_p16);
-        v[5] = k_madd_epi32(u[1], k32_p16_p16);
-        v[6] = k_madd_epi32(u[2], k32_p16_p16);
-        v[7] = k_madd_epi32(u[3], k32_p16_p16);
+          // to be continued...
+          //
+          const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
+          const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
+
+          u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]);
+          u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]);
+          u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]);
+          u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]);
+
+          // TODO(jingning): manually inline k_madd_epi32_ to further hide
+          // instruction latency.
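+          // Judging by their use here, k_madd_epi32() widens the 32x32-bit
+          // products to 64 bits and k_packs_epi64() below narrows the sums
+          // back to 32-bit lanes ahead of the rounding shift.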
+          v[0] = k_madd_epi32(u[0], k32_p16_m16);
+          v[1] = k_madd_epi32(u[1], k32_p16_m16);
+          v[2] = k_madd_epi32(u[2], k32_p16_m16);
+          v[3] = k_madd_epi32(u[3], k32_p16_m16);
+          v[4] = k_madd_epi32(u[0], k32_p16_p16);
+          v[5] = k_madd_epi32(u[1], k32_p16_p16);
+          v[6] = k_madd_epi32(u[2], k32_p16_p16);
+          v[7] = k_madd_epi32(u[3], k32_p16_p16);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3],
-                                            &v[4], &v[5], &v[6], &v[7], &kZero);
-        if (overflow) {
-          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3], &v[4],
+                                              &v[5], &v[6], &v[7], &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-        u[0] = k_packs_epi64(v[0], v[1]);
-        u[1] = k_packs_epi64(v[2], v[3]);
-        u[2] = k_packs_epi64(v[4], v[5]);
-        u[3] = k_packs_epi64(v[6], v[7]);
-
-        v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-        v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-        v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-        v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-
-        lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
-        lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
-        lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
-        lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+
+          lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
         }
         {
           const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
           const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
           const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
 
-          u[ 0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]);
-          u[ 1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]);
-          u[ 2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]);
-          u[ 3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]);
-          u[ 4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]);
-          u[ 5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]);
-          u[ 6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]);
-          u[ 7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]);
-          u[ 8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]);
-          u[ 9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]);
+          u[0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]);
+          u[1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]);
+          u[2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]);
+          u[3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]);
+          u[4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]);
+          u[5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]);
+          u[6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]);
+          u[7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]);
+          u[8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]);
+          u[9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]);
           u[10] = _mm_unpacklo_epi32(lstep3[41], lstep3[55]);
           u[11] = _mm_unpackhi_epi32(lstep3[41], lstep3[55]);
           u[12] = _mm_unpacklo_epi32(lstep3[42], lstep3[52]);
@@ -1752,16 +1807,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep3[43], lstep3[53]);
           u[15] = _mm_unpackhi_epi32(lstep3[43], lstep3[53]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_m08_p24);
-          v[ 1] = k_madd_epi32(u[ 1], k32_m08_p24);
-          v[ 2] = k_madd_epi32(u[ 2], k32_m08_p24);
-          v[ 3] = k_madd_epi32(u[ 3], k32_m08_p24);
-          v[ 4] = k_madd_epi32(u[ 4], k32_m08_p24);
-          v[ 5] = k_madd_epi32(u[ 5], k32_m08_p24);
-          v[ 6] = k_madd_epi32(u[ 6], k32_m08_p24);
-          v[ 7] = k_madd_epi32(u[ 7], k32_m08_p24);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m24_m08);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m24_m08);
+          v[0] = k_madd_epi32(u[0], k32_m08_p24);
+          v[1] = k_madd_epi32(u[1], k32_m08_p24);
+          v[2] = k_madd_epi32(u[2], k32_m08_p24);
+          v[3] = k_madd_epi32(u[3], k32_m08_p24);
+          v[4] = k_madd_epi32(u[4], k32_m08_p24);
+          v[5] = k_madd_epi32(u[5], k32_m08_p24);
+          v[6] = k_madd_epi32(u[6], k32_m08_p24);
+          v[7] = k_madd_epi32(u[7], k32_m08_p24);
+          v[8] = k_madd_epi32(u[8], k32_m24_m08);
+          v[9] = k_madd_epi32(u[9], k32_m24_m08);
           v[10] = k_madd_epi32(u[10], k32_m24_m08);
           v[11] = k_madd_epi32(u[11], k32_m24_m08);
           v[12] = k_madd_epi32(u[12], k32_m24_m08);
@@ -1772,41 +1827,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m08_p24);
           v[18] = k_madd_epi32(u[14], k32_m08_p24);
           v[19] = k_madd_epi32(u[15], k32_m08_p24);
-          v[20] = k_madd_epi32(u[ 8], k32_m08_p24);
-          v[21] = k_madd_epi32(u[ 9], k32_m08_p24);
+          v[20] = k_madd_epi32(u[8], k32_m08_p24);
+          v[21] = k_madd_epi32(u[9], k32_m08_p24);
           v[22] = k_madd_epi32(u[10], k32_m08_p24);
           v[23] = k_madd_epi32(u[11], k32_m08_p24);
-          v[24] = k_madd_epi32(u[ 4], k32_p24_p08);
-          v[25] = k_madd_epi32(u[ 5], k32_p24_p08);
-          v[26] = k_madd_epi32(u[ 6], k32_p24_p08);
-          v[27] = k_madd_epi32(u[ 7], k32_p24_p08);
-          v[28] = k_madd_epi32(u[ 0], k32_p24_p08);
-          v[29] = k_madd_epi32(u[ 1], k32_p24_p08);
-          v[30] = k_madd_epi32(u[ 2], k32_p24_p08);
-          v[31] = k_madd_epi32(u[ 3], k32_p24_p08);
+          v[24] = k_madd_epi32(u[4], k32_p24_p08);
+          v[25] = k_madd_epi32(u[5], k32_p24_p08);
+          v[26] = k_madd_epi32(u[6], k32_p24_p08);
+          v[27] = k_madd_epi32(u[7], k32_p24_p08);
+          v[28] = k_madd_epi32(u[0], k32_p24_p08);
+          v[29] = k_madd_epi32(u[1], k32_p24_p08);
+          v[30] = k_madd_epi32(u[2], k32_p24_p08);
+          v[31] = k_madd_epi32(u[3], k32_p24_p08);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -1814,16 +1868,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -1831,16 +1885,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          lstep1[36] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep1[37] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep1[38] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep1[39] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep1[40] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep1[41] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep1[42] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep1[43] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep1[52] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep1[53] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep1[36] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep1[37] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep1[38] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep1[39] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          lstep1[40] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          lstep1[41] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          lstep1[42] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          lstep1[43] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          lstep1[52] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          lstep1[53] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           lstep1[54] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           lstep1[55] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           lstep1[56] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
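
The unpack / k_madd_epi32 / k_packs_epi64 / round / shift run above is one fixed-point rotation of the DCT butterfly network. A scalar sketch of what each output lane receives, inferred from the call pattern here rather than quoted from the helpers' definitions:

#include <stdint.h>

/* With u = unpack(x, y) and k = pair_set_epi32(c0, c1), k_madd_epi32
 * appears to form the 64-bit sum x*c0 + y*c1 per lane, k_packs_epi64
 * narrows it back to 32 bits, and the rounding constant plus the
 * DCT_CONST_BITS shift finish the fixed-point rotation. */
static int32_t rotate_term(int32_t x, int32_t y, int32_t c0, int32_t c1,
                           int bits /* DCT_CONST_BITS, 14 in libvpx */) {
  const int64_t sum = (int64_t)x * c0 + (int64_t)y * c1;
  return (int32_t)((sum + (1 << (bits - 1))) >> bits); /* round, then shift */
}
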
@@ -1850,10 +1904,10 @@ void FDCT32x32_2D(const int16_t *input,
         }
         // stage 5
         {
-          lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]);
-          lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]);
-          lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]);
-          lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]);
+          lstep2[8] = _mm_add_epi32(lstep1[10], lstep3[8]);
+          lstep2[9] = _mm_add_epi32(lstep1[11], lstep3[9]);
+          lstep2[10] = _mm_sub_epi32(lstep3[8], lstep1[10]);
+          lstep2[11] = _mm_sub_epi32(lstep3[9], lstep1[11]);
           lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]);
           lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]);
           lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]);
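
The stage 5 block above is the other half of the butterfly network: no rotation, just paired sums and differences (note that lstep2[8] and lstep2[10] are the sum and the difference of lstep3[8] and lstep1[10]). A scalar model, for illustration only:

#include <stdint.h>

static void butterfly_32(int32_t a, int32_t b, int32_t *sum, int32_t *diff) {
  *sum = a + b;  /* e.g. lstep2[8]  */
  *diff = a - b; /* e.g. lstep2[10] */
}
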
@@ -1876,16 +1930,16 @@ void FDCT32x32_2D(const int16_t *input,
 
           // TODO(jingning): manually inline k_madd_epi32_ to further hide
           // instruction latency.
-          v[ 0] = k_madd_epi32(u[0], k32_p16_p16);
-          v[ 1] = k_madd_epi32(u[1], k32_p16_p16);
-          v[ 2] = k_madd_epi32(u[2], k32_p16_p16);
-          v[ 3] = k_madd_epi32(u[3], k32_p16_p16);
-          v[ 4] = k_madd_epi32(u[0], k32_p16_m16);
-          v[ 5] = k_madd_epi32(u[1], k32_p16_m16);
-          v[ 6] = k_madd_epi32(u[2], k32_p16_m16);
-          v[ 7] = k_madd_epi32(u[3], k32_p16_m16);
-          v[ 8] = k_madd_epi32(u[4], k32_p24_p08);
-          v[ 9] = k_madd_epi32(u[5], k32_p24_p08);
+          v[0] = k_madd_epi32(u[0], k32_p16_p16);
+          v[1] = k_madd_epi32(u[1], k32_p16_p16);
+          v[2] = k_madd_epi32(u[2], k32_p16_p16);
+          v[3] = k_madd_epi32(u[3], k32_p16_p16);
+          v[4] = k_madd_epi32(u[0], k32_p16_m16);
+          v[5] = k_madd_epi32(u[1], k32_p16_m16);
+          v[6] = k_madd_epi32(u[2], k32_p16_m16);
+          v[7] = k_madd_epi32(u[3], k32_p16_m16);
+          v[8] = k_madd_epi32(u[4], k32_p24_p08);
+          v[9] = k_madd_epi32(u[5], k32_p24_p08);
           v[10] = k_madd_epi32(u[6], k32_p24_p08);
           v[11] = k_madd_epi32(u[7], k32_p24_p08);
           v[12] = k_madd_epi32(u[4], k32_m08_p24);
@@ -1895,9 +1949,8 @@ void FDCT32x32_2D(const int16_t *input,
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
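
The TODO in the hunk above proposes hand-inlining k_madd_epi32 so its multiplies can be interleaved with neighbouring instructions. A hedged sketch of what that expansion would look like in SSE2 (an even/odd-lane multiply-add; the file's actual definition may differ):

#include <emmintrin.h>

/* Accumulate 32-bit lane products into two 64-bit sums: lanes {0,1} feed
 * the low result, lanes {2,3} the high one.  _mm_mul_epu32 multiplies
 * lanes 0 and 2; shifting both inputs right by 32 exposes lanes 1 and 3.
 * The unsigned multiply is still correct in the low 32 bits, which is
 * all that survives the later k_packs_epi64. */
static __m128i k_madd_epi32_sketch(__m128i a, __m128i b) {
  const __m128i even = _mm_mul_epu32(a, b);
  const __m128i odd =
      _mm_mul_epu32(_mm_srli_epi64(a, 32), _mm_srli_epi64(b, 32));
  return _mm_add_epi64(even, odd);
}
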
@@ -1967,13 +2020,13 @@ void FDCT32x32_2D(const int16_t *input,
           u[7] = _mm_srai_epi32(u[7], 2);
 
           // Combine
-          out[ 0] = _mm_packs_epi32(u[0], u[1]);
+          out[0] = _mm_packs_epi32(u[0], u[1]);
           out[16] = _mm_packs_epi32(u[2], u[3]);
-          out[ 8] = _mm_packs_epi32(u[4], u[5]);
+          out[8] = _mm_packs_epi32(u[4], u[5]);
           out[24] = _mm_packs_epi32(u[6], u[7]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&out[0], &out[16],
-                                             &out[8], &out[24]);
+          overflow =
+              check_epi16_overflow_x4(&out[0], &out[16], &out[8], &out[24]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
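
In the high-bit-depth build, every _mm_packs_epi32 above is followed by an overflow check and a bail-out to the C row transform at full precision. A plausible shape for such a check, assuming it flags lanes that the pack saturated (an illustration; the real check_epi16_overflow_* helpers may differ):

#include <emmintrin.h>

/* Report whether any 16-bit lane was clamped to the int16 limits by
 * _mm_packs_epi32. */
static int epi16_saturated_sketch(__m128i x) {
  const __m128i maxv = _mm_set1_epi16(0x7fff);
  const __m128i minv = _mm_set1_epi16((short)0x8000);
  const __m128i hit =
      _mm_or_si128(_mm_cmpeq_epi16(x, maxv), _mm_cmpeq_epi16(x, minv));
  return _mm_movemask_epi8(hit) != 0;
}
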
@@ -2002,8 +2055,8 @@ void FDCT32x32_2D(const int16_t *input,
           v[5] = k_madd_epi32(u[5], k32_m24_m08);
           v[6] = k_madd_epi32(u[6], k32_m24_m08);
           v[7] = k_madd_epi32(u[7], k32_m24_m08);
-          v[ 8] = k_madd_epi32(u[4], k32_m08_p24);
-          v[ 9] = k_madd_epi32(u[5], k32_m08_p24);
+          v[8] = k_madd_epi32(u[4], k32_m08_p24);
+          v[9] = k_madd_epi32(u[5], k32_m08_p24);
           v[10] = k_madd_epi32(u[6], k32_m08_p24);
           v[11] = k_madd_epi32(u[7], k32_m08_p24);
           v[12] = k_madd_epi32(u[0], k32_p24_p08);
@@ -2013,9 +2066,8 @@ void FDCT32x32_2D(const int16_t *input,
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
@@ -2089,10 +2141,10 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
           const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
 
-          u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+          u[0] = _mm_unpacklo_epi32(lstep2[8], lstep2[14]);
+          u[1] = _mm_unpackhi_epi32(lstep2[8], lstep2[14]);
+          u[2] = _mm_unpacklo_epi32(lstep2[9], lstep2[15]);
+          u[3] = _mm_unpackhi_epi32(lstep2[9], lstep2[15]);
           u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
           u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
           u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
@@ -2101,10 +2153,10 @@ void FDCT32x32_2D(const int16_t *input,
           u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
           u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
           u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
-          u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+          u[12] = _mm_unpacklo_epi32(lstep2[8], lstep2[14]);
+          u[13] = _mm_unpackhi_epi32(lstep2[8], lstep2[14]);
+          u[14] = _mm_unpacklo_epi32(lstep2[9], lstep2[15]);
+          u[15] = _mm_unpackhi_epi32(lstep2[9], lstep2[15]);
 
           v[0] = k_madd_epi32(u[0], k32_p28_p04);
           v[1] = k_madd_epi32(u[1], k32_p28_p04);
@@ -2114,8 +2166,8 @@ void FDCT32x32_2D(const int16_t *input,
           v[5] = k_madd_epi32(u[5], k32_p12_p20);
           v[6] = k_madd_epi32(u[6], k32_p12_p20);
           v[7] = k_madd_epi32(u[7], k32_p12_p20);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
+          v[8] = k_madd_epi32(u[8], k32_m20_p12);
+          v[9] = k_madd_epi32(u[9], k32_m20_p12);
           v[10] = k_madd_epi32(u[10], k32_m20_p12);
           v[11] = k_madd_epi32(u[11], k32_m20_p12);
           v[12] = k_madd_epi32(u[12], k32_m04_p28);
@@ -2125,9 +2177,8 @@ void FDCT32x32_2D(const int16_t *input,
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
@@ -2196,13 +2247,13 @@ void FDCT32x32_2D(const int16_t *input,
           u[6] = _mm_srai_epi32(u[6], 2);
           u[7] = _mm_srai_epi32(u[7], 2);
 
-          out[ 4] = _mm_packs_epi32(u[0], u[1]);
+          out[4] = _mm_packs_epi32(u[0], u[1]);
           out[20] = _mm_packs_epi32(u[2], u[3]);
           out[12] = _mm_packs_epi32(u[4], u[5]);
           out[28] = _mm_packs_epi32(u[6], u[7]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&out[4], &out[20],
-                                             &out[12], &out[28]);
+          overflow =
+              check_epi16_overflow_x4(&out[4], &out[20], &out[12], &out[28]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
@@ -2231,21 +2282,21 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
           const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64);
           const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
-          const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64,
-                                                     -cospi_20_64);
+          const __m128i k32_m12_m20 =
+              pair_set_epi32(-cospi_12_64, -cospi_20_64);
           const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
           const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
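
The constants in this block follow the file's naming scheme: k32_mAA_pBB holds the pair (-cospi_AA_64, +cospi_BB_64) duplicated into both 64-bit lanes by pair_set_epi32. As a hedged note on the values themselves, assuming the usual 14-bit fixed-point generation of the cospi tables:

#include <math.h>

/* cospi_N_64 == round(cos(N*pi/64) * 2^14); e.g. cos(4*pi/64) ~= 0.98079,
 * so cospi_4_64 ~= 16069.  The DCT_CONST_BITS (14) shift removes this
 * scale again after each rotation. */
static int cospi_sketch(int n) {
  const double pi = 3.14159265358979323846;
  return (int)lrint(cos(n * pi / 64.0) * (1 << 14));
}
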
 
-          u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]);
-          u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]);
-          u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]);
-          u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]);
-          u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]);
-          u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]);
-          u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]);
-          u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]);
-          u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]);
-          u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]);
+          u[0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]);
+          u[1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]);
+          u[2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]);
+          u[3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]);
+          u[4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]);
+          u[5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]);
+          u[6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]);
+          u[7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]);
+          u[8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]);
+          u[9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]);
           u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]);
           u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]);
           u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]);
@@ -2253,16 +2304,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]);
           u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28);
-          v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28);
-          v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28);
-          v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28);
-          v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04);
-          v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04);
-          v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04);
-          v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
+          v[0] = k_madd_epi32(u[0], k32_m04_p28);
+          v[1] = k_madd_epi32(u[1], k32_m04_p28);
+          v[2] = k_madd_epi32(u[2], k32_m04_p28);
+          v[3] = k_madd_epi32(u[3], k32_m04_p28);
+          v[4] = k_madd_epi32(u[4], k32_m28_m04);
+          v[5] = k_madd_epi32(u[5], k32_m28_m04);
+          v[6] = k_madd_epi32(u[6], k32_m28_m04);
+          v[7] = k_madd_epi32(u[7], k32_m28_m04);
+          v[8] = k_madd_epi32(u[8], k32_m20_p12);
+          v[9] = k_madd_epi32(u[9], k32_m20_p12);
           v[10] = k_madd_epi32(u[10], k32_m20_p12);
           v[11] = k_madd_epi32(u[11], k32_m20_p12);
           v[12] = k_madd_epi32(u[12], k32_m12_m20);
@@ -2273,41 +2324,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m20_p12);
           v[18] = k_madd_epi32(u[14], k32_m20_p12);
           v[19] = k_madd_epi32(u[15], k32_m20_p12);
-          v[20] = k_madd_epi32(u[ 8], k32_p12_p20);
-          v[21] = k_madd_epi32(u[ 9], k32_p12_p20);
+          v[20] = k_madd_epi32(u[8], k32_p12_p20);
+          v[21] = k_madd_epi32(u[9], k32_p12_p20);
           v[22] = k_madd_epi32(u[10], k32_p12_p20);
           v[23] = k_madd_epi32(u[11], k32_p12_p20);
-          v[24] = k_madd_epi32(u[ 4], k32_m04_p28);
-          v[25] = k_madd_epi32(u[ 5], k32_m04_p28);
-          v[26] = k_madd_epi32(u[ 6], k32_m04_p28);
-          v[27] = k_madd_epi32(u[ 7], k32_m04_p28);
-          v[28] = k_madd_epi32(u[ 0], k32_p28_p04);
-          v[29] = k_madd_epi32(u[ 1], k32_p28_p04);
-          v[30] = k_madd_epi32(u[ 2], k32_p28_p04);
-          v[31] = k_madd_epi32(u[ 3], k32_p28_p04);
+          v[24] = k_madd_epi32(u[4], k32_m04_p28);
+          v[25] = k_madd_epi32(u[5], k32_m04_p28);
+          v[26] = k_madd_epi32(u[6], k32_m04_p28);
+          v[27] = k_madd_epi32(u[7], k32_m04_p28);
+          v[28] = k_madd_epi32(u[0], k32_p28_p04);
+          v[29] = k_madd_epi32(u[1], k32_p28_p04);
+          v[30] = k_madd_epi32(u[2], k32_p28_p04);
+          v[31] = k_madd_epi32(u[3], k32_p28_p04);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -2315,16 +2365,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2332,16 +2382,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep3[34] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep3[35] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep3[36] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep3[37] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          lstep3[42] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          lstep3[43] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          lstep3[44] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          lstep3[45] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          lstep3[50] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          lstep3[51] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2354,22 +2404,22 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64);
           const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64);
           const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64);
-          const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64,  cospi_26_64);
+          const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64, cospi_26_64);
           const __m128i k32_m26_p06 = pair_set_epi32(-cospi_26_64, cospi_6_64);
           const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64);
           const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64);
           const __m128i k32_m02_p30 = pair_set_epi32(-cospi_2_64, cospi_30_64);
 
-          u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]);
-          u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]);
-          u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]);
-          u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]);
-          u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]);
-          u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]);
-          u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]);
-          u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]);
-          u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]);
-          u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]);
+          u[0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]);
+          u[1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]);
+          u[2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]);
+          u[3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]);
+          u[4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]);
+          u[5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]);
+          u[6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]);
+          u[7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]);
+          u[8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]);
+          u[9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]);
           u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]);
           u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]);
           u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]);
@@ -2377,16 +2427,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]);
           u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10);
+          v[0] = k_madd_epi32(u[0], k32_p30_p02);
+          v[1] = k_madd_epi32(u[1], k32_p30_p02);
+          v[2] = k_madd_epi32(u[2], k32_p30_p02);
+          v[3] = k_madd_epi32(u[3], k32_p30_p02);
+          v[4] = k_madd_epi32(u[4], k32_p14_p18);
+          v[5] = k_madd_epi32(u[5], k32_p14_p18);
+          v[6] = k_madd_epi32(u[6], k32_p14_p18);
+          v[7] = k_madd_epi32(u[7], k32_p14_p18);
+          v[8] = k_madd_epi32(u[8], k32_p22_p10);
+          v[9] = k_madd_epi32(u[9], k32_p22_p10);
           v[10] = k_madd_epi32(u[10], k32_p22_p10);
           v[11] = k_madd_epi32(u[11], k32_p22_p10);
           v[12] = k_madd_epi32(u[12], k32_p06_p26);
@@ -2397,41 +2447,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m26_p06);
           v[18] = k_madd_epi32(u[14], k32_m26_p06);
           v[19] = k_madd_epi32(u[15], k32_m26_p06);
-          v[20] = k_madd_epi32(u[ 8], k32_m10_p22);
-          v[21] = k_madd_epi32(u[ 9], k32_m10_p22);
+          v[20] = k_madd_epi32(u[8], k32_m10_p22);
+          v[21] = k_madd_epi32(u[9], k32_m10_p22);
           v[22] = k_madd_epi32(u[10], k32_m10_p22);
           v[23] = k_madd_epi32(u[11], k32_m10_p22);
-          v[24] = k_madd_epi32(u[ 4], k32_m18_p14);
-          v[25] = k_madd_epi32(u[ 5], k32_m18_p14);
-          v[26] = k_madd_epi32(u[ 6], k32_m18_p14);
-          v[27] = k_madd_epi32(u[ 7], k32_m18_p14);
-          v[28] = k_madd_epi32(u[ 0], k32_m02_p30);
-          v[29] = k_madd_epi32(u[ 1], k32_m02_p30);
-          v[30] = k_madd_epi32(u[ 2], k32_m02_p30);
-          v[31] = k_madd_epi32(u[ 3], k32_m02_p30);
+          v[24] = k_madd_epi32(u[4], k32_m18_p14);
+          v[25] = k_madd_epi32(u[5], k32_m18_p14);
+          v[26] = k_madd_epi32(u[6], k32_m18_p14);
+          v[27] = k_madd_epi32(u[7], k32_m18_p14);
+          v[28] = k_madd_epi32(u[0], k32_m02_p30);
+          v[29] = k_madd_epi32(u[1], k32_m02_p30);
+          v[30] = k_madd_epi32(u[2], k32_m02_p30);
+          v[31] = k_madd_epi32(u[3], k32_m02_p30);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -2439,16 +2488,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2456,16 +2505,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2473,16 +2522,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
 
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[0] = _mm_cmplt_epi32(u[0], kZero);
+          v[1] = _mm_cmplt_epi32(u[1], kZero);
+          v[2] = _mm_cmplt_epi32(u[2], kZero);
+          v[3] = _mm_cmplt_epi32(u[3], kZero);
+          v[4] = _mm_cmplt_epi32(u[4], kZero);
+          v[5] = _mm_cmplt_epi32(u[5], kZero);
+          v[6] = _mm_cmplt_epi32(u[6], kZero);
+          v[7] = _mm_cmplt_epi32(u[7], kZero);
+          v[8] = _mm_cmplt_epi32(u[8], kZero);
+          v[9] = _mm_cmplt_epi32(u[9], kZero);
           v[10] = _mm_cmplt_epi32(u[10], kZero);
           v[11] = _mm_cmplt_epi32(u[11], kZero);
           v[12] = _mm_cmplt_epi32(u[12], kZero);
@@ -2490,16 +2539,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_cmplt_epi32(u[14], kZero);
           v[15] = _mm_cmplt_epi32(u[15], kZero);
 
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[0] = _mm_sub_epi32(u[0], v[0]);
+          u[1] = _mm_sub_epi32(u[1], v[1]);
+          u[2] = _mm_sub_epi32(u[2], v[2]);
+          u[3] = _mm_sub_epi32(u[3], v[3]);
+          u[4] = _mm_sub_epi32(u[4], v[4]);
+          u[5] = _mm_sub_epi32(u[5], v[5]);
+          u[6] = _mm_sub_epi32(u[6], v[6]);
+          u[7] = _mm_sub_epi32(u[7], v[7]);
+          u[8] = _mm_sub_epi32(u[8], v[8]);
+          u[9] = _mm_sub_epi32(u[9], v[9]);
           u[10] = _mm_sub_epi32(u[10], v[10]);
           u[11] = _mm_sub_epi32(u[11], v[11]);
           u[12] = _mm_sub_epi32(u[12], v[12]);
@@ -2507,16 +2556,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_sub_epi32(u[14], v[14]);
           u[15] = _mm_sub_epi32(u[15], v[15]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], K32One);
-          v[ 1] = _mm_add_epi32(u[ 1], K32One);
-          v[ 2] = _mm_add_epi32(u[ 2], K32One);
-          v[ 3] = _mm_add_epi32(u[ 3], K32One);
-          v[ 4] = _mm_add_epi32(u[ 4], K32One);
-          v[ 5] = _mm_add_epi32(u[ 5], K32One);
-          v[ 6] = _mm_add_epi32(u[ 6], K32One);
-          v[ 7] = _mm_add_epi32(u[ 7], K32One);
-          v[ 8] = _mm_add_epi32(u[ 8], K32One);
-          v[ 9] = _mm_add_epi32(u[ 9], K32One);
+          v[0] = _mm_add_epi32(u[0], K32One);
+          v[1] = _mm_add_epi32(u[1], K32One);
+          v[2] = _mm_add_epi32(u[2], K32One);
+          v[3] = _mm_add_epi32(u[3], K32One);
+          v[4] = _mm_add_epi32(u[4], K32One);
+          v[5] = _mm_add_epi32(u[5], K32One);
+          v[6] = _mm_add_epi32(u[6], K32One);
+          v[7] = _mm_add_epi32(u[7], K32One);
+          v[8] = _mm_add_epi32(u[8], K32One);
+          v[9] = _mm_add_epi32(u[9], K32One);
           v[10] = _mm_add_epi32(u[10], K32One);
           v[11] = _mm_add_epi32(u[11], K32One);
           v[12] = _mm_add_epi32(u[12], K32One);
@@ -2524,16 +2573,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], K32One);
           v[15] = _mm_add_epi32(u[15], K32One);
 
-          u[ 0] = _mm_srai_epi32(v[ 0], 2);
-          u[ 1] = _mm_srai_epi32(v[ 1], 2);
-          u[ 2] = _mm_srai_epi32(v[ 2], 2);
-          u[ 3] = _mm_srai_epi32(v[ 3], 2);
-          u[ 4] = _mm_srai_epi32(v[ 4], 2);
-          u[ 5] = _mm_srai_epi32(v[ 5], 2);
-          u[ 6] = _mm_srai_epi32(v[ 6], 2);
-          u[ 7] = _mm_srai_epi32(v[ 7], 2);
-          u[ 8] = _mm_srai_epi32(v[ 8], 2);
-          u[ 9] = _mm_srai_epi32(v[ 9], 2);
+          u[0] = _mm_srai_epi32(v[0], 2);
+          u[1] = _mm_srai_epi32(v[1], 2);
+          u[2] = _mm_srai_epi32(v[2], 2);
+          u[3] = _mm_srai_epi32(v[3], 2);
+          u[4] = _mm_srai_epi32(v[4], 2);
+          u[5] = _mm_srai_epi32(v[5], 2);
+          u[6] = _mm_srai_epi32(v[6], 2);
+          u[7] = _mm_srai_epi32(v[7], 2);
+          u[8] = _mm_srai_epi32(v[8], 2);
+          u[9] = _mm_srai_epi32(v[9], 2);
           u[10] = _mm_srai_epi32(v[10], 2);
           u[11] = _mm_srai_epi32(v[11], 2);
           u[12] = _mm_srai_epi32(v[12], 2);
@@ -2541,18 +2590,18 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], 2);
           u[15] = _mm_srai_epi32(v[15], 2);
 
-          out[ 2] = _mm_packs_epi32(u[0], u[1]);
+          out[2] = _mm_packs_epi32(u[0], u[1]);
           out[18] = _mm_packs_epi32(u[2], u[3]);
           out[10] = _mm_packs_epi32(u[4], u[5]);
           out[26] = _mm_packs_epi32(u[6], u[7]);
-          out[ 6] = _mm_packs_epi32(u[8], u[9]);
+          out[6] = _mm_packs_epi32(u[8], u[9]);
           out[22] = _mm_packs_epi32(u[10], u[11]);
           out[14] = _mm_packs_epi32(u[12], u[13]);
           out[30] = _mm_packs_epi32(u[14], u[15]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
-                                             &out[26], &out[6], &out[22],
-                                             &out[14], &out[30]);
+          overflow =
+              check_epi16_overflow_x8(&out[2], &out[18], &out[10], &out[26],
+                                      &out[6], &out[22], &out[14], &out[30]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
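
The cmplt / sub / add-one / shift run feeding the out[] packs above is the final divide-by-4 output scaling with symmetric rounding: the compare mask is -1 exactly on negative lanes, so subtracting it biases those lanes up by one before the +1 and the arithmetic shift. Scalar equivalent (matching the rounding the C fdct32x32 reference appears to use):

#include <stdint.h>

/* (x + 1 + (x < 0)) >> 2: the arithmetic shift floors, and the two biases
 * turn that into round-to-nearest with halves rounded toward zero, so the
 * result is symmetric in +x / -x. */
static int32_t final_round_shift2(int32_t x) {
  return (x + 1 + (x < 0)) >> 2;
}
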
@@ -2604,16 +2653,16 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64);
           const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64);
 
-          u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]);
-          u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]);
-          u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]);
-          u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]);
-          u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]);
-          u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]);
-          u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]);
-          u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]);
-          u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]);
-          u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]);
+          u[0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]);
+          u[1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]);
+          u[2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]);
+          u[3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]);
+          u[4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]);
+          u[5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]);
+          u[6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]);
+          u[7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]);
+          u[8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]);
+          u[9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]);
           u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]);
           u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]);
           u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]);
@@ -2621,16 +2670,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]);
           u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p23_p09);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09);
+          v[0] = k_madd_epi32(u[0], k32_p31_p01);
+          v[1] = k_madd_epi32(u[1], k32_p31_p01);
+          v[2] = k_madd_epi32(u[2], k32_p31_p01);
+          v[3] = k_madd_epi32(u[3], k32_p31_p01);
+          v[4] = k_madd_epi32(u[4], k32_p15_p17);
+          v[5] = k_madd_epi32(u[5], k32_p15_p17);
+          v[6] = k_madd_epi32(u[6], k32_p15_p17);
+          v[7] = k_madd_epi32(u[7], k32_p15_p17);
+          v[8] = k_madd_epi32(u[8], k32_p23_p09);
+          v[9] = k_madd_epi32(u[9], k32_p23_p09);
           v[10] = k_madd_epi32(u[10], k32_p23_p09);
           v[11] = k_madd_epi32(u[11], k32_p23_p09);
           v[12] = k_madd_epi32(u[12], k32_p07_p25);
@@ -2641,41 +2690,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m25_p07);
           v[18] = k_madd_epi32(u[14], k32_m25_p07);
           v[19] = k_madd_epi32(u[15], k32_m25_p07);
-          v[20] = k_madd_epi32(u[ 8], k32_m09_p23);
-          v[21] = k_madd_epi32(u[ 9], k32_m09_p23);
+          v[20] = k_madd_epi32(u[8], k32_m09_p23);
+          v[21] = k_madd_epi32(u[9], k32_m09_p23);
           v[22] = k_madd_epi32(u[10], k32_m09_p23);
           v[23] = k_madd_epi32(u[11], k32_m09_p23);
-          v[24] = k_madd_epi32(u[ 4], k32_m17_p15);
-          v[25] = k_madd_epi32(u[ 5], k32_m17_p15);
-          v[26] = k_madd_epi32(u[ 6], k32_m17_p15);
-          v[27] = k_madd_epi32(u[ 7], k32_m17_p15);
-          v[28] = k_madd_epi32(u[ 0], k32_m01_p31);
-          v[29] = k_madd_epi32(u[ 1], k32_m01_p31);
-          v[30] = k_madd_epi32(u[ 2], k32_m01_p31);
-          v[31] = k_madd_epi32(u[ 3], k32_m01_p31);
+          v[24] = k_madd_epi32(u[4], k32_m17_p15);
+          v[25] = k_madd_epi32(u[5], k32_m17_p15);
+          v[26] = k_madd_epi32(u[6], k32_m17_p15);
+          v[27] = k_madd_epi32(u[7], k32_m17_p15);
+          v[28] = k_madd_epi32(u[0], k32_m01_p31);
+          v[29] = k_madd_epi32(u[1], k32_m01_p31);
+          v[30] = k_madd_epi32(u[2], k32_m01_p31);
+          v[31] = k_madd_epi32(u[3], k32_m01_p31);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -2683,16 +2731,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2700,16 +2748,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2717,16 +2765,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
 
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[0] = _mm_cmplt_epi32(u[0], kZero);
+          v[1] = _mm_cmplt_epi32(u[1], kZero);
+          v[2] = _mm_cmplt_epi32(u[2], kZero);
+          v[3] = _mm_cmplt_epi32(u[3], kZero);
+          v[4] = _mm_cmplt_epi32(u[4], kZero);
+          v[5] = _mm_cmplt_epi32(u[5], kZero);
+          v[6] = _mm_cmplt_epi32(u[6], kZero);
+          v[7] = _mm_cmplt_epi32(u[7], kZero);
+          v[8] = _mm_cmplt_epi32(u[8], kZero);
+          v[9] = _mm_cmplt_epi32(u[9], kZero);
           v[10] = _mm_cmplt_epi32(u[10], kZero);
           v[11] = _mm_cmplt_epi32(u[11], kZero);
           v[12] = _mm_cmplt_epi32(u[12], kZero);
@@ -2734,16 +2782,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_cmplt_epi32(u[14], kZero);
           v[15] = _mm_cmplt_epi32(u[15], kZero);
 
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[0] = _mm_sub_epi32(u[0], v[0]);
+          u[1] = _mm_sub_epi32(u[1], v[1]);
+          u[2] = _mm_sub_epi32(u[2], v[2]);
+          u[3] = _mm_sub_epi32(u[3], v[3]);
+          u[4] = _mm_sub_epi32(u[4], v[4]);
+          u[5] = _mm_sub_epi32(u[5], v[5]);
+          u[6] = _mm_sub_epi32(u[6], v[6]);
+          u[7] = _mm_sub_epi32(u[7], v[7]);
+          u[8] = _mm_sub_epi32(u[8], v[8]);
+          u[9] = _mm_sub_epi32(u[9], v[9]);
           u[10] = _mm_sub_epi32(u[10], v[10]);
           u[11] = _mm_sub_epi32(u[11], v[11]);
           u[12] = _mm_sub_epi32(u[12], v[12]);
@@ -2785,18 +2833,18 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], 2);
           u[15] = _mm_srai_epi32(v[15], 2);
 
-          out[ 1] = _mm_packs_epi32(u[0], u[1]);
+          out[1] = _mm_packs_epi32(u[0], u[1]);
           out[17] = _mm_packs_epi32(u[2], u[3]);
-          out[ 9] = _mm_packs_epi32(u[4], u[5]);
+          out[9] = _mm_packs_epi32(u[4], u[5]);
           out[25] = _mm_packs_epi32(u[6], u[7]);
-          out[ 7] = _mm_packs_epi32(u[8], u[9]);
+          out[7] = _mm_packs_epi32(u[8], u[9]);
           out[23] = _mm_packs_epi32(u[10], u[11]);
           out[15] = _mm_packs_epi32(u[12], u[13]);
           out[31] = _mm_packs_epi32(u[14], u[15]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
-                                             &out[25], &out[7], &out[23],
-                                             &out[15], &out[31]);
+          overflow =
+              check_epi16_overflow_x8(&out[1], &out[17], &out[9], &out[25],
+                                      &out[7], &out[23], &out[15], &out[31]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
@@ -2813,16 +2861,16 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64);
           const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64);
 
-          u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]);
-          u[ 1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]);
-          u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]);
-          u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]);
-          u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]);
-          u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]);
-          u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]);
-          u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]);
-          u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]);
-          u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]);
+          u[0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]);
+          u[1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]);
+          u[2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]);
+          u[3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]);
+          u[4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]);
+          u[5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]);
+          u[6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]);
+          u[7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]);
+          u[8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]);
+          u[9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]);
           u[10] = _mm_unpacklo_epi32(lstep1[45], lstep1[51]);
           u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]);
           u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]);
@@ -2830,16 +2878,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]);
           u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13);
+          v[0] = k_madd_epi32(u[0], k32_p27_p05);
+          v[1] = k_madd_epi32(u[1], k32_p27_p05);
+          v[2] = k_madd_epi32(u[2], k32_p27_p05);
+          v[3] = k_madd_epi32(u[3], k32_p27_p05);
+          v[4] = k_madd_epi32(u[4], k32_p11_p21);
+          v[5] = k_madd_epi32(u[5], k32_p11_p21);
+          v[6] = k_madd_epi32(u[6], k32_p11_p21);
+          v[7] = k_madd_epi32(u[7], k32_p11_p21);
+          v[8] = k_madd_epi32(u[8], k32_p19_p13);
+          v[9] = k_madd_epi32(u[9], k32_p19_p13);
           v[10] = k_madd_epi32(u[10], k32_p19_p13);
           v[11] = k_madd_epi32(u[11], k32_p19_p13);
           v[12] = k_madd_epi32(u[12], k32_p03_p29);
@@ -2850,41 +2898,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m29_p03);
           v[18] = k_madd_epi32(u[14], k32_m29_p03);
           v[19] = k_madd_epi32(u[15], k32_m29_p03);
-          v[20] = k_madd_epi32(u[ 8], k32_m13_p19);
-          v[21] = k_madd_epi32(u[ 9], k32_m13_p19);
+          v[20] = k_madd_epi32(u[8], k32_m13_p19);
+          v[21] = k_madd_epi32(u[9], k32_m13_p19);
           v[22] = k_madd_epi32(u[10], k32_m13_p19);
           v[23] = k_madd_epi32(u[11], k32_m13_p19);
-          v[24] = k_madd_epi32(u[ 4], k32_m21_p11);
-          v[25] = k_madd_epi32(u[ 5], k32_m21_p11);
-          v[26] = k_madd_epi32(u[ 6], k32_m21_p11);
-          v[27] = k_madd_epi32(u[ 7], k32_m21_p11);
-          v[28] = k_madd_epi32(u[ 0], k32_m05_p27);
-          v[29] = k_madd_epi32(u[ 1], k32_m05_p27);
-          v[30] = k_madd_epi32(u[ 2], k32_m05_p27);
-          v[31] = k_madd_epi32(u[ 3], k32_m05_p27);
+          v[24] = k_madd_epi32(u[4], k32_m21_p11);
+          v[25] = k_madd_epi32(u[5], k32_m21_p11);
+          v[26] = k_madd_epi32(u[6], k32_m21_p11);
+          v[27] = k_madd_epi32(u[7], k32_m21_p11);
+          v[28] = k_madd_epi32(u[0], k32_m05_p27);
+          v[29] = k_madd_epi32(u[1], k32_m05_p27);
+          v[30] = k_madd_epi32(u[2], k32_m05_p27);
+          v[31] = k_madd_epi32(u[3], k32_m05_p27);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -2892,16 +2939,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2909,16 +2956,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2926,16 +2973,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
 
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[0] = _mm_cmplt_epi32(u[0], kZero);
+          v[1] = _mm_cmplt_epi32(u[1], kZero);
+          v[2] = _mm_cmplt_epi32(u[2], kZero);
+          v[3] = _mm_cmplt_epi32(u[3], kZero);
+          v[4] = _mm_cmplt_epi32(u[4], kZero);
+          v[5] = _mm_cmplt_epi32(u[5], kZero);
+          v[6] = _mm_cmplt_epi32(u[6], kZero);
+          v[7] = _mm_cmplt_epi32(u[7], kZero);
+          v[8] = _mm_cmplt_epi32(u[8], kZero);
+          v[9] = _mm_cmplt_epi32(u[9], kZero);
           v[10] = _mm_cmplt_epi32(u[10], kZero);
           v[11] = _mm_cmplt_epi32(u[11], kZero);
           v[12] = _mm_cmplt_epi32(u[12], kZero);
@@ -2943,16 +2990,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_cmplt_epi32(u[14], kZero);
           v[15] = _mm_cmplt_epi32(u[15], kZero);
 
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[0] = _mm_sub_epi32(u[0], v[0]);
+          u[1] = _mm_sub_epi32(u[1], v[1]);
+          u[2] = _mm_sub_epi32(u[2], v[2]);
+          u[3] = _mm_sub_epi32(u[3], v[3]);
+          u[4] = _mm_sub_epi32(u[4], v[4]);
+          u[5] = _mm_sub_epi32(u[5], v[5]);
+          u[6] = _mm_sub_epi32(u[6], v[6]);
+          u[7] = _mm_sub_epi32(u[7], v[7]);
+          u[8] = _mm_sub_epi32(u[8], v[8]);
+          u[9] = _mm_sub_epi32(u[9], v[9]);
           u[10] = _mm_sub_epi32(u[10], v[10]);
           u[11] = _mm_sub_epi32(u[11], v[11]);
           u[12] = _mm_sub_epi32(u[12], v[12]);
@@ -2994,18 +3041,18 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], 2);
           u[15] = _mm_srai_epi32(v[15], 2);
 
-          out[ 5] = _mm_packs_epi32(u[0], u[1]);
+          out[5] = _mm_packs_epi32(u[0], u[1]);
           out[21] = _mm_packs_epi32(u[2], u[3]);
           out[13] = _mm_packs_epi32(u[4], u[5]);
           out[29] = _mm_packs_epi32(u[6], u[7]);
-          out[ 3] = _mm_packs_epi32(u[8], u[9]);
+          out[3] = _mm_packs_epi32(u[8], u[9]);
           out[19] = _mm_packs_epi32(u[10], u[11]);
           out[11] = _mm_packs_epi32(u[12], u[13]);
           out[27] = _mm_packs_epi32(u[14], u[15]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
-                                             &out[29], &out[3], &out[19],
-                                             &out[11], &out[27]);
+          overflow =
+              check_epi16_overflow_x8(&out[5], &out[21], &out[13], &out[29],
+                                      &out[3], &out[19], &out[11], &out[27]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
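
Note on the hunks above: the long u[]/v[] pipeline (k_madd_epi32, then k_packs_epi64, then the rounding add, then _mm_srai_epi32) is the vector form of the usual dct_const_round_shift. A minimal scalar model of one 32-bit output lane, assuming DCT_CONST_ROUNDING == 1 << (DCT_CONST_BITS - 1) as elsewhere in vpx_dsp (the helper name here is illustrative, not from this patch):

  /* a, b: the two 32-bit inputs interleaved by the unpack step; c0, c1:
   * the cosine pair packed into a constant such as k32_p27_p05. */
  static int32_t butterfly_round_shift(int32_t a, int32_t b, int32_t c0,
                                       int32_t c1) {
    const int64_t sum = (int64_t)a * c0 + (int64_t)b * c1; /* k_madd_epi32 */
    return (int32_t)((sum + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
  }

The _mm_cmplt_epi32/_mm_sub_epi32 pairing further up adds 1 to each negative lane (a true comparison yields the all-ones mask, i.e. -1), the usual sign-correction bias applied ahead of the final >> 2 shown above.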
diff --git a/vp10/common/x86/vp10_fwd_txfm_impl_sse2.h b/vp10/common/x86/vp10_fwd_txfm_impl_sse2.h
index 69889e2e98cdf6d10788db303f3266fbf10a3718..b3bcc4a99488984a4dd61d0f13ce20fea68c5f33 100644
--- a/vp10/common/x86/vp10_fwd_txfm_impl_sse2.h
+++ b/vp10/common/x86/vp10_fwd_txfm_impl_sse2.h
@@ -43,44 +43,36 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
   // These are the coefficients used for the multiplies.
   // In the comments, pN means cos(N pi /64) and mN is -cos(N pi /64),
   // where cospi_N_64 = cos(N pi /64)
-  const __m128i k__cospi_A = octa_set_epi16(cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64,
-                                            cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_B = octa_set_epi16(cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64);
-  const __m128i k__cospi_C = octa_set_epi16(cospi_8_64, cospi_24_64,
-                                            cospi_8_64, cospi_24_64,
-                                            cospi_24_64, -cospi_8_64,
-                                            cospi_24_64, -cospi_8_64);
-  const __m128i k__cospi_D = octa_set_epi16(cospi_24_64, -cospi_8_64,
-                                            cospi_24_64, -cospi_8_64,
-                                            cospi_8_64, cospi_24_64,
-                                            cospi_8_64, cospi_24_64);
-  const __m128i k__cospi_E = octa_set_epi16(cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64);
-  const __m128i k__cospi_F = octa_set_epi16(cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_G = octa_set_epi16(cospi_8_64, cospi_24_64,
-                                            cospi_8_64, cospi_24_64,
-                                            -cospi_8_64, -cospi_24_64,
-                                            -cospi_8_64, -cospi_24_64);
-  const __m128i k__cospi_H = octa_set_epi16(cospi_24_64, -cospi_8_64,
-                                            cospi_24_64, -cospi_8_64,
-                                            -cospi_24_64, cospi_8_64,
-                                            -cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_A =
+      octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
+                     cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_B =
+      octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
+                     cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);
+  const __m128i k__cospi_C =
+      octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
+                     cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64);
+  const __m128i k__cospi_D =
+      octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
+                     cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_E =
+      octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
+                     cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);
+  const __m128i k__cospi_F =
+      octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
+                     cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_G =
+      octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
+                     -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64);
+  const __m128i k__cospi_H =
+      octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
+                     -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64);
 
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   // This second rounding constant saves doing some extra adds at the end
-  const __m128i k__DCT_CONST_ROUNDING2 = _mm_set1_epi32(DCT_CONST_ROUNDING
-                                               +(DCT_CONST_ROUNDING << 1));
-  const int DCT_CONST_BITS2 =  DCT_CONST_BITS + 2;
+  const __m128i k__DCT_CONST_ROUNDING2 =
+      _mm_set1_epi32(DCT_CONST_ROUNDING + (DCT_CONST_ROUNDING << 1));
+  const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2;
   const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
   const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
   __m128i in0, in1;
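
Two notes on the constants above. The octa_set_epi16 tables interleave cosine pairs so that a single _mm_madd_epi16 evaluates x0*c0 + x1*c1 per 32-bit lane. And k__DCT_CONST_ROUNDING2 folds the transform's final (x + 1) >> 2 scaling into the constant-multiply rounding, which is why one shift by DCT_CONST_BITS2 = DCT_CONST_BITS + 2 suffices at the end. A worked check, assuming R = DCT_CONST_ROUNDING = 1 << (DCT_CONST_BITS - 1):

  /* Two-step form:  t = (v + R) >> DCT_CONST_BITS;  out = (t + 1) >> 2;
   * Folded form:    out = (v + 3 * R) >> (DCT_CONST_BITS + 2);
   * These agree because 3 * R = R + (1 << DCT_CONST_BITS), and adding
   * 1 << DCT_CONST_BITS before a >> DCT_CONST_BITS is exactly adding 1
   * after it; the trailing >> 2 then sees the same "+ 1" in both forms. */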
@@ -90,14 +82,14 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
 #endif
 
   // Load inputs.
-  in0  = _mm_loadl_epi64((const __m128i *)(input +  0 * stride));
-  in1  = _mm_loadl_epi64((const __m128i *)(input +  1 * stride));
-  in1  = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *)
-                                                 (input +  2 * stride)));
-  in0  = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *)
-                                                 (input +  3 * stride)));
-  // in0 = [i0 i1 i2 i3 iC iD iE iF]
-  // in1 = [i4 i5 i6 i7 i8 i9 iA iB]
+  in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+  in1 = _mm_unpacklo_epi64(
+      in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride)));
+  in0 = _mm_unpacklo_epi64(
+      in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride)));
+// in0 = [i0 i1 i2 i3 iC iD iE iF]
+// in1 = [i4 i5 i6 i7 i8 i9 iA iB]
 #if DCT_HIGH_BIT_DEPTH
   // Check inputs small enough to use optimised code
   cmp0 = _mm_xor_si128(_mm_cmpgt_epi16(in0, _mm_set1_epi16(0x3ff)),
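
The load sequence above packs two 4-sample rows per 128-bit register, rows 0 and 3 into in0 and rows 1 and 2 into in1 (see the layout comments), so each packed operation advances two rows of the 4-wide transform at once. A sketch of why the packing pays off:

  /* With inA = [ rowP | rowQ ] and inB = [ rowR | rowS ], one packed op
   * yields two row-wise results at a time:
   *   ADD_EPI16(inA, inB) -> [ rowP + rowR | rowQ + rowS ]
   *   SUB_EPI16(inA, inB) -> [ rowP - rowR | rowQ - rowS ] */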
@@ -192,10 +184,10 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
     // vertical DCTs finished. Now we do the horizontal DCTs.
     // Stage 3: Add/subtract
 
-    const __m128i t0 = ADD_EPI16(in0, in1);
-    const __m128i t1 = SUB_EPI16(in0, in1);
     // t0 = [c0 c1 c8 c9  c4  c5  cC  cD]
     // t1 = [c3 c2 cB cA -c7 -c6 -cF -cE]
+    const __m128i t0 = ADD_EPI16(in0, in1);
+    const __m128i t1 = SUB_EPI16(in0, in1);
 #if DCT_HIGH_BIT_DEPTH
     overflow = check_epi16_overflow_x2(&t0, &t1);
     if (overflow) {
@@ -263,7 +255,6 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
   storeu_output(&in1, output + 2 * 4);
 }
 
-
 void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
   int pass;
   // Constants
@@ -283,14 +274,14 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
   int overflow;
 #endif
   // Load input
-  __m128i in0  = _mm_load_si128((const __m128i *)(input + 0 * stride));
-  __m128i in1  = _mm_load_si128((const __m128i *)(input + 1 * stride));
-  __m128i in2  = _mm_load_si128((const __m128i *)(input + 2 * stride));
-  __m128i in3  = _mm_load_si128((const __m128i *)(input + 3 * stride));
-  __m128i in4  = _mm_load_si128((const __m128i *)(input + 4 * stride));
-  __m128i in5  = _mm_load_si128((const __m128i *)(input + 5 * stride));
-  __m128i in6  = _mm_load_si128((const __m128i *)(input + 6 * stride));
-  __m128i in7  = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
   // Pre-condition input (shift by two)
   in0 = _mm_slli_epi16(in0, 2);
   in1 = _mm_slli_epi16(in1, 2);
@@ -319,8 +310,8 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
     const __m128i q7 = SUB_EPI16(in0, in7);
 #if DCT_HIGH_BIT_DEPTH
     if (pass == 1) {
-      overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3,
-                                         &q4, &q5, &q6, &q7);
+      overflow =
+          check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
       if (overflow) {
         vpx_highbd_fdct8x8_c(input, output, stride);
         return;
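
The pass == 1 guard and check_epi16_overflow_x8 above form the high-bit-depth safety net: if any lane saturated, the whole transform is redone by the C reference (vpx_highbd_fdct8x8_c). Presumed context, mirroring the vpx_dsp twin of this template (these definitions are an assumption, not part of this patch):

  #if DCT_HIGH_BIT_DEPTH
  #define ADD_EPI16 _mm_adds_epi16 /* saturating, so overflow leaves a mark */
  #define SUB_EPI16 _mm_subs_epi16
  #else
  #define ADD_EPI16 _mm_add_epi16 /* plain ops; standard depth stays in range */
  #define SUB_EPI16 _mm_sub_epi16
  #endif

With saturating arithmetic, a lane that truly overflowed is pinned at 32767 or -32768, so the check only needs to look for those sentinels. A legitimate result landing exactly on a sentinel also triggers the C fallback; that costs speed, never correctness.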
@@ -630,22 +621,22 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
       __m128i res08, res09, res10, res11, res12, res13, res14, res15;
       // Load and pre-condition input.
       if (0 == pass) {
-        in00  = _mm_load_si128((const __m128i *)(in +  0 * stride));
-        in01  = _mm_load_si128((const __m128i *)(in +  1 * stride));
-        in02  = _mm_load_si128((const __m128i *)(in +  2 * stride));
-        in03  = _mm_load_si128((const __m128i *)(in +  3 * stride));
-        in04  = _mm_load_si128((const __m128i *)(in +  4 * stride));
-        in05  = _mm_load_si128((const __m128i *)(in +  5 * stride));
-        in06  = _mm_load_si128((const __m128i *)(in +  6 * stride));
-        in07  = _mm_load_si128((const __m128i *)(in +  7 * stride));
-        in08  = _mm_load_si128((const __m128i *)(in +  8 * stride));
-        in09  = _mm_load_si128((const __m128i *)(in +  9 * stride));
-        in10  = _mm_load_si128((const __m128i *)(in + 10 * stride));
-        in11  = _mm_load_si128((const __m128i *)(in + 11 * stride));
-        in12  = _mm_load_si128((const __m128i *)(in + 12 * stride));
-        in13  = _mm_load_si128((const __m128i *)(in + 13 * stride));
-        in14  = _mm_load_si128((const __m128i *)(in + 14 * stride));
-        in15  = _mm_load_si128((const __m128i *)(in + 15 * stride));
+        in00 = _mm_load_si128((const __m128i *)(in + 0 * stride));
+        in01 = _mm_load_si128((const __m128i *)(in + 1 * stride));
+        in02 = _mm_load_si128((const __m128i *)(in + 2 * stride));
+        in03 = _mm_load_si128((const __m128i *)(in + 3 * stride));
+        in04 = _mm_load_si128((const __m128i *)(in + 4 * stride));
+        in05 = _mm_load_si128((const __m128i *)(in + 5 * stride));
+        in06 = _mm_load_si128((const __m128i *)(in + 6 * stride));
+        in07 = _mm_load_si128((const __m128i *)(in + 7 * stride));
+        in08 = _mm_load_si128((const __m128i *)(in + 8 * stride));
+        in09 = _mm_load_si128((const __m128i *)(in + 9 * stride));
+        in10 = _mm_load_si128((const __m128i *)(in + 10 * stride));
+        in11 = _mm_load_si128((const __m128i *)(in + 11 * stride));
+        in12 = _mm_load_si128((const __m128i *)(in + 12 * stride));
+        in13 = _mm_load_si128((const __m128i *)(in + 13 * stride));
+        in14 = _mm_load_si128((const __m128i *)(in + 14 * stride));
+        in15 = _mm_load_si128((const __m128i *)(in + 15 * stride));
         // x = x << 2
         in00 = _mm_slli_epi16(in00, 2);
         in01 = _mm_slli_epi16(in01, 2);
@@ -664,22 +655,22 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
         in14 = _mm_slli_epi16(in14, 2);
         in15 = _mm_slli_epi16(in15, 2);
       } else {
-        in00  = _mm_load_si128((const __m128i *)(in +  0 * 16));
-        in01  = _mm_load_si128((const __m128i *)(in +  1 * 16));
-        in02  = _mm_load_si128((const __m128i *)(in +  2 * 16));
-        in03  = _mm_load_si128((const __m128i *)(in +  3 * 16));
-        in04  = _mm_load_si128((const __m128i *)(in +  4 * 16));
-        in05  = _mm_load_si128((const __m128i *)(in +  5 * 16));
-        in06  = _mm_load_si128((const __m128i *)(in +  6 * 16));
-        in07  = _mm_load_si128((const __m128i *)(in +  7 * 16));
-        in08  = _mm_load_si128((const __m128i *)(in +  8 * 16));
-        in09  = _mm_load_si128((const __m128i *)(in +  9 * 16));
-        in10  = _mm_load_si128((const __m128i *)(in + 10 * 16));
-        in11  = _mm_load_si128((const __m128i *)(in + 11 * 16));
-        in12  = _mm_load_si128((const __m128i *)(in + 12 * 16));
-        in13  = _mm_load_si128((const __m128i *)(in + 13 * 16));
-        in14  = _mm_load_si128((const __m128i *)(in + 14 * 16));
-        in15  = _mm_load_si128((const __m128i *)(in + 15 * 16));
+        in00 = _mm_load_si128((const __m128i *)(in + 0 * 16));
+        in01 = _mm_load_si128((const __m128i *)(in + 1 * 16));
+        in02 = _mm_load_si128((const __m128i *)(in + 2 * 16));
+        in03 = _mm_load_si128((const __m128i *)(in + 3 * 16));
+        in04 = _mm_load_si128((const __m128i *)(in + 4 * 16));
+        in05 = _mm_load_si128((const __m128i *)(in + 5 * 16));
+        in06 = _mm_load_si128((const __m128i *)(in + 6 * 16));
+        in07 = _mm_load_si128((const __m128i *)(in + 7 * 16));
+        in08 = _mm_load_si128((const __m128i *)(in + 8 * 16));
+        in09 = _mm_load_si128((const __m128i *)(in + 9 * 16));
+        in10 = _mm_load_si128((const __m128i *)(in + 10 * 16));
+        in11 = _mm_load_si128((const __m128i *)(in + 11 * 16));
+        in12 = _mm_load_si128((const __m128i *)(in + 12 * 16));
+        in13 = _mm_load_si128((const __m128i *)(in + 13 * 16));
+        in14 = _mm_load_si128((const __m128i *)(in + 14 * 16));
+        in15 = _mm_load_si128((const __m128i *)(in + 15 * 16));
         // x = (x + 1) >> 2
         in00 = _mm_add_epi16(in00, kOne);
         in01 = _mm_add_epi16(in01, kOne);
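
The two passes use complementary conditioning, per the `x = x << 2` and `x = (x + 1) >> 2` comments above: pass 0 scales the input up by 2 bits for intermediate precision, and pass 1 rounds that headroom back out when reloading the intermediate buffer. Per element (variable names illustrative):

  int16_t pass0_in = (int16_t)(src << 2);        /* x = x << 2 */
  int16_t pass1_in = (int16_t)((mid + 1) >> 2);  /* x = (x + 1) >> 2 */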
@@ -745,10 +736,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
         step1_6 = SUB_EPI16(in01, in14);
         step1_7 = SUB_EPI16(in00, in15);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step1_0, &step1_1,
-                                           &step1_2, &step1_3,
-                                           &step1_4, &step1_5,
-                                           &step1_6, &step1_7);
+        overflow =
+            check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
+                                    &step1_4, &step1_5, &step1_6, &step1_7);
         if (overflow) {
           vpx_highbd_fdct16x16_c(input, output, stride);
           return;
@@ -767,8 +757,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
         const __m128i q6 = SUB_EPI16(input1, input6);
         const __m128i q7 = SUB_EPI16(input0, input7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3,
-                                           &q4, &q5, &q6, &q7);
+        overflow =
+            check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
         if (overflow) {
           vpx_highbd_fdct16x16_c(input, output, stride);
           return;
@@ -818,12 +808,12 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           // into 32 bits.
           const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
           const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
-          const __m128i r0 = mult_round_shift(&d0, &d1, &k__cospi_p16_m16,
-                                              &k__DCT_CONST_ROUNDING,
-                                              DCT_CONST_BITS);
-          const __m128i r1 = mult_round_shift(&d0, &d1, &k__cospi_p16_p16,
-                                              &k__DCT_CONST_ROUNDING,
-                                              DCT_CONST_BITS);
+          const __m128i r0 =
+              mult_round_shift(&d0, &d1, &k__cospi_p16_m16,
+                               &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+          const __m128i r1 =
+              mult_round_shift(&d0, &d1, &k__cospi_p16_p16,
+                               &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x2(&r0, &r1);
           if (overflow) {
@@ -860,8 +850,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
               res06 = mult_round_shift(&t2, &t3, &k__cospi_m20_p12,
                                        &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
 #if DCT_HIGH_BIT_DEPTH
-              overflow = check_epi16_overflow_x4(&res02, &res14,
-                                                 &res10, &res06);
+              overflow =
+                  check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
               if (overflow) {
                 vpx_highbd_fdct16x16_c(input, output, stride);
                 return;
@@ -888,8 +878,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           step2_4 = mult_round_shift(&t2, &t3, &k__cospi_p16_p16,
                                      &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5,
-                                             &step2_4);
+          overflow =
+              check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
           if (overflow) {
             vpx_highbd_fdct16x16_c(input, output, stride);
             return;
@@ -907,10 +897,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           step3_6 = ADD_EPI16(step1_6, step2_5);
           step3_7 = ADD_EPI16(step1_7, step2_4);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&step3_0, &step3_1,
-                                             &step3_2, &step3_3,
-                                             &step3_4, &step3_5,
-                                             &step3_6, &step3_7);
+          overflow =
+              check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
+                                      &step3_4, &step3_5, &step3_6, &step3_7);
           if (overflow) {
             vpx_highbd_fdct16x16_c(input, output, stride);
             return;
@@ -932,8 +921,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           step2_5 = mult_round_shift(&t2, &t3, &k__cospi_p08_m24,
                                      &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6,
-                                             &step2_5);
+          overflow =
+              check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
           if (overflow) {
             vpx_highbd_fdct16x16_c(input, output, stride);
             return;
@@ -951,10 +940,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           step1_6 = SUB_EPI16(step3_7, step2_6);
           step1_7 = ADD_EPI16(step3_7, step2_6);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&step1_0, &step1_1,
-                                             &step1_2, &step1_3,
-                                             &step1_4, &step1_5,
-                                             &step1_6, &step1_7);
+          overflow =
+              check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
+                                      &step1_4, &step1_5, &step1_6, &step1_7);
           if (overflow) {
             vpx_highbd_fdct16x16_c(input, output, stride);
             return;
@@ -1006,16 +994,14 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
         }
       }
       // Transpose the results, do it as two 8x8 transposes.
-      transpose_and_output8x8(&res00, &res01, &res02, &res03,
-                              &res04, &res05, &res06, &res07,
-                              pass, out0, out1);
-      transpose_and_output8x8(&res08, &res09, &res10, &res11,
-                              &res12, &res13, &res14, &res15,
-                              pass, out0 + 8, out1 + 8);
+      transpose_and_output8x8(&res00, &res01, &res02, &res03, &res04, &res05,
+                              &res06, &res07, pass, out0, out1);
+      transpose_and_output8x8(&res08, &res09, &res10, &res11, &res12, &res13,
+                              &res14, &res15, pass, out0 + 8, out1 + 8);
       if (pass == 0) {
-        out0 += 8*16;
+        out0 += 8 * 16;
       } else {
-        out1 += 8*16;
+        out1 += 8 * 16;
       }
     }
     // Setup in/out for next pass.
diff --git a/vp10/common/x86/vp10_fwd_txfm_sse2.c b/vp10/common/x86/vp10_fwd_txfm_sse2.c
index cf6aa8de408eefe4b6f8c8c0f953178c3f30e52c..5b26a3174a3272acefc2582c46fd9da967ec5c40 100644
--- a/vp10/common/x86/vp10_fwd_txfm_sse2.c
+++ b/vp10/common/x86/vp10_fwd_txfm_sse2.c
@@ -18,12 +18,12 @@ void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0, in1;
   __m128i tmp;
   const __m128i zero = _mm_setzero_si128();
-  in0  = _mm_loadl_epi64((const __m128i *)(input +  0 * stride));
-  in1  = _mm_loadl_epi64((const __m128i *)(input +  1 * stride));
-  in1  = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *)
-         (input +  2 * stride)));
-  in0  = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *)
-         (input +  3 * stride)));
+  in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+  in1 = _mm_unpacklo_epi64(
+      in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride)));
+  in0 = _mm_unpacklo_epi64(
+      in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride)));
 
   tmp = _mm_add_epi16(in0, in1);
   in0 = _mm_unpacklo_epi16(zero, tmp);
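
The unpack-with-zero seen here, combined with the _mm_srai_epi32(..., 16) that follows it, is the plain-SSE2 idiom for sign-extending 16-bit lanes to 32 bits (pmovsxwd only arrives with SSE4.1). As a self-contained sketch:

  /* Sign-extend the low four int16 lanes of v to int32. */
  static __m128i widen_lo_epi16(__m128i v) {
    const __m128i zero = _mm_setzero_si128();
    const __m128i hi = _mm_unpacklo_epi16(zero, v); /* value in high half */
    return _mm_srai_epi32(hi, 16); /* shift back, duplicating the sign bit */
  }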
@@ -44,19 +44,19 @@ void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
 }
 
 void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
-  __m128i in0  = _mm_load_si128((const __m128i *)(input + 0 * stride));
-  __m128i in1  = _mm_load_si128((const __m128i *)(input + 1 * stride));
-  __m128i in2  = _mm_load_si128((const __m128i *)(input + 2 * stride));
-  __m128i in3  = _mm_load_si128((const __m128i *)(input + 3 * stride));
+  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
   __m128i u0, u1, sum;
 
   u0 = _mm_add_epi16(in0, in1);
   u1 = _mm_add_epi16(in2, in3);
 
-  in0  = _mm_load_si128((const __m128i *)(input + 4 * stride));
-  in1  = _mm_load_si128((const __m128i *)(input + 5 * stride));
-  in2  = _mm_load_si128((const __m128i *)(input + 6 * stride));
-  in3  = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  in0 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+  in1 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+  in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+  in3 = _mm_load_si128((const __m128i *)(input + 7 * stride));
 
   sum = _mm_add_epi16(u0, u1);
 
@@ -64,7 +64,7 @@ void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   in2 = _mm_add_epi16(in2, in3);
   sum = _mm_add_epi16(sum, in0);
 
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   sum = _mm_add_epi16(sum, in2);
 
   in0 = _mm_unpacklo_epi16(u0, sum);
@@ -84,7 +84,7 @@ void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
 }
 
 void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
-                          int stride) {
+                           int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
   __m128i sum = _mm_setzero_si128();
@@ -92,49 +92,49 @@ void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
 
   for (i = 0; i < 2; ++i) {
     input += 8 * i;
-    in0  = _mm_load_si128((const __m128i *)(input +  0 * stride));
-    in1  = _mm_load_si128((const __m128i *)(input +  1 * stride));
-    in2  = _mm_load_si128((const __m128i *)(input +  2 * stride));
-    in3  = _mm_load_si128((const __m128i *)(input +  3 * stride));
+    in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+    in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+    in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+    in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
 
     u0 = _mm_add_epi16(in0, in1);
     u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  4 * stride));
-    in1  = _mm_load_si128((const __m128i *)(input +  5 * stride));
-    in2  = _mm_load_si128((const __m128i *)(input +  6 * stride));
-    in3  = _mm_load_si128((const __m128i *)(input +  7 * stride));
+    in0 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+    in1 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+    in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+    in3 = _mm_load_si128((const __m128i *)(input + 7 * stride));
 
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  8 * stride));
-    in1  = _mm_load_si128((const __m128i *)(input +  9 * stride));
-    in2  = _mm_load_si128((const __m128i *)(input + 10 * stride));
-    in3  = _mm_load_si128((const __m128i *)(input + 11 * stride));
+    in0 = _mm_load_si128((const __m128i *)(input + 8 * stride));
+    in1 = _mm_load_si128((const __m128i *)(input + 9 * stride));
+    in2 = _mm_load_si128((const __m128i *)(input + 10 * stride));
+    in3 = _mm_load_si128((const __m128i *)(input + 11 * stride));
 
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input + 12 * stride));
-    in1  = _mm_load_si128((const __m128i *)(input + 13 * stride));
-    in2  = _mm_load_si128((const __m128i *)(input + 14 * stride));
-    in3  = _mm_load_si128((const __m128i *)(input + 15 * stride));
+    in0 = _mm_load_si128((const __m128i *)(input + 12 * stride));
+    in1 = _mm_load_si128((const __m128i *)(input + 13 * stride));
+    in2 = _mm_load_si128((const __m128i *)(input + 14 * stride));
+    in3 = _mm_load_si128((const __m128i *)(input + 15 * stride));
 
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
     sum = _mm_add_epi16(sum, u1);
   }
 
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   in0 = _mm_unpacklo_epi16(u0, sum);
   in1 = _mm_unpackhi_epi16(u0, sum);
   in0 = _mm_srai_epi32(in0, 16);
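
These _1 variants compute only the DC coefficient, so every butterfly collapses into a plain sum of the block, which the loops above accumulate eight lanes at a time. The scalar shape, with the size-dependent final scaling left to the surrounding code (not shown in this patch):

  static int32_t block_sum(const int16_t *in, int stride, int n) {
    int r, c;
    int32_t s = 0;
    for (r = 0; r < n; ++r)
      for (c = 0; c < n; ++c) s += in[r * stride + c];
    return s; /* n is 16 for vp10_fdct16x16_1_sse2 */
  }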
@@ -153,60 +153,60 @@ void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
 }
 
 void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
-                          int stride) {
+                           int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
   __m128i sum = _mm_setzero_si128();
   int i;
 
   for (i = 0; i < 8; ++i) {
-    in0  = _mm_load_si128((const __m128i *)(input +  0));
-    in1  = _mm_load_si128((const __m128i *)(input +  8));
-    in2  = _mm_load_si128((const __m128i *)(input + 16));
-    in3  = _mm_load_si128((const __m128i *)(input + 24));
+    in0 = _mm_load_si128((const __m128i *)(input + 0));
+    in1 = _mm_load_si128((const __m128i *)(input + 8));
+    in2 = _mm_load_si128((const __m128i *)(input + 16));
+    in3 = _mm_load_si128((const __m128i *)(input + 24));
 
     input += stride;
     u0 = _mm_add_epi16(in0, in1);
     u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  0));
-    in1  = _mm_load_si128((const __m128i *)(input +  8));
-    in2  = _mm_load_si128((const __m128i *)(input + 16));
-    in3  = _mm_load_si128((const __m128i *)(input + 24));
+    in0 = _mm_load_si128((const __m128i *)(input + 0));
+    in1 = _mm_load_si128((const __m128i *)(input + 8));
+    in2 = _mm_load_si128((const __m128i *)(input + 16));
+    in3 = _mm_load_si128((const __m128i *)(input + 24));
 
     input += stride;
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  0));
-    in1  = _mm_load_si128((const __m128i *)(input +  8));
-    in2  = _mm_load_si128((const __m128i *)(input + 16));
-    in3  = _mm_load_si128((const __m128i *)(input + 24));
+    in0 = _mm_load_si128((const __m128i *)(input + 0));
+    in1 = _mm_load_si128((const __m128i *)(input + 8));
+    in2 = _mm_load_si128((const __m128i *)(input + 16));
+    in3 = _mm_load_si128((const __m128i *)(input + 24));
 
     input += stride;
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  0));
-    in1  = _mm_load_si128((const __m128i *)(input +  8));
-    in2  = _mm_load_si128((const __m128i *)(input + 16));
-    in3  = _mm_load_si128((const __m128i *)(input + 24));
+    in0 = _mm_load_si128((const __m128i *)(input + 0));
+    in1 = _mm_load_si128((const __m128i *)(input + 8));
+    in2 = _mm_load_si128((const __m128i *)(input + 16));
+    in3 = _mm_load_si128((const __m128i *)(input + 24));
 
     input += stride;
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
     sum = _mm_add_epi16(sum, u1);
   }
 
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   in0 = _mm_unpacklo_epi16(u0, sum);
   in1 = _mm_unpackhi_epi16(u0, sum);
   in0 = _mm_srai_epi32(in0, 16);
@@ -229,43 +229,43 @@ void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
 #define FDCT8x8_2D vp10_fdct8x8_sse2
 #define FDCT16x16_2D vp10_fdct16x16_sse2
 #include "vp10/common/x86/vp10_fwd_txfm_impl_sse2.h"
-#undef  FDCT4x4_2D
-#undef  FDCT8x8_2D
-#undef  FDCT16x16_2D
+#undef FDCT4x4_2D
+#undef FDCT8x8_2D
+#undef FDCT16x16_2D
 
 #define FDCT32x32_2D vp10_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
 #include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h"
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
 
 #define FDCT32x32_2D vp10_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
 #include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
-#undef  DCT_HIGH_BIT_DEPTH
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
+#undef DCT_HIGH_BIT_DEPTH
 
 #if CONFIG_VPX_HIGHBITDEPTH
 #define DCT_HIGH_BIT_DEPTH 1
 #define FDCT4x4_2D vp10_highbd_fdct4x4_sse2
 #define FDCT8x8_2D vp10_highbd_fdct8x8_sse2
 #define FDCT16x16_2D vp10_highbd_fdct16x16_sse2
-#include "vp10/common/x86/vp10_fwd_txfm_impl_sse2.h" // NOLINT
-#undef  FDCT4x4_2D
-#undef  FDCT8x8_2D
-#undef  FDCT16x16_2D
+#include "vp10/common/x86/vp10_fwd_txfm_impl_sse2.h"  // NOLINT
+#undef FDCT4x4_2D
+#undef FDCT8x8_2D
+#undef FDCT16x16_2D
 
 #define FDCT32x32_2D vp10_highbd_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
-#include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h" // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
+#include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
 
 #define FDCT32x32_2D vp10_highbd_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
-#include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h" // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
-#undef  DCT_HIGH_BIT_DEPTH
+#include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h"  // NOLINT
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
+#undef DCT_HIGH_BIT_DEPTH
 #endif  // CONFIG_VPX_HIGHBITDEPTH
diff --git a/vp10/common/x86/vp10_inv_txfm_sse2.c b/vp10/common/x86/vp10_inv_txfm_sse2.c
index 6c108a8b9aa5c68d8fcadb72f5bd00a494180593..c20039212ca1c6eed730de6ad7ec2a52b9478c2f 100644
--- a/vp10/common/x86/vp10_inv_txfm_sse2.c
+++ b/vp10/common/x86/vp10_inv_txfm_sse2.c
@@ -12,14 +12,14 @@
 #include "vp10/common/x86/vp10_inv_txfm_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"
 
-#define RECON_AND_STORE4X4(dest, in_x) \
-{                                                     \
-  __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \
-  d0 = _mm_unpacklo_epi8(d0, zero); \
-  d0 = _mm_add_epi16(in_x, d0); \
-  d0 = _mm_packus_epi16(d0, d0); \
-  *(int *)(dest) = _mm_cvtsi128_si32(d0); \
-}
+#define RECON_AND_STORE4X4(dest, in_x)                    \
+  {                                                       \
+    __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \
+    d0 = _mm_unpacklo_epi8(d0, zero);                     \
+    d0 = _mm_add_epi16(in_x, d0);                         \
+    d0 = _mm_packus_epi16(d0, d0);                        \
+    *(int *)(dest) = _mm_cvtsi128_si32(d0);               \
+  }
 
 void vp10_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
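
RECON_AND_STORE4X4 above is the add-residual-and-clamp step of reconstruction for one 4-pixel row, with _mm_packus_epi16 supplying the 0..255 clamp. A scalar equivalent for reference:

  /* One row of RECON_AND_STORE4X4, written out. */
  static void recon_row(uint8_t *dest, const int16_t *res) {
    int i;
    for (i = 0; i < 4; ++i) {
      const int v = dest[i] + res[i];                     /* add residual */
      dest[i] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); /* packus clamp */
    }
  }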
@@ -261,192 +261,189 @@ void vp10_iadst4_sse2(__m128i *in) {
   in[1] = _mm_packs_epi32(u[2], u[3]);
 }
 
-#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, \
-                      out0, out1, out2, out3, out4, out5, out6, out7) \
-  {                                                     \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
-    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \
-    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \
-    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \
-    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \
-    const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5); \
-    const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7); \
-                                                        \
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
-    const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); \
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
-    const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
-    const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
-    const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \
-                                                            \
-    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
-    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
-    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
-    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
-    out4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \
-    out5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \
-    out6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \
-    out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \
+#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                      out2, out3, out4, out5, out6, out7)                 \
+  {                                                                       \
+    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);                   \
+    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);                   \
+    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1);                   \
+    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3);                   \
+    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5);                   \
+    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7);                   \
+    const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5);                   \
+    const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7);                   \
+                                                                          \
+    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);               \
+    const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);               \
+    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);               \
+    const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);               \
+    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);               \
+    const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);               \
+    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);               \
+    const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);               \
+                                                                          \
+    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                              \
+    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                              \
+    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                              \
+    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                              \
+    out4 = _mm_unpacklo_epi64(tr1_1, tr1_5);                              \
+    out5 = _mm_unpackhi_epi64(tr1_1, tr1_5);                              \
+    out6 = _mm_unpacklo_epi64(tr1_3, tr1_7);                              \
+    out7 = _mm_unpackhi_epi64(tr1_3, tr1_7);                              \
   }
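
TRANSPOSE_8X8 above is the classic three-round unpack transpose, doubling the interleave granularity each round until whole output rows emerge:

  /* round 1, epi16: rows interleave element-wise:  a0 b0 a1 b1 ...
   * round 2, epi32: pairs of pairs:                a0 b0 c0 d0 ...
   * round 3, epi64: halves join into full columns: a0 b0 c0 d0 e0 f0 g0 h0
   * 8 registers in, 8 registers out, 24 unpacks total. */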
 
-#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, \
-                         out0, out1, out2, out3) \
-  {                                              \
-    const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1); \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0); \
-    const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3); \
-    const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2); \
-    \
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
-    \
-    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
-    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
-    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
-    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, out0, out1, out2, out3) \
+  {                                                                      \
+    const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1);                \
+    const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0);                \
+    const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3);                \
+    const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2);                \
+                                                                         \
+    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);              \
+    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);              \
+    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);              \
+    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);              \
+                                                                         \
+    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                             \
+    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                             \
+    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                             \
+    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                             \
   }
 
 #define TRANSPOSE_8X8_10(in0, in1, in2, in3, out0, out1) \
-  {                                            \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
-    out0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
-    out1 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+  {                                                      \
+    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);  \
+    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);  \
+    out0 = _mm_unpacklo_epi32(tr0_0, tr0_1);             \
+    out1 = _mm_unpackhi_epi32(tr0_0, tr0_1);             \
   }
 
 // Define Macro for multiplying elements by constants and adding them together.
-#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, \
-                               cst0, cst1, cst2, cst3, res0, res1, res2, res3) \
-  {   \
-      tmp0 = _mm_madd_epi16(lo_0, cst0); \
-      tmp1 = _mm_madd_epi16(hi_0, cst0); \
-      tmp2 = _mm_madd_epi16(lo_0, cst1); \
-      tmp3 = _mm_madd_epi16(hi_0, cst1); \
-      tmp4 = _mm_madd_epi16(lo_1, cst2); \
-      tmp5 = _mm_madd_epi16(hi_1, cst2); \
-      tmp6 = _mm_madd_epi16(lo_1, cst3); \
-      tmp7 = _mm_madd_epi16(hi_1, cst3); \
-      \
-      tmp0 = _mm_add_epi32(tmp0, rounding); \
-      tmp1 = _mm_add_epi32(tmp1, rounding); \
-      tmp2 = _mm_add_epi32(tmp2, rounding); \
-      tmp3 = _mm_add_epi32(tmp3, rounding); \
-      tmp4 = _mm_add_epi32(tmp4, rounding); \
-      tmp5 = _mm_add_epi32(tmp5, rounding); \
-      tmp6 = _mm_add_epi32(tmp6, rounding); \
-      tmp7 = _mm_add_epi32(tmp7, rounding); \
-      \
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-      tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
-      tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
-      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
-      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
-      \
-      res0 = _mm_packs_epi32(tmp0, tmp1); \
-      res1 = _mm_packs_epi32(tmp2, tmp3); \
-      res2 = _mm_packs_epi32(tmp4, tmp5); \
-      res3 = _mm_packs_epi32(tmp6, tmp7); \
+#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, cst0, cst1, cst2, cst3, \
+                               res0, res1, res2, res3)                         \
+  {                                                                            \
+    tmp0 = _mm_madd_epi16(lo_0, cst0);                                         \
+    tmp1 = _mm_madd_epi16(hi_0, cst0);                                         \
+    tmp2 = _mm_madd_epi16(lo_0, cst1);                                         \
+    tmp3 = _mm_madd_epi16(hi_0, cst1);                                         \
+    tmp4 = _mm_madd_epi16(lo_1, cst2);                                         \
+    tmp5 = _mm_madd_epi16(hi_1, cst2);                                         \
+    tmp6 = _mm_madd_epi16(lo_1, cst3);                                         \
+    tmp7 = _mm_madd_epi16(hi_1, cst3);                                         \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+    tmp4 = _mm_add_epi32(tmp4, rounding);                                      \
+    tmp5 = _mm_add_epi32(tmp5, rounding);                                      \
+    tmp6 = _mm_add_epi32(tmp6, rounding);                                      \
+    tmp7 = _mm_add_epi32(tmp7, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);                               \
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);                               \
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);                               \
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);                               \
+                                                                               \
+    res0 = _mm_packs_epi32(tmp0, tmp1);                                        \
+    res1 = _mm_packs_epi32(tmp2, tmp3);                                        \
+    res2 = _mm_packs_epi32(tmp4, tmp5);                                        \
+    res3 = _mm_packs_epi32(tmp6, tmp7);                                        \
   }
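
MULTIPLICATION_AND_ADD is the inverse-transform counterpart of the forward round-shift pipeline: madd an interleaved 16-bit pair against a packed cosine pair, round, shift, and saturate-pack back to 16 bits. One output lane in scalar form, assuming `rounding` is the usual splat of 1 << (DCT_CONST_BITS - 1) (an assumption; its definition is outside this patch):

  static int16_t mul_add_round_shift(int16_t a, int16_t b, int16_t c0,
                                     int16_t c1) {
    const int32_t s = (int32_t)a * c0 + (int32_t)b * c1; /* _mm_madd_epi16 */
    const int32_t r = (s + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
    return (int16_t)(r < -32768 ? -32768 : r > 32767 ? 32767 : r); /* packs */
  }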
 
 #define MULTIPLICATION_AND_ADD_2(lo_0, hi_0, cst0, cst1, res0, res1) \
-  {   \
-      tmp0 = _mm_madd_epi16(lo_0, cst0); \
-      tmp1 = _mm_madd_epi16(hi_0, cst0); \
-      tmp2 = _mm_madd_epi16(lo_0, cst1); \
-      tmp3 = _mm_madd_epi16(hi_0, cst1); \
-      \
-      tmp0 = _mm_add_epi32(tmp0, rounding); \
-      tmp1 = _mm_add_epi32(tmp1, rounding); \
-      tmp2 = _mm_add_epi32(tmp2, rounding); \
-      tmp3 = _mm_add_epi32(tmp3, rounding); \
-      \
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-      \
-      res0 = _mm_packs_epi32(tmp0, tmp1); \
-      res1 = _mm_packs_epi32(tmp2, tmp3); \
+  {                                                                  \
+    tmp0 = _mm_madd_epi16(lo_0, cst0);                               \
+    tmp1 = _mm_madd_epi16(hi_0, cst0);                               \
+    tmp2 = _mm_madd_epi16(lo_0, cst1);                               \
+    tmp3 = _mm_madd_epi16(hi_0, cst1);                               \
+                                                                     \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                            \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                            \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                            \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                            \
+                                                                     \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                     \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                     \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                     \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                     \
+                                                                     \
+    res0 = _mm_packs_epi32(tmp0, tmp1);                              \
+    res1 = _mm_packs_epi32(tmp2, tmp3);                              \
   }
 
-#define IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, \
-              out0, out1, out2, out3, out4, out5, out6, out7)  \
-  { \
-  /* Stage1 */      \
-  { \
-    const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7); \
-    const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7); \
-    const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5); \
-    const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5); \
-    \
-    MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, \
-                          stg1_1, stg1_2, stg1_3, stp1_4,      \
-                          stp1_7, stp1_5, stp1_6)              \
-  } \
-    \
-  /* Stage2 */ \
-  { \
-    const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4); \
-    const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4); \
-    const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6); \
-    const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6); \
-    \
-    MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, \
-                           stg2_1, stg2_2, stg2_3, stp2_0,     \
-                           stp2_1, stp2_2, stp2_3)             \
-    \
-    stp2_4 = _mm_adds_epi16(stp1_4, stp1_5); \
-    stp2_5 = _mm_subs_epi16(stp1_4, stp1_5); \
-    stp2_6 = _mm_subs_epi16(stp1_7, stp1_6); \
-    stp2_7 = _mm_adds_epi16(stp1_7, stp1_6); \
-  } \
-    \
-  /* Stage3 */ \
-  { \
-    const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-    const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-    \
-    stp1_0 = _mm_adds_epi16(stp2_0, stp2_3); \
-    stp1_1 = _mm_adds_epi16(stp2_1, stp2_2); \
-    stp1_2 = _mm_subs_epi16(stp2_1, stp2_2); \
-    stp1_3 = _mm_subs_epi16(stp2_0, stp2_3); \
-    \
-    tmp0 = _mm_madd_epi16(lo_56, stg2_1); \
-    tmp1 = _mm_madd_epi16(hi_56, stg2_1); \
-    tmp2 = _mm_madd_epi16(lo_56, stg2_0); \
-    tmp3 = _mm_madd_epi16(hi_56, stg2_0); \
-    \
-    tmp0 = _mm_add_epi32(tmp0, rounding); \
-    tmp1 = _mm_add_epi32(tmp1, rounding); \
-    tmp2 = _mm_add_epi32(tmp2, rounding); \
-    tmp3 = _mm_add_epi32(tmp3, rounding); \
-    \
-    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-    \
-    stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-    stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-  } \
-  \
-  /* Stage4  */ \
-  out0 = _mm_adds_epi16(stp1_0, stp2_7); \
-  out1 = _mm_adds_epi16(stp1_1, stp1_6); \
-  out2 = _mm_adds_epi16(stp1_2, stp1_5); \
-  out3 = _mm_adds_epi16(stp1_3, stp2_4); \
-  out4 = _mm_subs_epi16(stp1_3, stp2_4); \
-  out5 = _mm_subs_epi16(stp1_2, stp1_5); \
-  out6 = _mm_subs_epi16(stp1_1, stp1_6); \
-  out7 = _mm_subs_epi16(stp1_0, stp2_7); \
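+/* Four-stage 1-D 8-point inverse DCT applied to eight 16-bit columns in
+ * parallel; inputs and outputs are whole __m128i rows.  The stg1_x/stg2_x
+ * cospi constants plus the stp1_x, stp2_x, tmp and rounding locals must be
+ * declared by the caller. */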
+#define IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3, \
+              out4, out5, out6, out7)                                         \
+  {                                                                           \
+    /* Stage1 */                                                              \
+    {                                                                         \
+      const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7);                     \
+      const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7);                     \
+      const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5);                     \
+      const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5);                     \
+                                                                              \
+      MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, stg1_1,      \
+                             stg1_2, stg1_3, stp1_4, stp1_7, stp1_5, stp1_6)  \
+    }                                                                         \
+                                                                              \
+    /* Stage2 */                                                              \
+    {                                                                         \
+      const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4);                     \
+      const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4);                     \
+      const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6);                     \
+      const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6);                     \
+                                                                              \
+      MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, stg2_1,      \
+                             stg2_2, stg2_3, stp2_0, stp2_1, stp2_2, stp2_3)  \
+                                                                              \
+      stp2_4 = _mm_adds_epi16(stp1_4, stp1_5);                                \
+      stp2_5 = _mm_subs_epi16(stp1_4, stp1_5);                                \
+      stp2_6 = _mm_subs_epi16(stp1_7, stp1_6);                                \
+      stp2_7 = _mm_adds_epi16(stp1_7, stp1_6);                                \
+    }                                                                         \
+                                                                              \
+    /* Stage3 */                                                              \
+    {                                                                         \
+      const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5);               \
+      const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5);               \
+                                                                              \
+      stp1_0 = _mm_adds_epi16(stp2_0, stp2_3);                                \
+      stp1_1 = _mm_adds_epi16(stp2_1, stp2_2);                                \
+      stp1_2 = _mm_subs_epi16(stp2_1, stp2_2);                                \
+      stp1_3 = _mm_subs_epi16(stp2_0, stp2_3);                                \
+                                                                              \
+      tmp0 = _mm_madd_epi16(lo_56, stg2_1);                                   \
+      tmp1 = _mm_madd_epi16(hi_56, stg2_1);                                   \
+      tmp2 = _mm_madd_epi16(lo_56, stg2_0);                                   \
+      tmp3 = _mm_madd_epi16(hi_56, stg2_0);                                   \
+                                                                              \
+      tmp0 = _mm_add_epi32(tmp0, rounding);                                   \
+      tmp1 = _mm_add_epi32(tmp1, rounding);                                   \
+      tmp2 = _mm_add_epi32(tmp2, rounding);                                   \
+      tmp3 = _mm_add_epi32(tmp3, rounding);                                   \
+                                                                              \
+      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                            \
+      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                            \
+      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                            \
+      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                            \
+                                                                              \
+      stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                   \
+      stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                   \
+    }                                                                         \
+                                                                              \
+    /* Stage4 */                                                              \
+    out0 = _mm_adds_epi16(stp1_0, stp2_7);                                    \
+    out1 = _mm_adds_epi16(stp1_1, stp1_6);                                    \
+    out2 = _mm_adds_epi16(stp1_2, stp1_5);                                    \
+    out3 = _mm_adds_epi16(stp1_3, stp2_4);                                    \
+    out4 = _mm_subs_epi16(stp1_3, stp2_4);                                    \
+    out5 = _mm_subs_epi16(stp1_2, stp1_5);                                    \
+    out6 = _mm_subs_epi16(stp1_1, stp1_6);                                    \
+    out7 = _mm_subs_epi16(stp1_0, stp2_7);                                    \
   }
 
 void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
@@ -481,12 +478,12 @@ void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   // 2-D: each pass transposes, then runs the 1-D idct8, covering rows and columns
   for (i = 0; i < 2; i++) {
     // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
-    TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7,
-                  in0, in1, in2, in3, in4, in5, in6, in7);
+    TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                  in4, in5, in6, in7);
 
     // 4-stage 1D vp10_idct8x8
-    IDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-          in0, in1, in2, in3, in4, in5, in6, in7);
+    IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5,
+          in6, in7);
   }
 
   // Final rounding and shift
@@ -556,12 +553,12 @@ void vp10_idct8_sse2(__m128i *in) {
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
 
   // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
-  TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7],
-                in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
+                in1, in2, in3, in4, in5, in6, in7);
 
   // 4-stage 1D vp10_idct8x8
-  IDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-        in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7]);
+  IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in[0], in[1], in[2], in[3],
+        in[4], in[5], in[6], in[7]);
 }
 
 void vp10_iadst8_sse2(__m128i *in) {
@@ -901,8 +898,8 @@ void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
 
   TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, in0, in1, in2, in3)
 
-  IDCT8(in0, in1, in2, in3, zero, zero, zero, zero,
-        in0, in1, in2, in3, in4, in5, in6, in7);
+  IDCT8(in0, in1, in2, in3, zero, zero, zero, zero, in0, in1, in2, in3, in4,
+        in5, in6, in7);
   // Final rounding and shift
   in0 = _mm_adds_epi16(in0, final_rounding);
   in1 = _mm_adds_epi16(in1, final_rounding);
@@ -932,245 +929,237 @@ void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   RECON_AND_STORE(dest + 7 * stride, in7);
 }
 
-#define IDCT16 \
-  /* Stage2 */ \
-  { \
-    const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]); \
-    const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], in[15]); \
-    const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]);   \
-    const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]);   \
-    const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]); \
-    const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]); \
-    const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]); \
-    const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]); \
-    \
-    MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, \
-                           stg2_0, stg2_1, stg2_2, stg2_3, \
-                           stp2_8, stp2_15, stp2_9, stp2_14) \
-    \
-    MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, \
-                           stg2_4, stg2_5, stg2_6, stg2_7, \
-                           stp2_10, stp2_13, stp2_11, stp2_12) \
-  } \
-    \
-  /* Stage3 */ \
-  { \
-    const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]); \
-    const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]); \
-    const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]); \
-    const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]); \
-    \
-    MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, \
-                           stg3_0, stg3_1, stg3_2, stg3_3, \
-                           stp1_4, stp1_7, stp1_5, stp1_6) \
-    \
-    stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);  \
-    stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);    \
-    stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \
-    stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \
-    \
-    stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); \
-    stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \
-    stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \
-    stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \
-  } \
-  \
-  /* Stage4 */ \
-  { \
-    const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]); \
-    const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]); \
-    const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]); \
-    const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]); \
-    \
-    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
-    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
-    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-    \
-    MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, \
-                           stg4_0, stg4_1, stg4_2, stg4_3, \
-                           stp2_0, stp2_1, stp2_2, stp2_3) \
-    \
-    stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \
-    stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \
-    stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \
-    stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \
-    \
-    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \
-                           stg4_4, stg4_5, stg4_6, stg4_7, \
-                           stp2_9, stp2_14, stp2_10, stp2_13) \
-  } \
-    \
-  /* Stage5 */ \
-  { \
-    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-    \
-    stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \
-    stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \
-    stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \
-    stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \
-    \
-    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
-    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
-    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
-    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
-    \
-    tmp0 = _mm_add_epi32(tmp0, rounding); \
-    tmp1 = _mm_add_epi32(tmp1, rounding); \
-    tmp2 = _mm_add_epi32(tmp2, rounding); \
-    tmp3 = _mm_add_epi32(tmp3, rounding); \
-    \
-    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-    \
-    stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-    stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-    \
-    stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);  \
-    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);    \
-    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);   \
-    stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \
-    \
-    stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \
-    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);   \
-    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);   \
-    stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \
-  } \
-    \
-  /* Stage6 */ \
-  { \
-    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
-    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
-    \
-    stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \
-    stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
-    stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
-    stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \
-    stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \
-    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
-    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
-    stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \
-    \
-    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
-                           stg6_0, stg4_0, stg6_0, stg4_0, \
-                           stp2_10, stp2_13, stp2_11, stp2_12) \
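+/* Stages 2-6 of the 1-D 16-point inverse DCT over in[0..15], eight 16-bit
+ * columns at a time.  The concluding stage-7 output butterfly is left to
+ * the callers, which combine the resulting stp2_x and stp1_x values. */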
+#define IDCT16                                                                 \
+  /* Stage2 */                                                                 \
+  {                                                                            \
+    const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]);                 \
+    const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], in[15]);                 \
+    const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]);                   \
+    const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]);                   \
+    const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]);                 \
+    const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]);                 \
+    const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]);                 \
+    const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]);                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, stg2_0, stg2_1,   \
+                           stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, stp2_14)   \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, stg2_4, stg2_5, \
+                           stg2_6, stg2_7, stp2_10, stp2_13, stp2_11, stp2_12) \
+  }                                                                            \
+                                                                               \
+  /* Stage3 */                                                                 \
+  {                                                                            \
+    const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]);                 \
+    const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]);                 \
+    const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]);                 \
+    const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]);                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, stg3_0, stg3_1, \
+                           stg3_2, stg3_3, stp1_4, stp1_7, stp1_5, stp1_6)     \
+                                                                               \
+    stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);                                  \
+    stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);                                    \
+    stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);                                 \
+    stp1_11 = _mm_add_epi16(stp2_11, stp2_10);                                 \
+                                                                               \
+    stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13);                               \
+    stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);                                 \
+    stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);                                 \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_14);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage4 */                                                                 \
+  {                                                                            \
+    const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]);                   \
+    const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]);                   \
+    const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]);                 \
+    const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]);                 \
+                                                                               \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);               \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);               \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, stg4_0, stg4_1,   \
+                           stg4_2, stg4_3, stp2_0, stp2_1, stp2_2, stp2_3)     \
+                                                                               \
+    stp2_4 = _mm_add_epi16(stp1_4, stp1_5);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);                                    \
+    stp2_7 = _mm_add_epi16(stp1_7, stp1_6);                                    \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,       \
+                           stg4_5, stg4_6, stg4_7, stp2_9, stp2_14, stp2_10,   \
+                           stp2_13)                                            \
+  }                                                                            \
+                                                                               \
+  /* Stage5 */                                                                 \
+  {                                                                            \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);                 \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);                 \
+                                                                               \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_3);                                    \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_2);                                    \
+    stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);                                    \
+    stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);                                    \
+                                                                               \
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);                                     \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);                                     \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);                                     \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);                                     \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+                                                                               \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                      \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                      \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);                                 \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);                                   \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11);                                \
+                                                                               \
+    stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0);                               \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);                                 \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);                                 \
+    stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0);                               \
+  }                                                                            \
+                                                                               \
+  /* Stage6 */                                                                 \
+  {                                                                            \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);             \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);             \
+                                                                               \
+    stp2_0 = _mm_add_epi16(stp1_0, stp2_7);                                    \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);                                    \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);                                    \
+    stp2_3 = _mm_add_epi16(stp1_3, stp2_4);                                    \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp2_4);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);                                    \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp2_7);                                    \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
   }
 
-#define IDCT16_10 \
-    /* Stage2 */ \
-    { \
-      const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], zero); \
-      const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], zero); \
-      const __m128i lo_13_3 = _mm_unpacklo_epi16(zero, in[3]); \
-      const __m128i hi_13_3 = _mm_unpackhi_epi16(zero, in[3]); \
-      \
-      MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_13_3, hi_13_3, \
-                             stg2_0, stg2_1, stg2_6, stg2_7, \
-                             stp1_8_0, stp1_15, stp1_11, stp1_12_0) \
-    } \
-      \
-    /* Stage3 */ \
-    { \
-      const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], zero); \
-      const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], zero); \
-      \
-      MULTIPLICATION_AND_ADD_2(lo_2_14, hi_2_14, \
-                               stg3_0, stg3_1,  \
-                               stp2_4, stp2_7) \
-      \
-      stp1_9  =  stp1_8_0; \
-      stp1_10 =  stp1_11;  \
-      \
-      stp1_13 = stp1_12_0; \
-      stp1_14 = stp1_15;   \
-    } \
-    \
-    /* Stage4 */ \
-    { \
-      const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero); \
-      const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], zero); \
-      \
-      const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
-      const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
-      const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-      const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-      \
-      MULTIPLICATION_AND_ADD_2(lo_0_8, hi_0_8, \
-                               stg4_0, stg4_1, \
-                               stp1_0, stp1_1) \
-      stp2_5 = stp2_4; \
-      stp2_6 = stp2_7; \
-      \
-      MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \
-                             stg4_4, stg4_5, stg4_6, stg4_7, \
-                             stp2_9, stp2_14, stp2_10, stp2_13) \
-    } \
-      \
-    /* Stage5 */ \
-    { \
-      const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-      const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-      \
-      stp1_2 = stp1_1; \
-      stp1_3 = stp1_0; \
-      \
-      tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
-      tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
-      tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
-      tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
-      \
-      tmp0 = _mm_add_epi32(tmp0, rounding); \
-      tmp1 = _mm_add_epi32(tmp1, rounding); \
-      tmp2 = _mm_add_epi32(tmp2, rounding); \
-      tmp3 = _mm_add_epi32(tmp3, rounding); \
-      \
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-      \
-      stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-      stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-      \
-      stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);  \
-      stp1_9 = _mm_add_epi16(stp2_9, stp2_10);    \
-      stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);   \
-      stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \
-      \
-      stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \
-      stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);   \
-      stp1_14 = _mm_add_epi16(stp2_14, stp2_13);   \
-      stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \
-    } \
-      \
-    /* Stage6 */ \
-    { \
-      const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-      const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-      const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
-      const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
-      \
-      stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \
-      stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
-      stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
-      stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \
-      stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \
-      stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
-      stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
-      stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \
-      \
-      MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
-                             stg6_0, stg4_0, stg6_0, stg4_0, \
-                             stp2_10, stp2_13, stp2_11, stp2_12) \
-    }
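+/* Sparse variant of IDCT16: only in[0]..in[3] may be non-zero (the eob <= 10
+ * fast path used by vp10_idct16x16_10_add_sse2), so rotations against the
+ * zeroed half collapse into copies or cheaper MULTIPLICATION_AND_ADD_2
+ * calls. */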
+#define IDCT16_10                                                              \
+  /* Stage2 */                                                                 \
+  {                                                                            \
+    const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], zero);                   \
+    const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], zero);                   \
+    const __m128i lo_13_3 = _mm_unpacklo_epi16(zero, in[3]);                   \
+    const __m128i hi_13_3 = _mm_unpackhi_epi16(zero, in[3]);                   \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_13_3, hi_13_3, stg2_0, stg2_1, \
+                           stg2_6, stg2_7, stp1_8_0, stp1_15, stp1_11,         \
+                           stp1_12_0)                                          \
+  }                                                                            \
+                                                                               \
+  /* Stage3 */                                                                 \
+  {                                                                            \
+    const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], zero);                   \
+    const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], zero);                   \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_2_14, hi_2_14, stg3_0, stg3_1, stp2_4, stp2_7) \
+                                                                               \
+    stp1_9 = stp1_8_0;                                                         \
+    stp1_10 = stp1_11;                                                         \
+                                                                               \
+    stp1_13 = stp1_12_0;                                                       \
+    stp1_14 = stp1_15;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage4 */                                                                 \
+  {                                                                            \
+    const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero);                    \
+    const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], zero);                    \
+                                                                               \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);               \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);               \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_0_8, hi_0_8, stg4_0, stg4_1, stp1_0, stp1_1)   \
+    stp2_5 = stp2_4;                                                           \
+    stp2_6 = stp2_7;                                                           \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,       \
+                           stg4_5, stg4_6, stg4_7, stp2_9, stp2_14, stp2_10,   \
+                           stp2_13)                                            \
+  }                                                                            \
+                                                                               \
+  /* Stage5 */                                                                 \
+  {                                                                            \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);                 \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);                 \
+                                                                               \
+    stp1_2 = stp1_1;                                                           \
+    stp1_3 = stp1_0;                                                           \
+                                                                               \
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);                                     \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);                                     \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);                                     \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);                                     \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+                                                                               \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                      \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                      \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);                                 \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);                                   \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11);                                \
+                                                                               \
+    stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0);                               \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);                                 \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);                                 \
+    stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0);                               \
+  }                                                                            \
+                                                                               \
+  /* Stage6 */                                                                 \
+  {                                                                            \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);             \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);             \
+                                                                               \
+    stp2_0 = _mm_add_epi16(stp1_0, stp2_7);                                    \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);                                    \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);                                    \
+    stp2_3 = _mm_add_epi16(stp1_3, stp2_4);                                    \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp2_4);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);                                    \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp2_7);                                    \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
+  }
 
 void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
-                                int stride) {
+                                 int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
@@ -1202,10 +1191,10 @@ void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
 
   __m128i in[16], l[16], r[16], *curr1;
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
-          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
-          stp1_8_0, stp1_12_0;
+      stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+      stp1_8_0, stp1_12_0;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
-          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i;
 
@@ -1294,8 +1283,7 @@ void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct16x16_1_add_sse2(const int16_t *input,
-                               uint8_t *dest,
+void vp10_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
                                int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
@@ -1308,16 +1296,16 @@ void vp10_idct16x16_1_add_sse2(const int16_t *input,
   dc_value = _mm_set1_epi16(a);
 
   for (i = 0; i < 2; ++i) {
-    RECON_AND_STORE(dest +  0 * stride, dc_value);
-    RECON_AND_STORE(dest +  1 * stride, dc_value);
-    RECON_AND_STORE(dest +  2 * stride, dc_value);
-    RECON_AND_STORE(dest +  3 * stride, dc_value);
-    RECON_AND_STORE(dest +  4 * stride, dc_value);
-    RECON_AND_STORE(dest +  5 * stride, dc_value);
-    RECON_AND_STORE(dest +  6 * stride, dc_value);
-    RECON_AND_STORE(dest +  7 * stride, dc_value);
-    RECON_AND_STORE(dest +  8 * stride, dc_value);
-    RECON_AND_STORE(dest +  9 * stride, dc_value);
+    RECON_AND_STORE(dest + 0 * stride, dc_value);
+    RECON_AND_STORE(dest + 1 * stride, dc_value);
+    RECON_AND_STORE(dest + 2 * stride, dc_value);
+    RECON_AND_STORE(dest + 3 * stride, dc_value);
+    RECON_AND_STORE(dest + 4 * stride, dc_value);
+    RECON_AND_STORE(dest + 5 * stride, dc_value);
+    RECON_AND_STORE(dest + 6 * stride, dc_value);
+    RECON_AND_STORE(dest + 7 * stride, dc_value);
+    RECON_AND_STORE(dest + 8 * stride, dc_value);
+    RECON_AND_STORE(dest + 9 * stride, dc_value);
     RECON_AND_STORE(dest + 10 * stride, dc_value);
     RECON_AND_STORE(dest + 11 * stride, dc_value);
     RECON_AND_STORE(dest + 12 * stride, dc_value);
@@ -1901,9 +1889,9 @@ static void vp10_idct16_8col(__m128i *in) {
   u[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
   u[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
 
-  s[8]  = _mm_packs_epi32(u[0], u[1]);
+  s[8] = _mm_packs_epi32(u[0], u[1]);
   s[15] = _mm_packs_epi32(u[2], u[3]);
-  s[9]  = _mm_packs_epi32(u[4], u[5]);
+  s[9] = _mm_packs_epi32(u[4], u[5]);
   s[14] = _mm_packs_epi32(u[6], u[7]);
   s[10] = _mm_packs_epi32(u[8], u[9]);
   s[13] = _mm_packs_epi32(u[10], u[11]);
@@ -2031,7 +2019,7 @@ static void vp10_idct16_8col(__m128i *in) {
   s[7] = _mm_add_epi16(t[6], t[7]);
   s[8] = t[8];
   s[15] = t[15];
-  s[9]  = _mm_packs_epi32(u[8], u[9]);
+  s[9] = _mm_packs_epi32(u[8], u[9]);
   s[14] = _mm_packs_epi32(u[10], u[11]);
   s[10] = _mm_packs_epi32(u[12], u[13]);
   s[13] = _mm_packs_epi32(u[14], u[15]);
@@ -2155,7 +2143,7 @@ void vp10_iadst16_sse2(__m128i *in0, __m128i *in1) {
 }
 
 void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
-                               int stride) {
+                                int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
@@ -2177,11 +2165,11 @@ void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
 
   const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
   __m128i in[16], l[16];
-  __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6,
-          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
-          stp1_8_0, stp1_12_0;
+  __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_8,
+      stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, stp1_8_0,
+      stp1_12_0;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
-          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14;
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i;
   // First 1-D inverse DCT
@@ -2213,7 +2201,7 @@ void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
     tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
     tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
 
-    stp2_8  = _mm_packs_epi32(tmp0, tmp2);
+    stp2_8 = _mm_packs_epi32(tmp0, tmp2);
     stp2_11 = _mm_packs_epi32(tmp5, tmp7);
   }
 
@@ -2277,9 +2265,9 @@ void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
     tmp2 = _mm_add_epi16(stp2_9, stp2_10);
     tmp3 = _mm_sub_epi16(stp2_9, stp2_10);
 
-    stp1_9  = _mm_unpacklo_epi64(tmp2, zero);
+    stp1_9 = _mm_unpacklo_epi64(tmp2, zero);
     stp1_10 = _mm_unpacklo_epi64(tmp3, zero);
-    stp1_8  = _mm_unpacklo_epi64(tmp0, zero);
+    stp1_8 = _mm_unpacklo_epi64(tmp0, zero);
     stp1_11 = _mm_unpacklo_epi64(tmp1, zero);
 
     stp1_13 = _mm_unpackhi_epi64(tmp3, zero);
@@ -2391,650 +2379,647 @@ void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
   }
 }
 
-#define LOAD_DQCOEFF(reg, input) \
-  {  \
-    reg = _mm_load_si128((const __m128i *) input); \
-    input += 8; \
-  }  \
-
-#define IDCT32_34 \
-/* Stage1 */ \
-{ \
-  const __m128i zero = _mm_setzero_si128();\
-  const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero); \
-  const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], zero); \
-  \
-  const __m128i lo_25_7= _mm_unpacklo_epi16(zero, in[7]); \
-  const __m128i hi_25_7 = _mm_unpackhi_epi16(zero, in[7]); \
-  \
-  const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], zero); \
-  const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], zero); \
-  \
-  const __m128i lo_29_3 = _mm_unpacklo_epi16(zero, in[3]); \
-  const __m128i hi_29_3 = _mm_unpackhi_epi16(zero, in[3]); \
-  \
-  MULTIPLICATION_AND_ADD_2(lo_1_31, hi_1_31, stg1_0, \
-                         stg1_1, stp1_16, stp1_31); \
-  MULTIPLICATION_AND_ADD_2(lo_25_7, hi_25_7, stg1_6, \
-                         stg1_7, stp1_19, stp1_28); \
-  MULTIPLICATION_AND_ADD_2(lo_5_27, hi_5_27, stg1_8, \
-                         stg1_9, stp1_20, stp1_27); \
-  MULTIPLICATION_AND_ADD_2(lo_29_3, hi_29_3, stg1_14, \
-                         stg1_15, stp1_23, stp1_24); \
-} \
-\
-/* Stage2 */ \
-{ \
-  const __m128i zero = _mm_setzero_si128();\
-  const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero); \
-  const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero); \
-  \
-  const __m128i lo_26_6 = _mm_unpacklo_epi16(zero, in[6]); \
-  const __m128i hi_26_6 = _mm_unpackhi_epi16(zero, in[6]); \
-  \
-  MULTIPLICATION_AND_ADD_2(lo_2_30, hi_2_30, stg2_0, \
-                         stg2_1, stp2_8, stp2_15); \
-  MULTIPLICATION_AND_ADD_2(lo_26_6, hi_26_6, stg2_6, \
-                         stg2_7, stp2_11, stp2_12); \
-  \
-  stp2_16 = stp1_16; \
-  stp2_19 = stp1_19; \
-  \
-  stp2_20 = stp1_20; \
-  stp2_23 = stp1_23; \
-  \
-  stp2_24 = stp1_24; \
-  stp2_27 = stp1_27; \
-  \
-  stp2_28 = stp1_28; \
-  stp2_31 = stp1_31; \
-} \
-\
-/* Stage3 */ \
-{ \
-  const __m128i zero = _mm_setzero_si128();\
-  const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero); \
-  const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero); \
-  \
-  const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1_16, stp1_31); \
-  const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1_16, stp1_31); \
-  const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1_19, stp1_28); \
-  const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1_19, stp1_28); \
-  \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1_20, stp1_27); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1_20, stp1_27); \
-  const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1_23, stp1_24); \
-  const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1_23, stp2_24); \
-  \
-  MULTIPLICATION_AND_ADD_2(lo_4_28, hi_4_28, stg3_0, \
-                         stg3_1, stp1_4, stp1_7); \
-  \
-  stp1_8 = stp2_8; \
-  stp1_11 = stp2_11; \
-  stp1_12 = stp2_12; \
-  stp1_15 = stp2_15; \
-  \
-  MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \
-                         stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \
-                         stp1_18, stp1_29) \
-  MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \
-                         stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \
-                         stp1_22, stp1_25) \
-  \
-  stp1_16 = stp2_16; \
-  stp1_31 = stp2_31; \
-  stp1_19 = stp2_19; \
-  stp1_20 = stp2_20; \
-  stp1_23 = stp2_23; \
-  stp1_24 = stp2_24; \
-  stp1_27 = stp2_27; \
-  stp1_28 = stp2_28; \
-} \
-\
-/* Stage4 */ \
-{ \
-  const __m128i zero = _mm_setzero_si128();\
-  const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero); \
-  const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero); \
-  \
-  const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15); \
-  const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15); \
-  const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12); \
-  const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, stp2_12); \
-  \
-  MULTIPLICATION_AND_ADD_2(lo_0_16, hi_0_16, stg4_0, \
-                         stg4_1, stp2_0, stp2_1); \
-  \
-  stp2_4 = stp1_4; \
-  stp2_5 = stp1_4; \
-  stp2_6 = stp1_7; \
-  stp2_7 = stp1_7; \
-  \
-  MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \
-                         stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \
-                         stp2_10, stp2_13) \
-  \
-  stp2_8 = stp1_8; \
-  stp2_15 = stp1_15; \
-  stp2_11 = stp1_11; \
-  stp2_12 = stp1_12; \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \
-  stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \
-  stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \
-  stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \
-  stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \
-  stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \
-  stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \
-  stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \
-  \
-  stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \
-  stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \
-  stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \
-  stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \
-  stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \
-  stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \
-  stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \
-} \
-\
-/* Stage5 */ \
-{ \
-  const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-  const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-  const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \
-  const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \
-  \
-  const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \
-  const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \
-  const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \
-  const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \
-  \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  \
-  stp1_0 = stp2_0; \
-  stp1_1 = stp2_1; \
-  stp1_2 = stp2_1; \
-  stp1_3 = stp2_0; \
-  \
-  tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
-  tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
-  tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
-  tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
-  \
-  tmp0 = _mm_add_epi32(tmp0, rounding); \
-  tmp1 = _mm_add_epi32(tmp1, rounding); \
-  tmp2 = _mm_add_epi32(tmp2, rounding); \
-  tmp3 = _mm_add_epi32(tmp3, rounding); \
-  \
-  tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-  tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-  tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-  \
-  stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-  stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-  \
-  stp1_4 = stp2_4; \
-  stp1_7 = stp2_7; \
-  \
-  stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \
-  stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \
-  stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \
-  stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \
-  stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \
-  stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \
-  stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \
-  stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \
-  \
-  stp1_16 = stp2_16; \
-  stp1_17 = stp2_17; \
-  \
-  MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \
-                         stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \
-                         stp1_19, stp1_28) \
-  MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \
-                         stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  \
-  stp1_22 = stp2_22; \
-  stp1_23 = stp2_23; \
-  stp1_24 = stp2_24; \
-  stp1_25 = stp2_25; \
-  stp1_30 = stp2_30; \
-  stp1_31 = stp2_31; \
-} \
-\
-/* Stage6 */ \
-{ \
-  const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-  const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-  const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
-  const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
-  \
-  stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \
-  stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
-  stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
-  stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \
-  stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \
-  stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
-  stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
-  stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \
-  \
-  stp2_8 = stp1_8; \
-  stp2_9 = stp1_9; \
-  stp2_14 = stp1_14; \
-  stp2_15 = stp1_15; \
-  \
-  MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
-                         stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \
-                         stp2_13, stp2_11, stp2_12) \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \
-  stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \
-  stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \
-  stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \
-  stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \
-  stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \
-  stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \
-  stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \
-  \
-  stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \
-  stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \
-  stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \
-  stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \
-  stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \
-  stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \
-  stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \
-} \
-\
-/* Stage7 */ \
-{ \
-  const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \
-  const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  \
-  const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \
-  const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \
-  const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \
-  const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \
-  \
-  stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \
-  stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \
-  stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \
-  stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \
-  stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \
-  stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \
-  stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \
-  stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \
-  stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \
-  stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \
-  stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \
-  stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \
-  stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \
-  stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \
-  stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \
-  stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \
-  \
-  stp1_16 = stp2_16; \
-  stp1_17 = stp2_17; \
-  stp1_18 = stp2_18; \
-  stp1_19 = stp2_19; \
-  \
-  MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \
-                         stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \
-                         stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \
-                         stp1_23, stp1_24) \
-  \
-  stp1_28 = stp2_28; \
-  stp1_29 = stp2_29; \
-  stp1_30 = stp2_30; \
-  stp1_31 = stp2_31; \
-}
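+/* Loads one row of eight 16-bit dequantized coefficients and advances the
+ * input pointer; `input` must be 16-byte aligned for _mm_load_si128. */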
+#define LOAD_DQCOEFF(reg, input)                  \
+  {                                               \
+    reg = _mm_load_si128((const __m128i *)input); \
+    input += 8;                                   \
+  }
 
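+/* 1-D 32-point inverse DCT for sparse blocks where only in[0]..in[7] may be
+ * non-zero (the eob <= 34 fast path): the input rotations of stages 1-4 each
+ * reduce to a single MULTIPLICATION_AND_ADD_2 against a zeroed half. */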
+#define IDCT32_34                                                              \
+  /* Stage1 */                                                                 \
+  {                                                                            \
+    const __m128i zero = _mm_setzero_si128();                                  \
+    const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero);                   \
+    const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], zero);                   \
+                                                                               \
+    const __m128i lo_25_7 = _mm_unpacklo_epi16(zero, in[7]);                   \
+    const __m128i hi_25_7 = _mm_unpackhi_epi16(zero, in[7]);                   \
+                                                                               \
+    const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], zero);                   \
+    const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], zero);                   \
+                                                                               \
+    const __m128i lo_29_3 = _mm_unpacklo_epi16(zero, in[3]);                   \
+    const __m128i hi_29_3 = _mm_unpackhi_epi16(zero, in[3]);                   \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_1_31, hi_1_31, stg1_0, stg1_1, stp1_16,        \
+                             stp1_31);                                         \
+    MULTIPLICATION_AND_ADD_2(lo_25_7, hi_25_7, stg1_6, stg1_7, stp1_19,        \
+                             stp1_28);                                         \
+    MULTIPLICATION_AND_ADD_2(lo_5_27, hi_5_27, stg1_8, stg1_9, stp1_20,        \
+                             stp1_27);                                         \
+    MULTIPLICATION_AND_ADD_2(lo_29_3, hi_29_3, stg1_14, stg1_15, stp1_23,      \
+                             stp1_24);                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage2 */                                                                 \
+  {                                                                            \
+    const __m128i zero = _mm_setzero_si128();                                  \
+    const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero);                   \
+    const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero);                   \
+                                                                               \
+    const __m128i lo_26_6 = _mm_unpacklo_epi16(zero, in[6]);                   \
+    const __m128i hi_26_6 = _mm_unpackhi_epi16(zero, in[6]);                   \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_2_30, hi_2_30, stg2_0, stg2_1, stp2_8,         \
+                             stp2_15);                                         \
+    MULTIPLICATION_AND_ADD_2(lo_26_6, hi_26_6, stg2_6, stg2_7, stp2_11,        \
+                             stp2_12);                                         \
+                                                                               \
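+    /* stp1_17/18/21/22/25/26/29/30 derive from zero inputs, so these        \
+       stage-2 butterflies reduce to plain copies. */                         \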
+    stp2_16 = stp1_16;                                                         \
+    stp2_19 = stp1_19;                                                         \
+                                                                               \
+    stp2_20 = stp1_20;                                                         \
+    stp2_23 = stp1_23;                                                         \
+                                                                               \
+    stp2_24 = stp1_24;                                                         \
+    stp2_27 = stp1_27;                                                         \
+                                                                               \
+    stp2_28 = stp1_28;                                                         \
+    stp2_31 = stp1_31;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage3 */                                                                 \
+  {                                                                            \
+    const __m128i zero = _mm_setzero_si128();                                  \
+    const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero);                   \
+    const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero);                   \
+                                                                               \
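+    /* Since the odd stage-1 outputs are zero, stp2_17 == stp1_16,           \
+       stp2_30 == stp1_31, etc.; the pair names below keep the full-length   \
+       lane numbering while reading straight from the stage-1 registers. */   \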
+    const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1_16, stp1_31);             \
+    const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1_16, stp1_31);             \
+    const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1_19, stp1_28);             \
+    const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1_19, stp1_28);             \
+                                                                               \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1_20, stp1_27);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1_20, stp1_27);             \
+    const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1_23, stp1_24);             \
+    const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1_23, stp1_24);            \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_4_28, hi_4_28, stg3_0, stg3_1, stp1_4,         \
+                             stp1_7);                                          \
+                                                                               \
+    stp1_8 = stp2_8;                                                           \
+    stp1_11 = stp2_11;                                                         \
+    stp1_12 = stp2_12;                                                         \
+    stp1_15 = stp2_15;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4,     \
+                           stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, stp1_18,  \
+                           stp1_29)                                            \
+    MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8,     \
+                           stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, stp1_22, \
+                           stp1_25)                                            \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_31 = stp2_31;                                                         \
+    stp1_19 = stp2_19;                                                         \
+    stp1_20 = stp2_20;                                                         \
+    stp1_23 = stp2_23;                                                         \
+    stp1_24 = stp2_24;                                                         \
+    stp1_27 = stp2_27;                                                         \
+    stp1_28 = stp2_28;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage4 */                                                                 \
+  {                                                                            \
+    const __m128i zero = _mm_setzero_si128();                                  \
+    const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero);                   \
+    const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero);                   \
+                                                                               \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15);               \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15);               \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, stp2_12);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_0_16, hi_0_16, stg4_0, stg4_1, stp2_0,         \
+                             stp2_1);                                          \
+                                                                               \
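+    /* stp1_5 and stp1_6 are zero here (in[12] and in[20] are zero), so the  \
+       4..7 butterflies collapse to copies of stp1_4 and stp1_7. */           \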
+    stp2_4 = stp1_4;                                                           \
+    stp2_5 = stp1_4;                                                           \
+    stp2_6 = stp1_7;                                                           \
+    stp2_7 = stp1_7;                                                           \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,       \
+                           stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, stp2_10,   \
+                           stp2_13)                                            \
+                                                                               \
+    stp2_8 = stp1_8;                                                           \
+    stp2_15 = stp1_15;                                                         \
+    stp2_11 = stp1_11;                                                         \
+    stp2_12 = stp1_12;                                                         \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_19);                                 \
+    stp2_17 = _mm_add_epi16(stp1_17, stp1_18);                                 \
+    stp2_18 = _mm_sub_epi16(stp1_17, stp1_18);                                 \
+    stp2_19 = _mm_sub_epi16(stp1_16, stp1_19);                                 \
+    stp2_20 = _mm_sub_epi16(stp1_23, stp1_20);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_22, stp1_21);                                 \
+    stp2_22 = _mm_add_epi16(stp1_22, stp1_21);                                 \
+    stp2_23 = _mm_add_epi16(stp1_23, stp1_20);                                 \
+                                                                               \
+    stp2_24 = _mm_add_epi16(stp1_24, stp1_27);                                 \
+    stp2_25 = _mm_add_epi16(stp1_25, stp1_26);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_25, stp1_26);                                 \
+    stp2_27 = _mm_sub_epi16(stp1_24, stp1_27);                                 \
+    stp2_28 = _mm_sub_epi16(stp1_31, stp1_28);                                 \
+    stp2_29 = _mm_sub_epi16(stp1_30, stp1_29);                                 \
+    stp2_30 = _mm_add_epi16(stp1_29, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_28, stp1_31);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage5 */                                                                 \
+  {                                                                            \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);                 \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);                 \
+    const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);             \
+    const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);             \
+                                                                               \
+    const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28);             \
+    const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28);             \
+    const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);             \
+    const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);             \
+                                                                               \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+                                                                               \
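+    /* stp2_2 and stp2_3 are zero here (in[8] and in[24] are zero), so       \
+       stp1_2 and stp1_3 are copies of stp2_1 and stp2_0. */                  \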
+    stp1_0 = stp2_0;                                                           \
+    stp1_1 = stp2_1;                                                           \
+    stp1_2 = stp2_1;                                                           \
+    stp1_3 = stp2_0;                                                           \
+                                                                               \
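+    /* Inline butterfly on (stp2_6, stp2_5): madd by stg4_1/stg4_0, round,   \
+       shift by DCT_CONST_BITS, then pack to 16-bit stp1_5/stp1_6. */         \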
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);                                     \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);                                     \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);                                     \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);                                     \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+                                                                               \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                      \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                      \
+                                                                               \
+    stp1_4 = stp2_4;                                                           \
+    stp1_7 = stp2_7;                                                           \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp2_8, stp2_11);                                   \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);                                   \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp2_8, stp2_11);                                  \
+    stp1_12 = _mm_sub_epi16(stp2_15, stp2_12);                                 \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);                                 \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);                                 \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_12);                                 \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_17 = stp2_17;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4,     \
+                           stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, stp1_19,  \
+                           stp1_28)                                            \
+    MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6,     \
+                           stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, stp1_21,  \
+                           stp1_26)                                            \
+                                                                               \
+    stp1_22 = stp2_22;                                                         \
+    stp1_23 = stp2_23;                                                         \
+    stp1_24 = stp2_24;                                                         \
+    stp1_25 = stp2_25;                                                         \
+    stp1_30 = stp2_30;                                                         \
+    stp1_31 = stp2_31;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage6 */                                                                 \
+  {                                                                            \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);             \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);             \
+                                                                               \
+    stp2_0 = _mm_add_epi16(stp1_0, stp1_7);                                    \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);                                    \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);                                    \
+    stp2_3 = _mm_add_epi16(stp1_3, stp1_4);                                    \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp1_4);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);                                    \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp1_7);                                    \
+                                                                               \
+    stp2_8 = stp1_8;                                                           \
+    stp2_9 = stp1_9;                                                           \
+    stp2_14 = stp1_14;                                                         \
+    stp2_15 = stp1_15;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_23);                                 \
+    stp2_17 = _mm_add_epi16(stp1_17, stp1_22);                                 \
+    stp2_18 = _mm_add_epi16(stp1_18, stp1_21);                                 \
+    stp2_19 = _mm_add_epi16(stp1_19, stp1_20);                                 \
+    stp2_20 = _mm_sub_epi16(stp1_19, stp1_20);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_18, stp1_21);                                 \
+    stp2_22 = _mm_sub_epi16(stp1_17, stp1_22);                                 \
+    stp2_23 = _mm_sub_epi16(stp1_16, stp1_23);                                 \
+                                                                               \
+    stp2_24 = _mm_sub_epi16(stp1_31, stp1_24);                                 \
+    stp2_25 = _mm_sub_epi16(stp1_30, stp1_25);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_29, stp1_26);                                 \
+    stp2_27 = _mm_sub_epi16(stp1_28, stp1_27);                                 \
+    stp2_28 = _mm_add_epi16(stp1_27, stp1_28);                                 \
+    stp2_29 = _mm_add_epi16(stp1_26, stp1_29);                                 \
+    stp2_30 = _mm_add_epi16(stp1_25, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_24, stp1_31);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage7 */                                                                 \
+  {                                                                            \
+    const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);             \
+    const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);             \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+                                                                               \
+    const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);             \
+    const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);             \
+    const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24);             \
+    const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24);             \
+                                                                               \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_15);                                   \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_14);                                   \
+    stp1_2 = _mm_add_epi16(stp2_2, stp2_13);                                   \
+    stp1_3 = _mm_add_epi16(stp2_3, stp2_12);                                   \
+    stp1_4 = _mm_add_epi16(stp2_4, stp2_11);                                   \
+    stp1_5 = _mm_add_epi16(stp2_5, stp2_10);                                   \
+    stp1_6 = _mm_add_epi16(stp2_6, stp2_9);                                    \
+    stp1_7 = _mm_add_epi16(stp2_7, stp2_8);                                    \
+    stp1_8 = _mm_sub_epi16(stp2_7, stp2_8);                                    \
+    stp1_9 = _mm_sub_epi16(stp2_6, stp2_9);                                    \
+    stp1_10 = _mm_sub_epi16(stp2_5, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp2_4, stp2_11);                                  \
+    stp1_12 = _mm_sub_epi16(stp2_3, stp2_12);                                  \
+    stp1_13 = _mm_sub_epi16(stp2_2, stp2_13);                                  \
+    stp1_14 = _mm_sub_epi16(stp2_1, stp2_14);                                  \
+    stp1_15 = _mm_sub_epi16(stp2_0, stp2_15);                                  \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_17 = stp2_17;                                                         \
+    stp1_18 = stp2_18;                                                         \
+    stp1_19 = stp2_19;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, stp1_21,  \
+                           stp1_26)                                            \
+    MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, stp1_23,  \
+                           stp1_24)                                            \
+                                                                               \
+    stp1_28 = stp2_28;                                                         \
+    stp1_29 = stp2_29;                                                         \
+    stp1_30 = stp2_30;                                                         \
+    stp1_31 = stp2_31;                                                         \
+  }
 
-#define IDCT32 \
-/* Stage1 */ \
-{ \
-  const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], in[31]); \
-  const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]); \
-  const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]); \
-  const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]); \
-  \
-  const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]); \
-  const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]); \
-  const __m128i lo_25_7= _mm_unpacklo_epi16(in[25], in[7]); \
-  const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], in[7]); \
-  \
-  const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]); \
-  const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]); \
-  const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]); \
-  const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]); \
-  \
-  const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]); \
-  const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]); \
-  const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]); \
-  const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]); \
-  \
-  MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0, \
-                         stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, \
-                         stp1_17, stp1_30) \
-  MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4, \
-                         stg1_5, stg1_6, stg1_7, stp1_18, stp1_29, \
-                         stp1_19, stp1_28) \
-  MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8, \
-                         stg1_9, stg1_10, stg1_11, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12, \
-                         stg1_13, stg1_14, stg1_15, stp1_22, stp1_25, \
-                         stp1_23, stp1_24) \
-} \
-\
-/* Stage2 */ \
-{ \
-  const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], in[30]); \
-  const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]); \
-  const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]); \
-  const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]); \
-  \
-  const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]); \
-  const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]); \
-  const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]); \
-  const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]); \
-  \
-  MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0, \
-                         stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, \
-                         stp2_14) \
-  MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4, \
-                         stg2_5, stg2_6, stg2_7, stp2_10, stp2_13, \
-                         stp2_11, stp2_12) \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_17); \
-  stp2_17 = _mm_sub_epi16(stp1_16, stp1_17); \
-  stp2_18 = _mm_sub_epi16(stp1_19, stp1_18); \
-  stp2_19 = _mm_add_epi16(stp1_19, stp1_18); \
-  \
-  stp2_20 = _mm_add_epi16(stp1_20, stp1_21); \
-  stp2_21 = _mm_sub_epi16(stp1_20, stp1_21); \
-  stp2_22 = _mm_sub_epi16(stp1_23, stp1_22); \
-  stp2_23 = _mm_add_epi16(stp1_23, stp1_22); \
-  \
-  stp2_24 = _mm_add_epi16(stp1_24, stp1_25); \
-  stp2_25 = _mm_sub_epi16(stp1_24, stp1_25); \
-  stp2_26 = _mm_sub_epi16(stp1_27, stp1_26); \
-  stp2_27 = _mm_add_epi16(stp1_27, stp1_26); \
-  \
-  stp2_28 = _mm_add_epi16(stp1_28, stp1_29); \
-  stp2_29 = _mm_sub_epi16(stp1_28, stp1_29); \
-  stp2_30 = _mm_sub_epi16(stp1_31, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_31, stp1_30); \
-} \
-\
-/* Stage3 */ \
-{ \
-  const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]); \
-  const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]); \
-  const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]); \
-  const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]); \
-  \
-  const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30); \
-  const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30); \
-  const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \
-  const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \
-  \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \
-  const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \
-  \
-  MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0, \
-                         stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5, \
-                         stp1_6) \
-  \
-  stp1_8 = _mm_add_epi16(stp2_8, stp2_9); \
-  stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \
-  stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \
-  stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \
-  stp1_12 = _mm_add_epi16(stp2_12, stp2_13); \
-  stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \
-  stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \
-  stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \
-  \
-  MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \
-                         stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \
-                         stp1_18, stp1_29) \
-  MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \
-                         stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \
-                         stp1_22, stp1_25) \
-  \
-  stp1_16 = stp2_16; \
-  stp1_31 = stp2_31; \
-  stp1_19 = stp2_19; \
-  stp1_20 = stp2_20; \
-  stp1_23 = stp2_23; \
-  stp1_24 = stp2_24; \
-  stp1_27 = stp2_27; \
-  stp1_28 = stp2_28; \
-} \
-\
-/* Stage4 */ \
-{ \
-  const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]); \
-  const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]); \
-  const __m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]); \
-  const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]); \
-  \
-  const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
-  const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
-  const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-  const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-  \
-  MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0, \
-                         stg4_1, stg4_2, stg4_3, stp2_0, stp2_1, \
-                         stp2_2, stp2_3) \
-  \
-  stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \
-  stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \
-  stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \
-  stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \
-  \
-  MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \
-                         stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \
-                         stp2_10, stp2_13) \
-  \
-  stp2_8 = stp1_8; \
-  stp2_15 = stp1_15; \
-  stp2_11 = stp1_11; \
-  stp2_12 = stp1_12; \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \
-  stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \
-  stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \
-  stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \
-  stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \
-  stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \
-  stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \
-  stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \
-  \
-  stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \
-  stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \
-  stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \
-  stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \
-  stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \
-  stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \
-  stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \
-} \
-\
-/* Stage5 */ \
-{ \
-  const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-  const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-  const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \
-  const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \
-  \
-  const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \
-  const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \
-  const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \
-  const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \
-  \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  \
-  stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \
-  stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \
-  stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \
-  stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \
-  \
-  tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
-  tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
-  tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
-  tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
-  \
-  tmp0 = _mm_add_epi32(tmp0, rounding); \
-  tmp1 = _mm_add_epi32(tmp1, rounding); \
-  tmp2 = _mm_add_epi32(tmp2, rounding); \
-  tmp3 = _mm_add_epi32(tmp3, rounding); \
-  \
-  tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-  tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-  tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-  \
-  stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-  stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-  \
-  stp1_4 = stp2_4; \
-  stp1_7 = stp2_7; \
-  \
-  stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \
-  stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \
-  stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \
-  stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \
-  stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \
-  stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \
-  stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \
-  stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \
-  \
-  stp1_16 = stp2_16; \
-  stp1_17 = stp2_17; \
-  \
-  MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \
-                         stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \
-                         stp1_19, stp1_28) \
-  MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \
-                         stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  \
-  stp1_22 = stp2_22; \
-  stp1_23 = stp2_23; \
-  stp1_24 = stp2_24; \
-  stp1_25 = stp2_25; \
-  stp1_30 = stp2_30; \
-  stp1_31 = stp2_31; \
-} \
-\
-/* Stage6 */ \
-{ \
-  const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-  const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-  const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
-  const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
-  \
-  stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \
-  stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
-  stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
-  stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \
-  stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \
-  stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
-  stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
-  stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \
-  \
-  stp2_8 = stp1_8; \
-  stp2_9 = stp1_9; \
-  stp2_14 = stp1_14; \
-  stp2_15 = stp1_15; \
-  \
-  MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
-                         stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \
-                         stp2_13, stp2_11, stp2_12) \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \
-  stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \
-  stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \
-  stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \
-  stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \
-  stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \
-  stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \
-  stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \
-  \
-  stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \
-  stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \
-  stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \
-  stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \
-  stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \
-  stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \
-  stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \
-} \
-\
-/* Stage7 */ \
-{ \
-  const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \
-  const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  \
-  const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \
-  const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \
-  const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \
-  const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \
-  \
-  stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \
-  stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \
-  stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \
-  stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \
-  stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \
-  stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \
-  stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \
-  stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \
-  stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \
-  stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \
-  stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \
-  stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \
-  stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \
-  stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \
-  stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \
-  stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \
-  \
-  stp1_16 = stp2_16; \
-  stp1_17 = stp2_17; \
-  stp1_18 = stp2_18; \
-  stp1_19 = stp2_19; \
-  \
-  MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \
-                         stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \
-                         stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \
-                         stp1_23, stp1_24) \
-  \
-  stp1_28 = stp2_28; \
-  stp1_29 = stp2_29; \
-  stp1_30 = stp2_30; \
-  stp1_31 = stp2_31; \
-}
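+/* IDCT32: full 32-point inverse DCT column pass; all 32 input rows are
+   treated as potentially non-zero and run through seven butterfly stages. */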
+#define IDCT32                                                                 \
+  /* Stage1 */                                                                 \
+  {                                                                            \
+    const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], in[31]);                 \
+    const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]);                 \
+    const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]);               \
+    const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]);               \
+                                                                               \
+    const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]);                 \
+    const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]);                 \
+    const __m128i lo_25_7 = _mm_unpacklo_epi16(in[25], in[7]);                 \
+    const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], in[7]);                 \
+                                                                               \
+    const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]);                 \
+    const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]);                 \
+    const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]);               \
+    const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]);               \
+                                                                               \
+    const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]);               \
+    const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]);               \
+    const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]);                 \
+    const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]);                 \
+                                                                               \
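+    /* Each MULTIPLICATION_AND_ADD (defined earlier in this file) applies    \
+       two cosine rotations via madd/round/shift/pack and writes four stage  \
+       outputs. */                                                            \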
+    MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0,       \
+                           stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, stp1_17,  \
+                           stp1_30)                                            \
+    MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4, stg1_5, \
+                           stg1_6, stg1_7, stp1_18, stp1_29, stp1_19, stp1_28) \
+    MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8,       \
+                           stg1_9, stg1_10, stg1_11, stp1_20, stp1_27,         \
+                           stp1_21, stp1_26)                                   \
+    MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12,      \
+                           stg1_13, stg1_14, stg1_15, stp1_22, stp1_25,        \
+                           stp1_23, stp1_24)                                   \
+  }                                                                            \
+                                                                               \
+  /* Stage2 */                                                                 \
+  {                                                                            \
+    const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], in[30]);                 \
+    const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]);                 \
+    const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]);               \
+    const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]);               \
+                                                                               \
+    const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]);               \
+    const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]);               \
+    const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]);                 \
+    const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]);                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0,       \
+                           stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9,    \
+                           stp2_14)                                            \
+    MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4,       \
+                           stg2_5, stg2_6, stg2_7, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_17);                                 \
+    stp2_17 = _mm_sub_epi16(stp1_16, stp1_17);                                 \
+    stp2_18 = _mm_sub_epi16(stp1_19, stp1_18);                                 \
+    stp2_19 = _mm_add_epi16(stp1_19, stp1_18);                                 \
+                                                                               \
+    stp2_20 = _mm_add_epi16(stp1_20, stp1_21);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_20, stp1_21);                                 \
+    stp2_22 = _mm_sub_epi16(stp1_23, stp1_22);                                 \
+    stp2_23 = _mm_add_epi16(stp1_23, stp1_22);                                 \
+                                                                               \
+    stp2_24 = _mm_add_epi16(stp1_24, stp1_25);                                 \
+    stp2_25 = _mm_sub_epi16(stp1_24, stp1_25);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_27, stp1_26);                                 \
+    stp2_27 = _mm_add_epi16(stp1_27, stp1_26);                                 \
+                                                                               \
+    stp2_28 = _mm_add_epi16(stp1_28, stp1_29);                                 \
+    stp2_29 = _mm_sub_epi16(stp1_28, stp1_29);                                 \
+    stp2_30 = _mm_sub_epi16(stp1_31, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_31, stp1_30);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage3 */                                                                 \
+  {                                                                            \
+    const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]);                 \
+    const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]);                 \
+    const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]);               \
+    const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]);               \
+                                                                               \
+    const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30);             \
+    const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30);             \
+    const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);             \
+    const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);             \
+                                                                               \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+    const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);             \
+    const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0,       \
+                           stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5,     \
+                           stp1_6)                                             \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp2_8, stp2_9);                                    \
+    stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);                                    \
+    stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);                                 \
+    stp1_11 = _mm_add_epi16(stp2_11, stp2_10);                                 \
+    stp1_12 = _mm_add_epi16(stp2_12, stp2_13);                                 \
+    stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);                                 \
+    stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);                                 \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_14);                                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4,     \
+                           stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, stp1_18,  \
+                           stp1_29)                                            \
+    MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8,     \
+                           stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, stp1_22, \
+                           stp1_25)                                            \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_31 = stp2_31;                                                         \
+    stp1_19 = stp2_19;                                                         \
+    stp1_20 = stp2_20;                                                         \
+    stp1_23 = stp2_23;                                                         \
+    stp1_24 = stp2_24;                                                         \
+    stp1_27 = stp2_27;                                                         \
+    stp1_28 = stp2_28;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage4 */                                                                 \
+  {                                                                            \
+    const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]);                 \
+    const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]);                 \
+    const __m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]);                 \
+    const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]);                 \
+                                                                               \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);               \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);               \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0, stg4_1, \
+                           stg4_2, stg4_3, stp2_0, stp2_1, stp2_2, stp2_3)     \
+                                                                               \
+    stp2_4 = _mm_add_epi16(stp1_4, stp1_5);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);                                    \
+    stp2_7 = _mm_add_epi16(stp1_7, stp1_6);                                    \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,       \
+                           stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, stp2_10,   \
+                           stp2_13)                                            \
+                                                                               \
+    stp2_8 = stp1_8;                                                           \
+    stp2_15 = stp1_15;                                                         \
+    stp2_11 = stp1_11;                                                         \
+    stp2_12 = stp1_12;                                                         \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_19);                                 \
+    stp2_17 = _mm_add_epi16(stp1_17, stp1_18);                                 \
+    stp2_18 = _mm_sub_epi16(stp1_17, stp1_18);                                 \
+    stp2_19 = _mm_sub_epi16(stp1_16, stp1_19);                                 \
+    stp2_20 = _mm_sub_epi16(stp1_23, stp1_20);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_22, stp1_21);                                 \
+    stp2_22 = _mm_add_epi16(stp1_22, stp1_21);                                 \
+    stp2_23 = _mm_add_epi16(stp1_23, stp1_20);                                 \
+                                                                               \
+    stp2_24 = _mm_add_epi16(stp1_24, stp1_27);                                 \
+    stp2_25 = _mm_add_epi16(stp1_25, stp1_26);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_25, stp1_26);                                 \
+    stp2_27 = _mm_sub_epi16(stp1_24, stp1_27);                                 \
+    stp2_28 = _mm_sub_epi16(stp1_31, stp1_28);                                 \
+    stp2_29 = _mm_sub_epi16(stp1_30, stp1_29);                                 \
+    stp2_30 = _mm_add_epi16(stp1_29, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_28, stp1_31);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage5 */                                                                 \
+  {                                                                            \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);                 \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);                 \
+    const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);             \
+    const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);             \
+                                                                               \
+    const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28);             \
+    const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28);             \
+    const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);             \
+    const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);             \
+                                                                               \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+                                                                               \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_3);                                    \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_2);                                    \
+    stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);                                    \
+    stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);                                    \
+                                                                               \
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);                                     \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);                                     \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);                                     \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);                                     \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+                                                                               \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                      \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                      \
+                                                                               \
+    stp1_4 = stp2_4;                                                           \
+    stp1_7 = stp2_7;                                                           \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp2_8, stp2_11);                                   \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);                                   \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp2_8, stp2_11);                                  \
+    stp1_12 = _mm_sub_epi16(stp2_15, stp2_12);                                 \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);                                 \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);                                 \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_12);                                 \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_17 = stp2_17;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4,     \
+                           stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, stp1_19,  \
+                           stp1_28)                                            \
+    MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6,     \
+                           stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, stp1_21,  \
+                           stp1_26)                                            \
+                                                                               \
+    stp1_22 = stp2_22;                                                         \
+    stp1_23 = stp2_23;                                                         \
+    stp1_24 = stp2_24;                                                         \
+    stp1_25 = stp2_25;                                                         \
+    stp1_30 = stp2_30;                                                         \
+    stp1_31 = stp2_31;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage6 */                                                                 \
+  {                                                                            \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);             \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);             \
+                                                                               \
+    stp2_0 = _mm_add_epi16(stp1_0, stp1_7);                                    \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);                                    \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);                                    \
+    stp2_3 = _mm_add_epi16(stp1_3, stp1_4);                                    \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp1_4);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);                                    \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp1_7);                                    \
+                                                                               \
+    stp2_8 = stp1_8;                                                           \
+    stp2_9 = stp1_9;                                                           \
+    stp2_14 = stp1_14;                                                         \
+    stp2_15 = stp1_15;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_23);                                 \
+    stp2_17 = _mm_add_epi16(stp1_17, stp1_22);                                 \
+    stp2_18 = _mm_add_epi16(stp1_18, stp1_21);                                 \
+    stp2_19 = _mm_add_epi16(stp1_19, stp1_20);                                 \
+    stp2_20 = _mm_sub_epi16(stp1_19, stp1_20);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_18, stp1_21);                                 \
+    stp2_22 = _mm_sub_epi16(stp1_17, stp1_22);                                 \
+    stp2_23 = _mm_sub_epi16(stp1_16, stp1_23);                                 \
+                                                                               \
+    stp2_24 = _mm_sub_epi16(stp1_31, stp1_24);                                 \
+    stp2_25 = _mm_sub_epi16(stp1_30, stp1_25);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_29, stp1_26);                                 \
+    stp2_27 = _mm_sub_epi16(stp1_28, stp1_27);                                 \
+    stp2_28 = _mm_add_epi16(stp1_27, stp1_28);                                 \
+    stp2_29 = _mm_add_epi16(stp1_26, stp1_29);                                 \
+    stp2_30 = _mm_add_epi16(stp1_25, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_24, stp1_31);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage7 */                                                                 \
+  {                                                                            \
+    const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);             \
+    const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);             \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+                                                                               \
+    const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);             \
+    const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);             \
+    const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24);             \
+    const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24);             \
+                                                                               \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_15);                                   \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_14);                                   \
+    stp1_2 = _mm_add_epi16(stp2_2, stp2_13);                                   \
+    stp1_3 = _mm_add_epi16(stp2_3, stp2_12);                                   \
+    stp1_4 = _mm_add_epi16(stp2_4, stp2_11);                                   \
+    stp1_5 = _mm_add_epi16(stp2_5, stp2_10);                                   \
+    stp1_6 = _mm_add_epi16(stp2_6, stp2_9);                                    \
+    stp1_7 = _mm_add_epi16(stp2_7, stp2_8);                                    \
+    stp1_8 = _mm_sub_epi16(stp2_7, stp2_8);                                    \
+    stp1_9 = _mm_sub_epi16(stp2_6, stp2_9);                                    \
+    stp1_10 = _mm_sub_epi16(stp2_5, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp2_4, stp2_11);                                  \
+    stp1_12 = _mm_sub_epi16(stp2_3, stp2_12);                                  \
+    stp1_13 = _mm_sub_epi16(stp2_2, stp2_13);                                  \
+    stp1_14 = _mm_sub_epi16(stp2_1, stp2_14);                                  \
+    stp1_15 = _mm_sub_epi16(stp2_0, stp2_15);                                  \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_17 = stp2_17;                                                         \
+    stp1_18 = stp2_18;                                                         \
+    stp1_19 = stp2_19;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, stp1_21,  \
+                           stp1_26)                                            \
+    MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, stp1_23,  \
+                           stp1_24)                                            \
+                                                                               \
+    stp1_28 = stp2_28;                                                         \
+    stp1_29 = stp2_29;                                                         \
+    stp1_30 = stp2_30;                                                         \
+    stp1_31 = stp2_31;                                                         \
+  }
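
/* Editor's note: the stages above lean heavily on MULTIPLICATION_AND_ADD.
 * Below is a minimal sketch of what one such expansion computes for a single
 * input pair, assuming the file's usual fixed-point constants
 * (DCT_CONST_ROUNDING, DCT_CONST_BITS) and pair_set_epi16-style cosine-pair
 * constants; butterfly_sketch is an illustrative name, not part of this
 * change. */
static void butterfly_sketch(__m128i in0, __m128i in1, __m128i cst0,
                             __m128i cst1, __m128i *out0, __m128i *out1) {
  const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
  /* Interleave so each 32-bit lane holds an (a, b) pair of 16-bit inputs. */
  const __m128i lo = _mm_unpacklo_epi16(in0, in1);
  const __m128i hi = _mm_unpackhi_epi16(in0, in1);
  /* _mm_madd_epi16 yields a*c0 + b*c1 per 32-bit lane. */
  __m128i t0 = _mm_madd_epi16(lo, cst0);
  __m128i t1 = _mm_madd_epi16(hi, cst0);
  __m128i t2 = _mm_madd_epi16(lo, cst1);
  __m128i t3 = _mm_madd_epi16(hi, cst1);
  /* Round, drop the fixed-point bits, and pack back down to 16 bits. */
  t0 = _mm_srai_epi32(_mm_add_epi32(t0, rounding), DCT_CONST_BITS);
  t1 = _mm_srai_epi32(_mm_add_epi32(t1, rounding), DCT_CONST_BITS);
  t2 = _mm_srai_epi32(_mm_add_epi32(t2, rounding), DCT_CONST_BITS);
  t3 = _mm_srai_epi32(_mm_add_epi32(t3, rounding), DCT_CONST_BITS);
  *out0 = _mm_packs_epi32(t0, t1);
  *out1 = _mm_packs_epi32(t2, t3);
}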
 
 // Only upper-left 8x8 has non-zero coeff
 void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
-                               int stride) {
+                                int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
-  const __m128i final_rounding = _mm_set1_epi16(1<<5);
+  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
 
   // vp10_idct constants for each stage
   const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
@@ -3070,15 +3055,13 @@ void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
 
   __m128i in[32], col[32];
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
-          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
-          stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22,
-          stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29,
-          stp1_30, stp1_31;
+      stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+      stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, stp1_23,
+      stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, stp1_30, stp1_31;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
-          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
-          stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22,
-          stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29,
-          stp2_30, stp2_31;
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
+      stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, stp2_23,
+      stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i;
 
@@ -3191,7 +3174,7 @@ void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
 }
 
 void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
-                                 int stride) {
+                                  int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
@@ -3246,15 +3229,13 @@ void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
 
   __m128i in[32], col[128], zero_idx[16];
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
-          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
-          stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22,
-          stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29,
-          stp1_30, stp1_31;
+      stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+      stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, stp1_23,
+      stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, stp1_30, stp1_31;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
-          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
-          stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22,
-          stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29,
-          stp2_30, stp2_31;
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
+      stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, stp2_23,
+      stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i, j, i32;
 
@@ -3466,8 +3447,7 @@ void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct32x32_1_add_sse2(const int16_t *input,
-                               uint8_t *dest,
+void vp10_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
                                int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
@@ -3503,7 +3483,7 @@ static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
 }
 
 void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int bd) {
+                                     int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -3607,8 +3587,7 @@ void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
     tran_low_t temp_in[4], temp_out[4];
     // Columns
     for (i = 0; i < 4; ++i) {
-      for (j = 0; j < 4; ++j)
-        temp_in[j] = out[j * 4 + i];
+      for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
       vp10_highbd_idct4_c(temp_in, temp_out, bd);
       for (j = 0; j < 4; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
@@ -3619,7 +3598,7 @@ void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int bd) {
+                                     int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3697,19 +3676,18 @@ void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
       __m128i d[8];
       for (i = 0; i < 8; i++) {
         inptr[i] = _mm_add_epi16(inptr[i], sixteen);
-        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride*i));
+        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
         inptr[i] = _mm_srai_epi16(inptr[i], 5);
         d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
         // Store
-        _mm_storeu_si128((__m128i *)(dest + stride*i), d[i]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
       }
     }
   } else {
     // Run the un-optimised column transform
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
-      for (j = 0; j < 8; ++j)
-        temp_in[j] = out[j * 8 + i];
+      for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
       vp10_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
@@ -3720,7 +3698,7 @@ void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                    int stride, int bd) {
+                                     int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3801,19 +3779,18 @@ void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
       __m128i d[8];
       for (i = 0; i < 8; i++) {
         inptr[i] = _mm_add_epi16(inptr[i], sixteen);
-        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride*i));
+        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
         inptr[i] = _mm_srai_epi16(inptr[i], 5);
         d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
         // Store
-        _mm_storeu_si128((__m128i *)(dest + stride*i), d[i]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
       }
     }
   } else {
     // Run the un-optimised column transform
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
-      for (j = 0; j < 8; ++j)
-        temp_in[j] = out[j * 8 + i];
+      for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
       vp10_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
@@ -3824,7 +3801,7 @@ void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
 }
 
 void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                       int stride, int bd) {
+                                        int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   int i, j, test;
@@ -3909,25 +3886,24 @@ void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
     {
       __m128i d[2];
       for (i = 0; i < 16; i++) {
-        inptr[i   ] = _mm_add_epi16(inptr[i   ], rounding);
-        inptr[i+16] = _mm_add_epi16(inptr[i+16], rounding);
-        d[0] = _mm_loadu_si128((const __m128i *)(dest + stride*i));
-        d[1] = _mm_loadu_si128((const __m128i *)(dest + stride*i + 8));
-        inptr[i   ] = _mm_srai_epi16(inptr[i   ], 6);
-        inptr[i+16] = _mm_srai_epi16(inptr[i+16], 6);
-        d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i   ]), bd);
-        d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i+16]), bd);
+        inptr[i] = _mm_add_epi16(inptr[i], rounding);
+        inptr[i + 16] = _mm_add_epi16(inptr[i + 16], rounding);
+        d[0] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
+        d[1] = _mm_loadu_si128((const __m128i *)(dest + stride * i + 8));
+        inptr[i] = _mm_srai_epi16(inptr[i], 6);
+        inptr[i + 16] = _mm_srai_epi16(inptr[i + 16], 6);
+        d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i]), bd);
+        d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i + 16]), bd);
         // Store
-        _mm_storeu_si128((__m128i *)(dest + stride*i), d[0]);
-        _mm_storeu_si128((__m128i *)(dest + stride*i + 8), d[1]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[0]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i + 8), d[1]);
       }
     }
   } else {
     // Run the un-optimised column transform
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
-      for (j = 0; j < 16; ++j)
-        temp_in[j] = out[j * 16 + i];
+      for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
       vp10_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
@@ -3938,7 +3914,7 @@ void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
 }
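
/* Editor's note: a scalar sketch of the vector store loop above, assuming
 * `rounding` holds 1 << 5 to pair with the shift by 6 (as the 32x32 paths'
 * final_rounding suggests); highbd_recon_row_sketch is an illustrative name
 * and relies on the file's <stdint.h> types. */
static void highbd_recon_row_sketch(const int16_t *res, uint16_t *dest, int n,
                                    int bd) {
  const int max = (1 << bd) - 1;
  int i;
  for (i = 0; i < n; ++i) {
    /* Round, shift out the transform scaling, add the prediction, clamp. */
    const int v = dest[i] + ((res[i] + (1 << 5)) >> 6);
    dest[i] = (uint16_t)(v < 0 ? 0 : (v > max ? max : v));
  }
}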
 
 void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
-                                      int stride, int bd) {
+                                       int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   int i, j, test;
@@ -4028,25 +4004,24 @@ void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
     {
       __m128i d[2];
       for (i = 0; i < 16; i++) {
-        inptr[i   ] = _mm_add_epi16(inptr[i   ], rounding);
-        inptr[i+16] = _mm_add_epi16(inptr[i+16], rounding);
-        d[0] = _mm_loadu_si128((const __m128i *)(dest + stride*i));
-        d[1] = _mm_loadu_si128((const __m128i *)(dest + stride*i + 8));
-        inptr[i   ] = _mm_srai_epi16(inptr[i   ], 6);
-        inptr[i+16] = _mm_srai_epi16(inptr[i+16], 6);
-        d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i   ]), bd);
-        d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i+16]), bd);
+        inptr[i] = _mm_add_epi16(inptr[i], rounding);
+        inptr[i + 16] = _mm_add_epi16(inptr[i + 16], rounding);
+        d[0] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
+        d[1] = _mm_loadu_si128((const __m128i *)(dest + stride * i + 8));
+        inptr[i] = _mm_srai_epi16(inptr[i], 6);
+        inptr[i + 16] = _mm_srai_epi16(inptr[i + 16], 6);
+        d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i]), bd);
+        d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i + 16]), bd);
         // Store
-        _mm_storeu_si128((__m128i *)(dest + stride*i), d[0]);
-        _mm_storeu_si128((__m128i *)(dest + stride*i + 8), d[1]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[0]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i + 8), d[1]);
       }
     }
   } else {
     // Run the un-optimised column transform
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
-      for (j = 0; j < 16; ++j)
-        temp_in[j] = out[j * 16 + i];
+      for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
       vp10_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
diff --git a/vp10/common/x86/vp10_inv_txfm_sse2.h b/vp10/common/x86/vp10_inv_txfm_sse2.h
index b79781aeeb277d1ee57e2cfacd3b44196ca67f4a..9d8c46bfc0fb5be5d64348111bd58908ca059036 100644
--- a/vp10/common/x86/vp10_inv_txfm_sse2.h
+++ b/vp10/common/x86/vp10_inv_txfm_sse2.h
@@ -46,16 +46,16 @@ static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
   res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
 }
 
-#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1) \
+#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1)   \
   {                                                     \
     const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
     const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
                                                         \
-    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1);  /* i1 i0 */  \
-    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1);  /* i3 i2 */  \
+    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \
+    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \
   }
 
-static INLINE void array_transpose_4X8(__m128i *in, __m128i * out) {
+static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) {
   const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
   const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
   const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
@@ -90,36 +90,36 @@ static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
 }
 
 static INLINE void load_buffer_8x16(const int16_t *input, __m128i *in) {
-  in[0]  = _mm_load_si128((const __m128i *)(input + 0 * 16));
-  in[1]  = _mm_load_si128((const __m128i *)(input + 1 * 16));
-  in[2]  = _mm_load_si128((const __m128i *)(input + 2 * 16));
-  in[3]  = _mm_load_si128((const __m128i *)(input + 3 * 16));
-  in[4]  = _mm_load_si128((const __m128i *)(input + 4 * 16));
-  in[5]  = _mm_load_si128((const __m128i *)(input + 5 * 16));
-  in[6]  = _mm_load_si128((const __m128i *)(input + 6 * 16));
-  in[7]  = _mm_load_si128((const __m128i *)(input + 7 * 16));
-
-  in[8]  = _mm_load_si128((const __m128i *)(input + 8 * 16));
-  in[9]  = _mm_load_si128((const __m128i *)(input + 9 * 16));
-  in[10]  = _mm_load_si128((const __m128i *)(input + 10 * 16));
-  in[11]  = _mm_load_si128((const __m128i *)(input + 11 * 16));
-  in[12]  = _mm_load_si128((const __m128i *)(input + 12 * 16));
-  in[13]  = _mm_load_si128((const __m128i *)(input + 13 * 16));
-  in[14]  = _mm_load_si128((const __m128i *)(input + 14 * 16));
-  in[15]  = _mm_load_si128((const __m128i *)(input + 15 * 16));
+  in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16));
+  in[1] = _mm_load_si128((const __m128i *)(input + 1 * 16));
+  in[2] = _mm_load_si128((const __m128i *)(input + 2 * 16));
+  in[3] = _mm_load_si128((const __m128i *)(input + 3 * 16));
+  in[4] = _mm_load_si128((const __m128i *)(input + 4 * 16));
+  in[5] = _mm_load_si128((const __m128i *)(input + 5 * 16));
+  in[6] = _mm_load_si128((const __m128i *)(input + 6 * 16));
+  in[7] = _mm_load_si128((const __m128i *)(input + 7 * 16));
+
+  in[8] = _mm_load_si128((const __m128i *)(input + 8 * 16));
+  in[9] = _mm_load_si128((const __m128i *)(input + 9 * 16));
+  in[10] = _mm_load_si128((const __m128i *)(input + 10 * 16));
+  in[11] = _mm_load_si128((const __m128i *)(input + 11 * 16));
+  in[12] = _mm_load_si128((const __m128i *)(input + 12 * 16));
+  in[13] = _mm_load_si128((const __m128i *)(input + 13 * 16));
+  in[14] = _mm_load_si128((const __m128i *)(input + 14 * 16));
+  in[15] = _mm_load_si128((const __m128i *)(input + 15 * 16));
 }
 
-#define RECON_AND_STORE(dest, in_x) \
-  {                                                     \
-     __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
-      d0 = _mm_unpacklo_epi8(d0, zero); \
-      d0 = _mm_add_epi16(in_x, d0); \
-      d0 = _mm_packus_epi16(d0, d0); \
-      _mm_storel_epi64((__m128i *)(dest), d0); \
+#define RECON_AND_STORE(dest, in_x)                  \
+  {                                                  \
+    __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
+    d0 = _mm_unpacklo_epi8(d0, zero);                \
+    d0 = _mm_add_epi16(in_x, d0);                    \
+    d0 = _mm_packus_epi16(d0, d0);                   \
+    _mm_storel_epi64((__m128i *)(dest), d0);         \
   }
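
/* Editor's note: RECON_AND_STORE above, restated as scalar C for reference
 * (a sketch; recon_row_sketch is an illustrative name). It takes eight
 * prediction pixels, widens them, adds the 16-bit residue, and saturates
 * back to 8 bits, mirroring the loadl/unpacklo/add/packus sequence. */
static void recon_row_sketch(uint8_t *dest, const int16_t *in_x) {
  int i;
  for (i = 0; i < 8; ++i) {
    const int v = dest[i] + in_x[i];
    /* _mm_packus_epi16 saturates each lane to [0, 255]. */
    dest[i] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
  }
}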
 
 static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
-  const __m128i final_rounding = _mm_set1_epi16(1<<5);
+  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
   // Final rounding and shift
   in[0] = _mm_adds_epi16(in[0], final_rounding);
@@ -156,16 +156,16 @@ static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
   in[14] = _mm_srai_epi16(in[14], 6);
   in[15] = _mm_srai_epi16(in[15], 6);
 
-  RECON_AND_STORE(dest +  0 * stride, in[0]);
-  RECON_AND_STORE(dest +  1 * stride, in[1]);
-  RECON_AND_STORE(dest +  2 * stride, in[2]);
-  RECON_AND_STORE(dest +  3 * stride, in[3]);
-  RECON_AND_STORE(dest +  4 * stride, in[4]);
-  RECON_AND_STORE(dest +  5 * stride, in[5]);
-  RECON_AND_STORE(dest +  6 * stride, in[6]);
-  RECON_AND_STORE(dest +  7 * stride, in[7]);
-  RECON_AND_STORE(dest +  8 * stride, in[8]);
-  RECON_AND_STORE(dest +  9 * stride, in[9]);
+  RECON_AND_STORE(dest + 0 * stride, in[0]);
+  RECON_AND_STORE(dest + 1 * stride, in[1]);
+  RECON_AND_STORE(dest + 2 * stride, in[2]);
+  RECON_AND_STORE(dest + 3 * stride, in[3]);
+  RECON_AND_STORE(dest + 4 * stride, in[4]);
+  RECON_AND_STORE(dest + 5 * stride, in[5]);
+  RECON_AND_STORE(dest + 6 * stride, in[6]);
+  RECON_AND_STORE(dest + 7 * stride, in[7]);
+  RECON_AND_STORE(dest + 8 * stride, in[8]);
+  RECON_AND_STORE(dest + 9 * stride, in[9]);
   RECON_AND_STORE(dest + 10 * stride, in[10]);
   RECON_AND_STORE(dest + 11 * stride, in[11]);
   RECON_AND_STORE(dest + 12 * stride, in[12]);
diff --git a/vp10/decoder/decodeframe.c b/vp10/decoder/decodeframe.c
index 6b5d1463031c115a626a96ce81f58da13114cb60..719c061419a69a30447685729993e5837d459518 100644
--- a/vp10/decoder/decodeframe.c
+++ b/vp10/decoder/decodeframe.c
@@ -47,23 +47,21 @@
 
 static int is_compound_reference_allowed(const VP10_COMMON *cm) {
   int i;
-  if (frame_is_intra_only(cm))
-    return 0;
+  if (frame_is_intra_only(cm)) return 0;
   for (i = 1; i < REFS_PER_FRAME; ++i)
-    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
-      return 1;
+    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) return 1;
 
   return 0;
 }
 
 static void setup_compound_reference_mode(VP10_COMMON *cm) {
   if (cm->ref_frame_sign_bias[LAST_FRAME] ==
-          cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
+      cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
     cm->comp_fixed_ref = ALTREF_FRAME;
     cm->comp_var_ref[0] = LAST_FRAME;
     cm->comp_var_ref[1] = GOLDEN_FRAME;
   } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
-                 cm->ref_frame_sign_bias[ALTREF_FRAME]) {
+             cm->ref_frame_sign_bias[ALTREF_FRAME]) {
     cm->comp_fixed_ref = GOLDEN_FRAME;
     cm->comp_var_ref[0] = LAST_FRAME;
     cm->comp_var_ref[1] = ALTREF_FRAME;
@@ -90,8 +88,7 @@ static TX_MODE read_tx_mode(struct vpx_read_bit_buffer *rb) {
 #else
 static TX_MODE read_tx_mode(vpx_reader *r) {
   TX_MODE tx_mode = vpx_read_literal(r, 2);
-  if (tx_mode == ALLOW_32X32)
-    tx_mode += vpx_read_bit(r);
+  if (tx_mode == ALLOW_32X32) tx_mode += vpx_read_bit(r);
   return tx_mode;
 }
 #endif
@@ -127,12 +124,12 @@ static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
 }
 
 #if CONFIG_MISC_FIXES
-static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
-    struct vpx_read_bit_buffer *rb) {
+static REFERENCE_MODE read_frame_reference_mode(
+    const VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
   if (is_compound_reference_allowed(cm)) {
-    return vpx_rb_read_bit(rb) ? REFERENCE_MODE_SELECT
-                               : (vpx_rb_read_bit(rb) ? COMPOUND_REFERENCE
-                                                      : SINGLE_REFERENCE);
+    return vpx_rb_read_bit(rb)
+               ? REFERENCE_MODE_SELECT
+               : (vpx_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
   } else {
     return SINGLE_REFERENCE;
   }
@@ -141,9 +138,9 @@ static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
 static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
                                                 vpx_reader *r) {
   if (is_compound_reference_allowed(cm)) {
-    return vpx_read_bit(r) ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT
-                                              : COMPOUND_REFERENCE)
-                           : SINGLE_REFERENCE;
+    return vpx_read_bit(r)
+               ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT : COMPOUND_REFERENCE)
+               : SINGLE_REFERENCE;
   } else {
     return SINGLE_REFERENCE;
   }
@@ -175,8 +172,7 @@ static void update_mv_probs(vpx_prob *p, int n, vpx_reader *r) {
 #if CONFIG_MISC_FIXES
     vp10_diff_update_prob(r, &p[i]);
 #else
-    if (vpx_read(r, MV_UPDATE_PROB))
-      p[i] = (vpx_read_literal(r, 7) << 1) | 1;
+    if (vpx_read(r, MV_UPDATE_PROB)) p[i] = (vpx_read_literal(r, 7) << 1) | 1;
 #endif
 }
 
@@ -209,10 +205,9 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) {
   }
 }
 
-static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
-                                          const TX_SIZE tx_size,
-                                          uint8_t *dst, int stride,
-                                          int eob, int block) {
+static void inverse_transform_block_inter(MACROBLOCKD *xd, int plane,
+                                          const TX_SIZE tx_size, uint8_t *dst,
+                                          int stride, int eob, int block) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   TX_TYPE tx_type = get_tx_type(pd->plane_type, xd, block);
   const int seg_id = xd->mi[0]->mbmi.segment_id;
@@ -237,9 +232,7 @@ static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
           vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
                                          tx_type);
           break;
-        default:
-          assert(0 && "Invalid transform size");
-          return;
+        default: assert(0 && "Invalid transform size"); return;
       }
     } else {
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -257,9 +250,7 @@ static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
         case TX_32X32:
           vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
           break;
-        default:
-          assert(0 && "Invalid transform size");
-          return;
+        default: assert(0 && "Invalid transform size"); return;
       }
 #if CONFIG_VPX_HIGHBITDEPTH
     }
@@ -278,11 +269,10 @@ static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
   }
 }
 
-static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
+static void inverse_transform_block_intra(MACROBLOCKD *xd, int plane,
                                           const TX_TYPE tx_type,
-                                          const TX_SIZE tx_size,
-                                          uint8_t *dst, int stride,
-                                          int eob) {
+                                          const TX_SIZE tx_size, uint8_t *dst,
+                                          int stride, int eob) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const int seg_id = xd->mi[0]->mbmi.segment_id;
   if (eob > 0) {
@@ -306,9 +296,7 @@ static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
           vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
                                          tx_type);
           break;
-        default:
-          assert(0 && "Invalid transform size");
-          return;
+        default: assert(0 && "Invalid transform size"); return;
       }
     } else {
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -326,9 +314,7 @@ static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
         case TX_32X32:
           vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
           break;
-        default:
-          assert(0 && "Invalid transform size");
-          return;
+        default: assert(0 && "Invalid transform size"); return;
       }
 #if CONFIG_VPX_HIGHBITDEPTH
     }
@@ -350,8 +336,7 @@ static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
 static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
                                                 vpx_reader *r,
                                                 MB_MODE_INFO *const mbmi,
-                                                int plane,
-                                                int row, int col,
+                                                int plane, int row, int col,
                                                 TX_SIZE tx_size) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   PREDICTION_MODE mode = (plane == 0) ? mbmi->mode : mbmi->uv_mode;
@@ -361,43 +346,42 @@ static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
   dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
 
   if (mbmi->sb_type < BLOCK_8X8)
-    if (plane == 0)
-      mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
+    if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
 
-  vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode,
-                          dst, pd->dst.stride, dst, pd->dst.stride,
-                          col, row, plane);
+  vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
+                           pd->dst.stride, dst, pd->dst.stride, col, row,
+                           plane);
 
   if (!mbmi->skip) {
     TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx);
     const scan_order *sc = get_scan(tx_size, tx_type);
     const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
                                              r, mbmi->segment_id);
-    inverse_transform_block_intra(xd, plane, tx_type, tx_size,
-                                  dst, pd->dst.stride, eob);
+    inverse_transform_block_intra(xd, plane, tx_type, tx_size, dst,
+                                  pd->dst.stride, eob);
   }
 }
 
 static int reconstruct_inter_block(MACROBLOCKD *const xd, vpx_reader *r,
-                                   MB_MODE_INFO *const mbmi, int plane,
-                                   int row, int col, TX_SIZE tx_size) {
+                                   MB_MODE_INFO *const mbmi, int plane, int row,
+                                   int col, TX_SIZE tx_size) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
   int block_idx = (row << 1) + col;
   TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx);
   const scan_order *sc = get_scan(tx_size, tx_type);
   const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,
-                                          mbmi->segment_id);
+                                           mbmi->segment_id);
 
-  inverse_transform_block_inter(xd, plane, tx_size,
-                            &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
-                            pd->dst.stride, eob, block_idx);
+  inverse_transform_block_inter(
+      xd, plane, tx_size, &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
+      pd->dst.stride, eob, block_idx);
   return eob;
 }
 
-static void build_mc_border(const uint8_t *src, int src_stride,
-                            uint8_t *dst, int dst_stride,
-                            int x, int y, int b_w, int b_h, int w, int h) {
+static void build_mc_border(const uint8_t *src, int src_stride, uint8_t *dst,
+                            int dst_stride, int x, int y, int b_w, int b_h,
+                            int w, int h) {
   // Get a pointer to the start of the real data for this row.
   const uint8_t *ref_row = src - x - y * src_stride;
 
@@ -410,39 +394,31 @@ static void build_mc_border(const uint8_t *src, int src_stride,
     int right = 0, copy;
     int left = x < 0 ? -x : 0;
 
-    if (left > b_w)
-      left = b_w;
+    if (left > b_w) left = b_w;
 
-    if (x + b_w > w)
-      right = x + b_w - w;
+    if (x + b_w > w) right = x + b_w - w;
 
-    if (right > b_w)
-      right = b_w;
+    if (right > b_w) right = b_w;
 
     copy = b_w - left - right;
 
-    if (left)
-      memset(dst, ref_row[0], left);
+    if (left) memset(dst, ref_row[0], left);
 
-    if (copy)
-      memcpy(dst + left, ref_row + x + left, copy);
+    if (copy) memcpy(dst + left, ref_row + x + left, copy);
 
-    if (right)
-      memset(dst + left + copy, ref_row[w - 1], right);
+    if (right) memset(dst + left + copy, ref_row[w - 1], right);
 
     dst += dst_stride;
     ++y;
 
-    if (y > 0 && y < h)
-      ref_row += src_stride;
+    if (y > 0 && y < h) ref_row += src_stride;
   } while (--b_h);
 }
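
/* Editor's note: a concrete (made-up) case for the left/copy/right split in
 * build_mc_border above: a b_w = 16 row fetched at x = -3 from a frame row
 * of width w = 10. assert() is already used elsewhere in this file. */
static void border_split_example(void) {
  const int x = -3, b_w = 16, w = 10;
  int left = x < 0 ? -x : 0;                 /* 3: replicate ref_row[0] */
  int right = x + b_w > w ? x + b_w - w : 0; /* 3: replicate ref_row[w - 1] */
  int copy = b_w - left - right;             /* 10: pixels that really exist */
  assert(left == 3 && copy == 10 && right == 3);
}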
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static void high_build_mc_border(const uint8_t *src8, int src_stride,
-                                 uint16_t *dst, int dst_stride,
-                                 int x, int y, int b_w, int b_h,
-                                 int w, int h) {
+                                 uint16_t *dst, int dst_stride, int x, int y,
+                                 int b_w, int b_h, int w, int h) {
   // Get a pointer to the start of the real data for this row.
   const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   const uint16_t *ref_row = src - x - y * src_stride;
@@ -456,31 +432,24 @@ static void high_build_mc_border(const uint8_t *src8, int src_stride,
     int right = 0, copy;
     int left = x < 0 ? -x : 0;
 
-    if (left > b_w)
-      left = b_w;
+    if (left > b_w) left = b_w;
 
-    if (x + b_w > w)
-      right = x + b_w - w;
+    if (x + b_w > w) right = x + b_w - w;
 
-    if (right > b_w)
-      right = b_w;
+    if (right > b_w) right = b_w;
 
     copy = b_w - left - right;
 
-    if (left)
-      vpx_memset16(dst, ref_row[0], left);
+    if (left) vpx_memset16(dst, ref_row[0], left);
 
-    if (copy)
-      memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
+    if (copy) memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
 
-    if (right)
-      vpx_memset16(dst + left + copy, ref_row[w - 1], right);
+    if (right) vpx_memset16(dst + left + copy, ref_row[w - 1], right);
 
     dst += dst_stride;
     ++y;
 
-    if (y > 0 && y < h)
-      ref_row += src_stride;
+    if (y > 0 && y < h) ref_row += src_stride;
   } while (--b_h);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -489,71 +458,65 @@ static void high_build_mc_border(const uint8_t *src8, int src_stride,
 static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                                int x0, int y0, int b_w, int b_h,
                                int frame_width, int frame_height,
-                               int border_offset,
-                               uint8_t *const dst, int dst_buf_stride,
-                               int subpel_x, int subpel_y,
+                               int border_offset, uint8_t *const dst,
+                               int dst_buf_stride, int subpel_x, int subpel_y,
                                const InterpKernel *kernel,
-                               const struct scale_factors *sf,
-                               MACROBLOCKD *xd,
+                               const struct scale_factors *sf, MACROBLOCKD *xd,
                                int w, int h, int ref, int xs, int ys) {
   DECLARE_ALIGNED(16, uint16_t, mc_buf_high[80 * 2 * 80 * 2]);
   const uint8_t *buf_ptr;
 
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w,
-                         x0, y0, b_w, b_h, frame_width, frame_height);
+    high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w, x0, y0,
+                         b_w, b_h, frame_width, frame_height);
     buf_ptr = CONVERT_TO_BYTEPTR(mc_buf_high) + border_offset;
   } else {
-    build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w,
-                    x0, y0, b_w, b_h, frame_width, frame_height);
+    build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w, x0,
+                    y0, b_w, b_h, frame_width, frame_height);
     buf_ptr = ((uint8_t *)mc_buf_high) + border_offset;
   }
 
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    high_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
-                         subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
+    high_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y,
+                         sf, w, h, ref, kernel, xs, ys, xd->bd);
   } else {
-    inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
-                    subpel_y, sf, w, h, ref, kernel, xs, ys);
+    inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf,
+                    w, h, ref, kernel, xs, ys);
   }
 }
 #else
 static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                                int x0, int y0, int b_w, int b_h,
                                int frame_width, int frame_height,
-                               int border_offset,
-                               uint8_t *const dst, int dst_buf_stride,
-                               int subpel_x, int subpel_y,
+                               int border_offset, uint8_t *const dst,
+                               int dst_buf_stride, int subpel_x, int subpel_y,
                                const InterpKernel *kernel,
-                               const struct scale_factors *sf,
-                               int w, int h, int ref, int xs, int ys) {
+                               const struct scale_factors *sf, int w, int h,
+                               int ref, int xs, int ys) {
   DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]);
   const uint8_t *buf_ptr;
 
-  build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w,
-                  x0, y0, b_w, b_h, frame_width, frame_height);
+  build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w, x0, y0, b_w, b_h,
+                  frame_width, frame_height);
   buf_ptr = mc_buf + border_offset;
 
-  inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
-                  subpel_y, sf, w, h, ref, kernel, xs, ys);
+  inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf, w,
+                  h, ref, kernel, xs, ys);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
-                                       int plane, int bw, int bh, int x,
-                                       int y, int w, int h, int mi_x, int mi_y,
-                                       const InterpKernel *kernel,
-                                       const struct scale_factors *sf,
-                                       struct buf_2d *pre_buf,
-                                       struct buf_2d *dst_buf, const MV* mv,
-                                       RefCntBuffer *ref_frame_buf,
-                                       int is_scaled, int ref) {
+static void dec_build_inter_predictors(
+    VP10Decoder *const pbi, MACROBLOCKD *xd, int plane, int bw, int bh, int x,
+    int y, int w, int h, int mi_x, int mi_y, const InterpKernel *kernel,
+    const struct scale_factors *sf, struct buf_2d *pre_buf,
+    struct buf_2d *dst_buf, const MV *mv, RefCntBuffer *ref_frame_buf,
+    int is_scaled, int ref) {
   VP10_COMMON *const cm = &pbi->common;
   struct macroblockd_plane *const pd = &xd->plane[plane];
   uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
   MV32 scaled_mv;
-  int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height,
-      buf_stride, subpel_x, subpel_y;
+  int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
+      subpel_x, subpel_y;
   uint8_t *ref_frame, *buf_ptr;
 
   // Get reference frame pointer, width and height.
@@ -564,14 +527,13 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
   } else {
     frame_width = ref_frame_buf->buf.uv_crop_width;
     frame_height = ref_frame_buf->buf.uv_crop_height;
-    ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
-                         : ref_frame_buf->buf.v_buffer;
+    ref_frame =
+        plane == 1 ? ref_frame_buf->buf.u_buffer : ref_frame_buf->buf.v_buffer;
   }
 
   if (is_scaled) {
-    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, mv, bw, bh,
-                                               pd->subsampling_x,
-                                               pd->subsampling_y);
+    const MV mv_q4 = clamp_mv_to_umv_border_sb(
+        xd, mv, bw, bh, pd->subsampling_x, pd->subsampling_y);
     // Co-ordinate of containing block to pixel precision.
     int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
     int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
@@ -623,8 +585,8 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
 
   // Do border extension if there is motion or the
   // width/height is not a multiple of 8 pixels.
-  if (is_scaled || scaled_mv.col || scaled_mv.row ||
-      (frame_width & 0x7) || (frame_height & 0x7)) {
+  if (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) ||
+      (frame_height & 0x7)) {
     int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
 
     // Get reference block bottom right horizontal coordinate.
@@ -658,11 +620,9 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
       const int b_h = y1 - y0 + 1;
       const int border_offset = y_pad * 3 * b_w + x_pad * 3;
 
-      extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h,
-                         frame_width, frame_height, border_offset,
-                         dst, dst_buf->stride,
-                         subpel_x, subpel_y,
-                         kernel, sf,
+      extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h, frame_width,
+                         frame_height, border_offset, dst, dst_buf->stride,
+                         subpel_x, subpel_y, kernel, sf,
 #if CONFIG_VPX_HIGHBITDEPTH
                          xd,
 #endif
@@ -672,11 +632,11 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
   } else {
     // Wait until reference block is ready. Pad 7 more pixels as last 7
     // pixels of each superblock row can be changed by next superblock row.
-     if (cm->frame_parallel_decode) {
-       const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;
-       vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
-                             VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
-     }
+    if (cm->frame_parallel_decode) {
+      const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;
+      vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+                            VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
+    }
   }
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
@@ -687,14 +647,14 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
                     subpel_y, sf, w, h, ref, kernel, xs, ys);
   }
 #else
-  inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
-                  subpel_y, sf, w, h, ref, kernel, xs, ys);
+  inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, subpel_y,
+                  sf, w, h, ref, kernel, xs, ys);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 }
 
 static void dec_build_inter_predictors_sb(VP10Decoder *const pbi,
-                                          MACROBLOCKD *xd,
-                                          int mi_row, int mi_col) {
+                                          MACROBLOCKD *xd, int mi_row,
+                                          int mi_col) {
   int plane;
   const int mi_x = mi_col * MI_SIZE;
   const int mi_y = mi_row * MI_SIZE;
@@ -733,28 +693,27 @@ static void dec_build_inter_predictors_sb(VP10Decoder *const pbi,
         for (y = 0; y < num_4x4_h; ++y) {
           for (x = 0; x < num_4x4_w; ++x) {
             const MV mv = average_split_mvs(pd, mi, ref, y * 2 + x);
-            dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4,
-                                       4 * x, 4 * y, pw, ph, mi_x, mi_y, kernel,
-                                       sf, pre_buf, dst_buf, &mv,
-                                       ref_frame_buf, is_scaled, ref);
+            dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4, 4 * x,
+                                       4 * y, pw, ph, mi_x, mi_y, kernel, sf,
+                                       pre_buf, dst_buf, &mv, ref_frame_buf,
+                                       is_scaled, ref);
           }
         }
       } else {
         const MV mv = mi->mbmi.mv[ref].as_mv;
-        dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4,
-                                   0, 0, n4w_x4, n4h_x4, mi_x, mi_y, kernel,
-                                   sf, pre_buf, dst_buf, &mv, ref_frame_buf,
-                                   is_scaled, ref);
+        dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4, 0, 0, n4w_x4,
+                                   n4h_x4, mi_x, mi_y, kernel, sf, pre_buf,
+                                   dst_buf, &mv, ref_frame_buf, is_scaled, ref);
       }
     }
   }
 }
 
-static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi,
-                                         int n4_wl, int n4_hl) {
+static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi, int n4_wl,
+                                         int n4_hl) {
   // get minimum log2 num4x4s dimension
   const int x = VPXMIN(n4_wl, n4_hl);
-  return VPXMIN(mbmi->tx_size,  x);
+  return VPXMIN(mbmi->tx_size, x);
 }
 
 static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
@@ -779,8 +738,8 @@ static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl,
 
 static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
-                                 int bw, int bh, int x_mis, int y_mis,
-                                 int bwl, int bhl) {
+                                 int bw, int bh, int x_mis, int y_mis, int bwl,
+                                 int bhl) {
   const int offset = mi_row * cm->mi_stride + mi_col;
   int x, y;
   const TileInfo *const tile = &xd->tile;
@@ -808,9 +767,8 @@ static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
 }
 
 static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
-                         int mi_row, int mi_col,
-                         vpx_reader *r, BLOCK_SIZE bsize,
-                         int bwl, int bhl) {
+                         int mi_row, int mi_col, vpx_reader *r,
+                         BLOCK_SIZE bsize, int bwl, int bhl) {
   VP10_COMMON *const cm = &pbi->common;
   const int less8x8 = bsize < BLOCK_8X8;
   const int bw = 1 << (bwl - 1);
@@ -818,15 +776,15 @@ static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
   const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
   const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
 
-  MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col,
-                                   bw, bh, x_mis, y_mis, bwl, bhl);
+  MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
+                                   y_mis, bwl, bhl);
 
   if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
     const BLOCK_SIZE uv_subsize =
         ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
     if (uv_subsize == BLOCK_INVALID)
-      vpx_internal_error(xd->error_info,
-                         VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
+      vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+                         "Invalid block size.");
   }
 
   vp10_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
@@ -841,20 +799,24 @@ static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
       const struct macroblockd_plane *const pd = &xd->plane[plane];
       const TX_SIZE tx_size =
           plane ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl)
-                  : mbmi->tx_size;
+                : mbmi->tx_size;
       const int num_4x4_w = pd->n4_w;
       const int num_4x4_h = pd->n4_h;
       const int step = (1 << tx_size);
       int row, col;
-      const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ?
-          0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
-      const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ?
-          0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+      const int max_blocks_wide =
+          num_4x4_w + (xd->mb_to_right_edge >= 0
+                           ? 0
+                           : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+      const int max_blocks_high =
+          num_4x4_h + (xd->mb_to_bottom_edge >= 0
+                           ? 0
+                           : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
 
       for (row = 0; row < max_blocks_high; row += step)
         for (col = 0; col < max_blocks_wide; col += step)
-          predict_and_reconstruct_intra_block(xd, r, mbmi, plane,
-                                              row, col, tx_size);
+          predict_and_reconstruct_intra_block(xd, r, mbmi, plane, row, col,
+                                              tx_size);
     }
   } else {
     // Prediction
@@ -869,20 +831,24 @@ static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
         const struct macroblockd_plane *const pd = &xd->plane[plane];
         const TX_SIZE tx_size =
             plane ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl)
-                    : mbmi->tx_size;
+                  : mbmi->tx_size;
         const int num_4x4_w = pd->n4_w;
         const int num_4x4_h = pd->n4_h;
         const int step = (1 << tx_size);
         int row, col;
-        const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ?
-            0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
-        const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ?
-            0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+        const int max_blocks_wide =
+            num_4x4_w + (xd->mb_to_right_edge >= 0
+                             ? 0
+                             : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+        const int max_blocks_high =
+            num_4x4_h +
+            (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >>
+                                                  (5 + pd->subsampling_y));
 
         for (row = 0; row < max_blocks_high; row += step)
           for (col = 0; col < max_blocks_wide; col += step)
-            eobtotal += reconstruct_inter_block(xd, r, mbmi, plane, row, col,
-                                                tx_size);
+            eobtotal +=
+                reconstruct_inter_block(xd, r, mbmi, plane, row, col, tx_size);
       }
 
       if (!less8x8 && eobtotal == 0)
@@ -897,21 +863,19 @@ static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
   xd->corrupted |= vpx_reader_has_error(r);
 }
 
-static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd,
-                                              int mi_row, int mi_col,
-                                              int bsl) {
+static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd, int mi_row,
+                                              int mi_col, int bsl) {
   const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
   const PARTITION_CONTEXT *left_ctx = xd->left_seg_context + (mi_row & MI_MASK);
-  int above = (*above_ctx >> bsl) & 1 , left = (*left_ctx >> bsl) & 1;
+  int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;
 
-//  assert(bsl >= 0);
+  //  assert(bsl >= 0);
 
   return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
 }
 
-static INLINE void dec_update_partition_context(MACROBLOCKD *xd,
-                                                int mi_row, int mi_col,
-                                                BLOCK_SIZE subsize,
+static INLINE void dec_update_partition_context(MACROBLOCKD *xd, int mi_row,
+                                                int mi_col, BLOCK_SIZE subsize,
                                                 int bw) {
   PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col;
   PARTITION_CONTEXT *const left_ctx = xd->left_seg_context + (mi_row & MI_MASK);
@@ -940,16 +904,15 @@ static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd,
   else
     p = PARTITION_SPLIT;
 
-  if (counts)
-    ++counts->partition[ctx][p];
+  if (counts) ++counts->partition[ctx][p];
 
   return p;
 }
 
 // TODO(slavarnway): eliminate bsize and subsize in future commits
 static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
-                             int mi_row, int mi_col,
-                             vpx_reader* r, BLOCK_SIZE bsize, int n4x4_l2) {
+                             int mi_row, int mi_col, vpx_reader *r,
+                             BLOCK_SIZE bsize, int n4x4_l2) {
   VP10_COMMON *const cm = &pbi->common;
   const int n8x8_l2 = n4x4_l2 - 1;
   const int num_8x8_wh = 1 << n8x8_l2;
@@ -959,11 +922,10 @@ static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
   const int has_rows = (mi_row + hbs) < cm->mi_rows;
   const int has_cols = (mi_col + hbs) < cm->mi_cols;
 
-  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
-    return;
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
 
-  partition = read_partition(cm, xd, mi_row, mi_col, r, has_rows, has_cols,
-                             n8x8_l2);
+  partition =
+      read_partition(cm, xd, mi_row, mi_col, r, has_rows, has_cols, n8x8_l2);
   subsize = subsize_lookup[partition][bsize];  // get_subsize(bsize, partition);
   if (!hbs) {
     // calculate bmode block dimensions (log 2)
@@ -994,8 +956,7 @@ static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
         decode_partition(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize,
                          n8x8_l2);
         break;
-      default:
-        assert(0 && "Invalid partition type");
+      default: assert(0 && "Invalid partition type");
     }
   }
 
@@ -1005,12 +966,10 @@ static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
     dec_update_partition_context(xd, mi_row, mi_col, subsize, num_8x8_wh);
 }
 
-static void setup_token_decoder(const uint8_t *data,
-                                const uint8_t *data_end,
+static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
                                 size_t read_size,
                                 struct vpx_internal_error_info *error_info,
-                                vpx_reader *r,
-                                vpx_decrypt_cb decrypt_cb,
+                                vpx_reader *r, vpx_decrypt_cb decrypt_cb,
                                 void *decrypt_state) {
   // Validate the calculated partition length. If the buffer
   // described by the partition can't be fully read, then restrict
@@ -1037,12 +996,11 @@ static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs,
               vp10_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
 }
 
-static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
-                            vpx_reader *r) {
-    const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
-    TX_SIZE tx_size;
-    for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
-      read_coef_probs_common(fc->coef_probs[tx_size], r);
+static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, vpx_reader *r) {
+  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
+  TX_SIZE tx_size;
+  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
+    read_coef_probs_common(fc->coef_probs[tx_size], r);
 }
 
 static void setup_segmentation(VP10_COMMON *const cm,
@@ -1057,8 +1015,7 @@ static void setup_segmentation(VP10_COMMON *const cm,
   seg->update_data = 0;
 
   seg->enabled = vpx_rb_read_bit(rb);
-  if (!seg->enabled)
-    return;
+  if (!seg->enabled) return;
 
   // Segmentation map update
   if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
@@ -1069,8 +1026,8 @@ static void setup_segmentation(VP10_COMMON *const cm,
   if (seg->update_map) {
 #if !CONFIG_MISC_FIXES
     for (i = 0; i < SEG_TREE_PROBS; i++)
-      segp->tree_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8)
-                                                : MAX_PROB;
+      segp->tree_probs[i] =
+          vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
 #endif
     if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
       seg->temporal_update = 0;
@@ -1080,11 +1037,10 @@ static void setup_segmentation(VP10_COMMON *const cm,
 #if !CONFIG_MISC_FIXES
     if (seg->temporal_update) {
       for (i = 0; i < PREDICTION_PROBS; i++)
-        segp->pred_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8)
-                                                  : MAX_PROB;
+        segp->pred_probs[i] =
+            vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
     } else {
-      for (i = 0; i < PREDICTION_PROBS; i++)
-        segp->pred_probs[i] = MAX_PROB;
+      for (i = 0; i < PREDICTION_PROBS; i++) segp->pred_probs[i] = MAX_PROB;
     }
 #endif
   }
@@ -1139,8 +1095,9 @@ static void setup_loopfilter(struct loopfilter *lf,
 }
 
 static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) {
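+  // One flag bit signals a non-zero delta, followed by an inverse-signed
+  // literal (6 bits under CONFIG_MISC_FIXES, 4 bits otherwise).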
-  return vpx_rb_read_bit(rb) ?
-      vpx_rb_read_inv_signed_literal(rb, CONFIG_MISC_FIXES ? 6 : 4) : 0;
+  return vpx_rb_read_bit(rb)
+             ? vpx_rb_read_inv_signed_literal(rb, CONFIG_MISC_FIXES ? 6 : 4)
+             : 0;
 }
 
 static void setup_quantization(VP10_COMMON *const cm,
@@ -1158,24 +1115,25 @@ static void setup_segmentation_dequant(VP10_COMMON *const cm) {
     int i;
     for (i = 0; i < MAX_SEGMENTS; ++i) {
       const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
-      cm->y_dequant[i][0] = vp10_dc_quant(qindex, cm->y_dc_delta_q,
-                                         cm->bit_depth);
+      cm->y_dequant[i][0] =
+          vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
       cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
-      cm->uv_dequant[i][0] = vp10_dc_quant(qindex, cm->uv_dc_delta_q,
-                                          cm->bit_depth);
-      cm->uv_dequant[i][1] = vp10_ac_quant(qindex, cm->uv_ac_delta_q,
-                                          cm->bit_depth);
+      cm->uv_dequant[i][0] =
+          vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+      cm->uv_dequant[i][1] =
+          vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
     }
   } else {
     const int qindex = cm->base_qindex;
     // When segmentation is disabled, only the first value is used.  The
     // remaining are don't cares.
-    cm->y_dequant[0][0] = vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+    cm->y_dequant[0][0] =
+        vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
     cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
-    cm->uv_dequant[0][0] = vp10_dc_quant(qindex, cm->uv_dc_delta_q,
-                                        cm->bit_depth);
-    cm->uv_dequant[0][1] = vp10_ac_quant(qindex, cm->uv_ac_delta_q,
-                                        cm->bit_depth);
+    cm->uv_dequant[0][0] =
+        vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+    cm->uv_dequant[0][1] =
+        vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
   }
 }
 
@@ -1183,8 +1141,7 @@ static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
   return vpx_rb_read_bit(rb) ? SWITCHABLE : vpx_rb_read_literal(rb, 2);
 }
 
-static void setup_render_size(VP10_COMMON *cm,
-                              struct vpx_read_bit_buffer *rb) {
+static void setup_render_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
   cm->render_width = cm->width;
   cm->render_height = cm->height;
   if (vpx_rb_read_bit(rb))
@@ -1210,7 +1167,7 @@ static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
     const int new_mi_rows =
         ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
     const int new_mi_cols =
-        ALIGN_POWER_OF_TWO(width,  MI_SIZE_LOG2) >> MI_SIZE_LOG2;
+        ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
 
     // Allocations in vp10_alloc_context_buffers() depend on individual
     // dimensions as well as the overall size.
@@ -1240,13 +1197,12 @@ static void setup_frame_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
 
   lock_buffer_pool(pool);
   if (vpx_realloc_frame_buffer(
-          get_frame_new_buffer(cm), cm->width, cm->height,
-          cm->subsampling_x, cm->subsampling_y,
+          get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
+          cm->subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
           cm->use_highbitdepth,
 #endif
-          VPX_DEC_BORDER_IN_PIXELS,
-          cm->byte_alignment,
+          VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
           &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
           pool->cb_priv)) {
     unlock_buffer_pool(pool);
@@ -1260,7 +1216,7 @@ static void setup_frame_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
   pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
   pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
   pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
-  pool->frame_bufs[cm->new_fb_idx].buf.render_width  = cm->render_width;
+  pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
   pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
 }
 
@@ -1307,22 +1263,19 @@ static void setup_frame_size_with_refs(VP10_COMMON *cm,
   // has valid dimensions.
   for (i = 0; i < REFS_PER_FRAME; ++i) {
     RefBuffer *const ref_frame = &cm->frame_refs[i];
-    has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width,
-                                                ref_frame->buf->y_crop_height,
-                                                width, height);
+    has_valid_ref_frame |=
+        valid_ref_frame_size(ref_frame->buf->y_crop_width,
+                             ref_frame->buf->y_crop_height, width, height);
   }
   if (!has_valid_ref_frame)
     vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                        "Referenced frame has invalid size");
   for (i = 0; i < REFS_PER_FRAME; ++i) {
     RefBuffer *const ref_frame = &cm->frame_refs[i];
-    if (!valid_ref_frame_img_fmt(
-            ref_frame->buf->bit_depth,
-            ref_frame->buf->subsampling_x,
-            ref_frame->buf->subsampling_y,
-            cm->bit_depth,
-            cm->subsampling_x,
-            cm->subsampling_y))
+    if (!valid_ref_frame_img_fmt(ref_frame->buf->bit_depth,
+                                 ref_frame->buf->subsampling_x,
+                                 ref_frame->buf->subsampling_y, cm->bit_depth,
+                                 cm->subsampling_x, cm->subsampling_y))
       vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                          "Referenced frame has incompatible color format");
   }
@@ -1334,13 +1287,12 @@ static void setup_frame_size_with_refs(VP10_COMMON *cm,
 
   lock_buffer_pool(pool);
   if (vpx_realloc_frame_buffer(
-          get_frame_new_buffer(cm), cm->width, cm->height,
-          cm->subsampling_x, cm->subsampling_y,
+          get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
+          cm->subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
           cm->use_highbitdepth,
 #endif
-          VPX_DEC_BORDER_IN_PIXELS,
-          cm->byte_alignment,
+          VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
           &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
           pool->cb_priv)) {
     unlock_buffer_pool(pool);
@@ -1354,7 +1306,7 @@ static void setup_frame_size_with_refs(VP10_COMMON *cm,
   pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
   pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
   pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
-  pool->frame_bufs[cm->new_fb_idx].buf.render_width  = cm->render_width;
+  pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
   pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
 }
 
@@ -1365,8 +1317,7 @@ static void setup_tile_info(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
   // columns
   max_ones = max_log2_tile_cols - min_log2_tile_cols;
   cm->log2_tile_cols = min_log2_tile_cols;
-  while (max_ones-- && vpx_rb_read_bit(rb))
-    cm->log2_tile_cols++;
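+  // The column count is unary coded: each 1-bit raises log2_tile_cols above
+  // the derived minimum, up to max_log2_tile_cols.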
+  while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++;
 
   if (cm->log2_tile_cols > 6)
     vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
@@ -1374,8 +1325,7 @@ static void setup_tile_info(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
 
   // rows
   cm->log2_tile_rows = vpx_rb_read_bit(rb);
-  if (cm->log2_tile_rows)
-    cm->log2_tile_rows += vpx_rb_read_bit(rb);
+  if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb);
 
 #if CONFIG_MISC_FIXES
   // tile size magnitude
@@ -1395,14 +1345,10 @@ typedef struct TileBuffer {
 
 static int mem_get_varsize(const uint8_t *data, const int mag) {
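+  // mag selects the width of the little-endian tile-size field: mag + 1
+  // bytes.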
   switch (mag) {
-    case 0:
-      return data[0];
-    case 1:
-      return mem_get_le16(data);
-    case 2:
-      return mem_get_le24(data);
-    case 3:
-      return mem_get_le32(data);
+    case 0: return data[0];
+    case 1: return mem_get_le16(data);
+    case 2: return mem_get_le24(data);
+    case 3: return mem_get_le32(data);
   }
 
   assert("Invalid tile size marker value" && 0);
@@ -1415,9 +1361,8 @@ static int mem_get_varsize(const uint8_t *data, const int mag) {
 static void get_tile_buffer(const uint8_t *const data_end,
                             const int tile_sz_mag, int is_last,
                             struct vpx_internal_error_info *error_info,
-                            const uint8_t **data,
-                            vpx_decrypt_cb decrypt_cb, void *decrypt_state,
-                            TileBuffer *buf) {
+                            const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+                            void *decrypt_state, TileBuffer *buf) {
   size_t size;
 
   if (!is_last) {
@@ -1447,9 +1392,9 @@ static void get_tile_buffer(const uint8_t *const data_end,
   *data += size;
 }
 
-static void get_tile_buffers(VP10Decoder *pbi,
-                             const uint8_t *data, const uint8_t *data_end,
-                             int tile_cols, int tile_rows,
+static void get_tile_buffers(VP10Decoder *pbi, const uint8_t *data,
+                             const uint8_t *data_end, int tile_cols,
+                             int tile_rows,
                              TileBuffer (*tile_buffers)[1 << 6]) {
   int r, c;
 
@@ -1458,15 +1403,14 @@ static void get_tile_buffers(VP10Decoder *pbi,
       const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
       TileBuffer *const buf = &tile_buffers[r][c];
       buf->col = c;
-      get_tile_buffer(data_end, pbi->common.tile_sz_mag,
-                      is_last, &pbi->common.error, &data,
-                      pbi->decrypt_cb, pbi->decrypt_state, buf);
+      get_tile_buffer(data_end, pbi->common.tile_sz_mag, is_last,
+                      &pbi->common.error, &data, pbi->decrypt_cb,
+                      pbi->decrypt_state, buf);
     }
   }
 }
 
-static const uint8_t *decode_tiles(VP10Decoder *pbi,
-                                   const uint8_t *data,
+static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
                                    const uint8_t *data_end) {
   VP10_COMMON *const cm = &pbi->common;
   const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
@@ -1490,11 +1434,11 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi,
   }
 
   if (cm->lf.filter_level && !cm->skip_loop_filter) {
-    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+    LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
     // Be sure to sync as we might be resuming after a failed frame decode.
     winterface->sync(&pbi->lf_worker);
     vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
-                               pbi->mb.plane);
+                                pbi->mb.plane);
   }
 
   assert(tile_rows <= 4);
@@ -1510,12 +1454,10 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi,
 
   get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
 
-  if (pbi->tile_data == NULL ||
-      (tile_cols * tile_rows) != pbi->total_tiles) {
+  if (pbi->tile_data == NULL || (tile_cols * tile_rows) != pbi->total_tiles) {
     vpx_free(pbi->tile_data);
     CHECK_MEM_ERROR(
-        cm,
-        pbi->tile_data,
+        cm, pbi->tile_data,
         vpx_memalign(32, tile_cols * tile_rows * (sizeof(*pbi->tile_data))));
     pbi->total_tiles = tile_rows * tile_cols;
   }
@@ -1529,8 +1471,9 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi,
       tile_data->xd = pbi->mb;
       tile_data->xd.corrupted = 0;
       tile_data->xd.counts =
-          cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD ?
-              &cm->counts : NULL;
+          cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
+              ? &cm->counts
+              : NULL;
       vp10_zero(tile_data->dqcoeff);
       vp10_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col);
       setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
@@ -1548,26 +1491,26 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi,
     for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
          mi_row += MI_BLOCK_SIZE) {
       for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
-        const int col = pbi->inv_tile_order ?
-                        tile_cols - tile_col - 1 : tile_col;
+        const int col =
+            pbi->inv_tile_order ? tile_cols - tile_col - 1 : tile_col;
         tile_data = pbi->tile_data + tile_cols * tile_row + col;
         vp10_tile_set_col(&tile, tile_data->cm, col);
         vp10_zero(tile_data->xd.left_context);
         vp10_zero(tile_data->xd.left_seg_context);
         for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
              mi_col += MI_BLOCK_SIZE) {
-          decode_partition(pbi, &tile_data->xd, mi_row,
-                           mi_col, &tile_data->bit_reader, BLOCK_64X64, 4);
+          decode_partition(pbi, &tile_data->xd, mi_row, mi_col,
+                           &tile_data->bit_reader, BLOCK_64X64, 4);
         }
         pbi->mb.corrupted |= tile_data->xd.corrupted;
         if (pbi->mb.corrupted)
-            vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
-                               "Failed to decode tile data");
+          vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+                             "Failed to decode tile data");
       }
       // Loopfilter one row.
       if (cm->lf.filter_level && !cm->skip_loop_filter) {
         const int lf_start = mi_row - MI_BLOCK_SIZE;
-        LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+        LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
 
         // delay the loopfilter by 1 macroblock row.
         if (lf_start < 0) continue;
@@ -1588,14 +1531,13 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi,
       // still be changed by the longest loopfilter of the next superblock
       // row.
       if (cm->frame_parallel_decode)
-        vp10_frameworker_broadcast(pbi->cur_buf,
-                                  mi_row << MI_BLOCK_SIZE_LOG2);
+        vp10_frameworker_broadcast(pbi->cur_buf, mi_row << MI_BLOCK_SIZE_LOG2);
     }
   }
 
   // Loopfilter remaining rows in the frame.
   if (cm->lf.filter_level && !cm->skip_loop_filter) {
-    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+    LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
     winterface->sync(&pbi->lf_worker);
     lf_data->start = lf_data->stop;
     lf_data->stop = cm->mi_rows;
@@ -1629,9 +1571,8 @@ static int tile_worker_hook(TileWorkerData *const tile_data,
     vp10_zero(tile_data->xd.left_seg_context);
     for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
          mi_col += MI_BLOCK_SIZE) {
-      decode_partition(tile_data->pbi, &tile_data->xd,
-                       mi_row, mi_col, &tile_data->bit_reader,
-                       BLOCK_64X64, 4);
+      decode_partition(tile_data->pbi, &tile_data->xd, mi_row, mi_col,
+                       &tile_data->bit_reader, BLOCK_64X64, 4);
     }
   }
   return !tile_data->xd.corrupted;
@@ -1639,13 +1580,12 @@ static int tile_worker_hook(TileWorkerData *const tile_data,
 
 // sorts in descending order
 static int compare_tile_buffers(const void *a, const void *b) {
-  const TileBuffer *const buf1 = (const TileBuffer*)a;
-  const TileBuffer *const buf2 = (const TileBuffer*)b;
+  const TileBuffer *const buf1 = (const TileBuffer *)a;
+  const TileBuffer *const buf2 = (const TileBuffer *)b;
   return (int)(buf2->size - buf1->size);
 }
 
-static const uint8_t *decode_tiles_mt(VP10Decoder *pbi,
-                                      const uint8_t *data,
+static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
                                       const uint8_t *data_end) {
   VP10_COMMON *const cm = &pbi->common;
   const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
@@ -1672,9 +1612,9 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi,
     // Ensure tile data offsets will be properly aligned. This may fail on
     // platforms without DECLARE_ALIGNED().
     assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
-    CHECK_MEM_ERROR(cm, pbi->tile_worker_data,
-                    vpx_memalign(32, num_threads *
-                                 sizeof(*pbi->tile_worker_data)));
+    CHECK_MEM_ERROR(
+        cm, pbi->tile_worker_data,
+        vpx_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
     CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
                     vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
     for (i = 0; i < num_threads; ++i) {
@@ -1734,7 +1674,7 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi,
 
     for (i = 0; i < num_workers; ++i) {
       TileWorkerData *const tile_data =
-          (TileWorkerData*)pbi->tile_workers[i].data1;
+          (TileWorkerData *)pbi->tile_workers[i].data1;
       vp10_zero(tile_data->counts);
     }
   }
@@ -1744,16 +1684,17 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi,
     int i;
     for (i = 0; i < num_workers && n < tile_cols; ++i) {
       VPxWorker *const worker = &pbi->tile_workers[i];
-      TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
-      TileInfo *const tile = (TileInfo*)worker->data2;
+      TileWorkerData *const tile_data = (TileWorkerData *)worker->data1;
+      TileInfo *const tile = (TileInfo *)worker->data2;
       TileBuffer *const buf = &tile_buffers[0][n];
 
       tile_data->pbi = pbi;
       tile_data->xd = pbi->mb;
       tile_data->xd.corrupted = 0;
       tile_data->xd.counts =
-          cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD ?
-              &tile_data->counts : NULL;
+          cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
+              ? &tile_data->counts
+              : NULL;
       vp10_zero(tile_data->dqcoeff);
       vp10_tile_init(tile, cm, 0, buf->col);
       vp10_tile_init(&tile_data->xd.tile, cm, 0, buf->col);
@@ -1788,7 +1729,7 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi,
     }
     if (final_worker > -1) {
       TileWorkerData *const tile_data =
-          (TileWorkerData*)pbi->tile_workers[final_worker].data1;
+          (TileWorkerData *)pbi->tile_workers[final_worker].data1;
       bit_reader_end = vpx_reader_find_end(&tile_data->bit_reader);
       final_worker = -1;
     }
@@ -1798,7 +1739,7 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi,
         cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
       for (i = 0; i < num_workers; ++i) {
         TileWorkerData *const tile_data =
-            (TileWorkerData*)pbi->tile_workers[i].data1;
+            (TileWorkerData *)pbi->tile_workers[i].data1;
         vp10_accumulate_frame_counts(cm, &tile_data->counts, 1);
       }
     }
@@ -1812,8 +1753,8 @@ static void error_handler(void *data) {
   vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
 }
 
-static void read_bitdepth_colorspace_sampling(
-    VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void read_bitdepth_colorspace_sampling(VP10_COMMON *cm,
+                                              struct vpx_read_bit_buffer *rb) {
   if (cm->profile >= PROFILE_2) {
     cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -1869,8 +1810,8 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
   cm->last_intra_only = cm->intra_only;
 
   if (vpx_rb_read_literal(rb, 2) != VPX_FRAME_MARKER)
-      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
-                         "Invalid frame marker");
+    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+                       "Invalid frame marker");
 
   cm->profile = vp10_read_profile(rb);
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -1908,7 +1849,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
     return 0;
   }
 
-  cm->frame_type = (FRAME_TYPE) vpx_rb_read_bit(rb);
+  cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb);
   cm->show_frame = vpx_rb_read_bit(rb);
   cm->error_resilient_mode = vpx_rb_read_bit(rb);
 
@@ -1934,21 +1875,21 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
     cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);
 
     if (cm->error_resilient_mode) {
-        cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
+      cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
     } else {
 #if CONFIG_MISC_FIXES
       if (cm->intra_only) {
-          cm->reset_frame_context =
-              vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_ALL
-                                  : RESET_FRAME_CONTEXT_CURRENT;
-      } else {
-          cm->reset_frame_context =
-              vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_CURRENT
-                                  : RESET_FRAME_CONTEXT_NONE;
-          if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
-            cm->reset_frame_context =
-                  vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_ALL
+        cm->reset_frame_context = vpx_rb_read_bit(rb)
+                                      ? RESET_FRAME_CONTEXT_ALL
                                       : RESET_FRAME_CONTEXT_CURRENT;
+      } else {
+        cm->reset_frame_context = vpx_rb_read_bit(rb)
+                                      ? RESET_FRAME_CONTEXT_CURRENT
+                                      : RESET_FRAME_CONTEXT_NONE;
+        if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
+          cm->reset_frame_context = vpx_rb_read_bit(rb)
+                                        ? RESET_FRAME_CONTEXT_ALL
+                                        : RESET_FRAME_CONTEXT_CURRENT;
       }
 #else
       static const RESET_FRAME_CONTEXT_MODE reset_frame_context_conv_tbl[4] = {
@@ -1991,7 +1932,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
         memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
         pbi->need_resync = 0;
       }
-    } else if (pbi->need_resync != 1) {  /* Skip if need resync */
+    } else if (pbi->need_resync != 1) { /* Skip if need resync */
       pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
       for (i = 0; i < REFS_PER_FRAME; ++i) {
         const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2);
@@ -2010,16 +1951,14 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
       for (i = 0; i < REFS_PER_FRAME; ++i) {
         RefBuffer *const ref_buf = &cm->frame_refs[i];
 #if CONFIG_VPX_HIGHBITDEPTH
-        vp10_setup_scale_factors_for_frame(&ref_buf->sf,
-                                          ref_buf->buf->y_crop_width,
-                                          ref_buf->buf->y_crop_height,
-                                          cm->width, cm->height,
-                                          cm->use_highbitdepth);
+        vp10_setup_scale_factors_for_frame(
+            &ref_buf->sf, ref_buf->buf->y_crop_width,
+            ref_buf->buf->y_crop_height, cm->width, cm->height,
+            cm->use_highbitdepth);
 #else
-        vp10_setup_scale_factors_for_frame(&ref_buf->sf,
-                                          ref_buf->buf->y_crop_width,
-                                          ref_buf->buf->y_crop_height,
-                                          cm->width, cm->height);
+        vp10_setup_scale_factors_for_frame(
+            &ref_buf->sf, ref_buf->buf->y_crop_width,
+            ref_buf->buf->y_crop_height, cm->width, cm->height);
 #endif
       }
     }
@@ -2029,7 +1968,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
 #endif
   get_frame_new_buffer(cm)->color_space = cm->color_space;
   get_frame_new_buffer(cm)->color_range = cm->color_range;
-  get_frame_new_buffer(cm)->render_width  = cm->render_width;
+  get_frame_new_buffer(cm)->render_width = cm->render_width;
   get_frame_new_buffer(cm)->render_height = cm->render_height;
 
   if (pbi->need_resync) {
@@ -2039,13 +1978,13 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
   }
 
   if (!cm->error_resilient_mode) {
-    cm->refresh_frame_context =
-        vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD
-                            : REFRESH_FRAME_CONTEXT_OFF;
+    cm->refresh_frame_context = vpx_rb_read_bit(rb)
+                                    ? REFRESH_FRAME_CONTEXT_FORWARD
+                                    : REFRESH_FRAME_CONTEXT_OFF;
     if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
-        cm->refresh_frame_context =
-            vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD
-                                : REFRESH_FRAME_CONTEXT_BACKWARD;
+      cm->refresh_frame_context = vpx_rb_read_bit(rb)
+                                      ? REFRESH_FRAME_CONTEXT_FORWARD
+                                      : REFRESH_FRAME_CONTEXT_BACKWARD;
 #if !CONFIG_MISC_FIXES
     } else {
       vpx_rb_read_bit(rb);  // parallel decoding mode flag
@@ -2097,20 +2036,18 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
   {
     int i;
     for (i = 0; i < MAX_SEGMENTS; ++i) {
-      const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled ?
-          vp10_get_qindex(&cm->seg, i, cm->base_qindex) :
-          cm->base_qindex;
-      xd->lossless[i] = qindex == 0 &&
-          cm->y_dc_delta_q == 0 &&
-          cm->uv_dc_delta_q == 0 &&
-          cm->uv_ac_delta_q == 0;
+      const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled
+                             ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+                             : cm->base_qindex;
+      xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
+                        cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
     }
   }
 
   setup_segmentation_dequant(cm);
 #if CONFIG_MISC_FIXES
-  cm->tx_mode = (!cm->seg.enabled && xd->lossless[0]) ? ONLY_4X4
-                                                      : read_tx_mode(rb);
+  cm->tx_mode =
+      (!cm->seg.enabled && xd->lossless[0]) ? ONLY_4X4 : read_tx_mode(rb);
   cm->reference_mode = read_frame_reference_mode(cm, rb);
 #endif
 
@@ -2159,8 +2096,7 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
 #if !CONFIG_MISC_FIXES
   cm->tx_mode = xd->lossless[0] ? ONLY_4X4 : read_tx_mode(&r);
 #endif
-  if (cm->tx_mode == TX_MODE_SELECT)
-    read_tx_mode_probs(&fc->tx_probs, &r);
+  if (cm->tx_mode == TX_MODE_SELECT) read_tx_mode_probs(&fc->tx_probs, &r);
   read_coef_probs(fc, cm->tx_mode, &r);
 
   for (k = 0; k < SKIP_CONTEXTS; ++k)
@@ -2198,8 +2134,7 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
 
     read_inter_mode_probs(fc, &r);
 
-    if (cm->interp_filter == SWITCHABLE)
-      read_switchable_interp_probs(fc, &r);
+    if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
 
     for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
       vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]);
@@ -2230,7 +2165,7 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
 
 #ifdef NDEBUG
 #define debug_check_frame_counts(cm) (void)0
-#else  // !NDEBUG
+#else   // !NDEBUG
 // Counts should only be incremented when frame_parallel_decoding_mode and
 // error_resilient_mode are disabled.
 static void debug_check_frame_counts(const VP10_COMMON *const cm) {
@@ -2244,8 +2179,7 @@ static void debug_check_frame_counts(const VP10_COMMON *const cm) {
                  sizeof(cm->counts.uv_mode)));
   assert(!memcmp(cm->counts.partition, zero_counts.partition,
                  sizeof(cm->counts.partition)));
-  assert(!memcmp(cm->counts.coef, zero_counts.coef,
-                 sizeof(cm->counts.coef)));
+  assert(!memcmp(cm->counts.coef, zero_counts.coef, sizeof(cm->counts.coef)));
   assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
                  sizeof(cm->counts.eob_branch)));
   assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
@@ -2271,11 +2205,8 @@ static void debug_check_frame_counts(const VP10_COMMON *const cm) {
 #endif  // NDEBUG
 
 static struct vpx_read_bit_buffer *init_read_bit_buffer(
-    VP10Decoder *pbi,
-    struct vpx_read_bit_buffer *rb,
-    const uint8_t *data,
-    const uint8_t *data_end,
-    uint8_t clear_data[MAX_VP10_HEADER_SIZE]) {
+    VP10Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data,
+    const uint8_t *data_end, uint8_t clear_data[MAX_VP10_HEADER_SIZE]) {
   rb->bit_offset = 0;
   rb->error_handler = error_handler;
   rb->error_handler_data = &pbi->common;
@@ -2299,8 +2230,8 @@ int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb) {
          vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2;
 }
 
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb,
-                         int *width, int *height) {
+void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
+                          int *height) {
   *width = vpx_rb_read_literal(rb, 16) + 1;
   *height = vpx_rb_read_literal(rb, 16) + 1;
 }
@@ -2308,21 +2239,19 @@ void vp10_read_frame_size(struct vpx_read_bit_buffer *rb,
 BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb) {
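+  // Two bits select profiles 0-2; a raw value of 3 reads one extra bit to
+  // separate profile 3 from the reserved encoding.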
   int profile = vpx_rb_read_bit(rb);
   profile |= vpx_rb_read_bit(rb) << 1;
-  if (profile > 2)
-    profile += vpx_rb_read_bit(rb);
-  return (BITSTREAM_PROFILE) profile;
+  if (profile > 2) profile += vpx_rb_read_bit(rb);
+  return (BITSTREAM_PROFILE)profile;
 }
 
-void vp10_decode_frame(VP10Decoder *pbi,
-                      const uint8_t *data, const uint8_t *data_end,
-                      const uint8_t **p_data_end) {
+void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
+                       const uint8_t *data_end, const uint8_t **p_data_end) {
   VP10_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
   struct vpx_read_bit_buffer rb;
   int context_updated = 0;
   uint8_t clear_data[MAX_VP10_HEADER_SIZE];
-  const size_t first_partition_size = read_uncompressed_header(pbi,
-      init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
+  const size_t first_partition_size = read_uncompressed_header(
+      pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
   const int tile_rows = 1 << cm->log2_tile_rows;
   const int tile_cols = 1 << cm->log2_tile_cols;
   YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
@@ -2339,12 +2268,10 @@ void vp10_decode_frame(VP10Decoder *pbi,
     vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt header length");
 
-  cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
-                           cm->width == cm->last_width &&
-                           cm->height == cm->last_height &&
-                           !cm->last_intra_only &&
-                           cm->last_show_frame &&
-                           (cm->last_frame_type != KEY_FRAME);
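+  // Previous-frame MVs are only reusable when the frame geometry and
+  // reference state are unchanged since the last shown inter frame.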
+  cm->use_prev_frame_mvs =
+      !cm->error_resilient_mode && cm->width == cm->last_width &&
+      cm->height == cm->last_height && !cm->last_intra_only &&
+      cm->last_show_frame && (cm->last_frame_type != KEY_FRAME);
 
   vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
 
@@ -2392,13 +2319,12 @@ void vp10_decode_frame(VP10Decoder *pbi,
         // If multiple threads are used to decode tiles, then we use those
         // threads to do parallel loopfiltering.
         vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
-                                 cm->lf.filter_level, 0, 0, pbi->tile_workers,
-                                 pbi->num_tile_workers, &pbi->lf_row_sync);
+                                  cm->lf.filter_level, 0, 0, pbi->tile_workers,
+                                  pbi->num_tile_workers, &pbi->lf_row_sync);
       }
     } else {
       vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                          "Decode failed. Frame data is corrupted.");
-
     }
   } else {
     *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
diff --git a/vp10/decoder/decodeframe.h b/vp10/decoder/decodeframe.h
index 770ae154e597734dda4e0fc988615e72f41909f1..7fdff0b29857021be6741c580a75b6013e4744bb 100644
--- a/vp10/decoder/decodeframe.h
+++ b/vp10/decoder/decodeframe.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_DECODER_DECODEFRAME_H_
 #define VP10_DECODER_DECODEFRAME_H_
 
@@ -20,13 +19,12 @@ struct VP10Decoder;
 struct vpx_read_bit_buffer;
 
 int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb);
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb,
-                         int *width, int *height);
+void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
+                          int *height);
 BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb);
 
-void vp10_decode_frame(struct VP10Decoder *pbi,
-                      const uint8_t *data, const uint8_t *data_end,
-                      const uint8_t **p_data_end);
+void vp10_decode_frame(struct VP10Decoder *pbi, const uint8_t *data,
+                       const uint8_t *data_end, const uint8_t **p_data_end);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/decoder/decodemv.c b/vp10/decoder/decodemv.c
index a28ae559277e02961126c8226bc8cf96237510b4..4a34e5287e6b9d44b7b81f97ef28615389040e39 100644
--- a/vp10/decoder/decodemv.c
+++ b/vp10/decoder/decodemv.c
@@ -27,7 +27,7 @@
 static INLINE int read_uniform(vpx_reader *r, int n) {
   int l = get_unsigned_bits(n);
   int m = (1 << l) - n;
-  int v = vpx_read_literal(r, l-1);
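+  // Truncated binary code over [0, n): the first m = 2^l - n values use
+  // l - 1 bits, the remainder use l bits (one extra bit read further down).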
+  int v = vpx_read_literal(r, l - 1);
 
   assert(l != 0);
 
@@ -46,35 +46,32 @@ static PREDICTION_MODE read_intra_mode_y(VP10_COMMON *cm, MACROBLOCKD *xd,
   const PREDICTION_MODE y_mode =
       read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
   FRAME_COUNTS *counts = xd->counts;
-  if (counts)
-    ++counts->y_mode[size_group][y_mode];
+  if (counts) ++counts->y_mode[size_group][y_mode];
   return y_mode;
 }
 
 static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd,
                                           vpx_reader *r,
                                           PREDICTION_MODE y_mode) {
-  const PREDICTION_MODE uv_mode = read_intra_mode(r,
-                                         cm->fc->uv_mode_prob[y_mode]);
+  const PREDICTION_MODE uv_mode =
+      read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
   FRAME_COUNTS *counts = xd->counts;
-  if (counts)
-    ++counts->uv_mode[y_mode][uv_mode];
+  if (counts) ++counts->uv_mode[y_mode][uv_mode];
   return uv_mode;
 }
 
 static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
                                        vpx_reader *r, int ctx) {
-  const int mode = vpx_read_tree(r, vp10_inter_mode_tree,
-                                 cm->fc->inter_mode_probs[ctx]);
+  const int mode =
+      vpx_read_tree(r, vp10_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
   FRAME_COUNTS *counts = xd->counts;
-  if (counts)
-    ++counts->inter_mode[ctx][mode];
+  if (counts) ++counts->inter_mode[ctx][mode];
 
   return NEARESTMV + mode;
 }
 
 static int read_segment_id(vpx_reader *r,
-    const struct segmentation_probs *segp) {
+                           const struct segmentation_probs *segp) {
   return vpx_read_tree(r, vp10_segment_tree, segp->tree_probs);
 }
 
@@ -90,18 +87,16 @@ static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
       tx_size += vpx_read(r, tx_probs[2]);
   }
 
-  if (counts)
-    ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
+  if (counts) ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
   return (TX_SIZE)tx_size;
 }
 
-static TX_SIZE read_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
-                            int allow_select, vpx_reader *r) {
+static TX_SIZE read_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd, int allow_select,
+                            vpx_reader *r) {
   TX_MODE tx_mode = cm->tx_mode;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
-  if (xd->lossless[xd->mi[0]->mbmi.segment_id])
-    return TX_4X4;
+  if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
   if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8)
     return read_selected_tx_size(cm, xd, max_tx_size, r);
   else
@@ -121,8 +116,8 @@ static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids,
   return segment_id;
 }
 
-static void set_segment_id(VP10_COMMON *cm, int mi_offset,
-                           int x_mis, int y_mis, int segment_id) {
+static void set_segment_id(VP10_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+                           int segment_id) {
   int x, y;
 
   assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
@@ -145,33 +140,32 @@ static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
   int segment_id;
 
 #if !CONFIG_MISC_FIXES
-  (void) xd;
+  (void)xd;
 #endif
 
-  if (!seg->enabled)
-    return 0;  // Default for disabled segmentation
+  if (!seg->enabled) return 0;  // Default for disabled segmentation
 
   assert(seg->update_map && !seg->temporal_update);
 
   segment_id = read_segment_id(r, segp);
 #if CONFIG_MISC_FIXES
-  if (counts)
-    ++counts->seg.tree_total[segment_id];
+  if (counts) ++counts->seg.tree_total[segment_id];
 #endif
   set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
   return segment_id;
 }
 
 static void copy_segment_id(const VP10_COMMON *cm,
-                           const uint8_t *last_segment_ids,
-                           uint8_t *current_segment_ids,
-                           int mi_offset, int x_mis, int y_mis) {
+                            const uint8_t *last_segment_ids,
+                            uint8_t *current_segment_ids, int mi_offset,
+                            int x_mis, int y_mis) {
   int x, y;
 
   for (y = 0; y < y_mis; y++)
     for (x = 0; x < x_mis; x++)
-      current_segment_ids[mi_offset + y * cm->mi_cols + x] =  last_segment_ids ?
-          last_segment_ids[mi_offset + y * cm->mi_cols + x] : 0;
+      current_segment_ids[mi_offset + y * cm->mi_cols + x] =
+          last_segment_ids ? last_segment_ids[mi_offset + y * cm->mi_cols + x]
+                           : 0;
 }
 
 static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
@@ -193,12 +187,12 @@ static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
   const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw);
   const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh);
 
-  if (!seg->enabled)
-    return 0;  // Default for disabled segmentation
+  if (!seg->enabled) return 0;  // Default for disabled segmentation
 
-  predicted_segment_id = cm->last_frame_seg_map ?
-      dec_get_segment_id(cm, cm->last_frame_seg_map, mi_offset, x_mis, y_mis) :
-      0;
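+  // With no previous-frame segmentation map (e.g. on the first inter frame
+  // after a key frame), the prediction falls back to segment 0.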
+  predicted_segment_id = cm->last_frame_seg_map
+                             ? dec_get_segment_id(cm, cm->last_frame_seg_map,
+                                                  mi_offset, x_mis, y_mis)
+                             : 0;
 
   if (!seg->update_map) {
     copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
@@ -211,50 +205,46 @@ static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
     const vpx_prob pred_prob = segp->pred_probs[ctx];
     mbmi->seg_id_predicted = vpx_read(r, pred_prob);
 #if CONFIG_MISC_FIXES
-    if (counts)
-      ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
+    if (counts) ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
 #endif
     if (mbmi->seg_id_predicted) {
       segment_id = predicted_segment_id;
     } else {
       segment_id = read_segment_id(r, segp);
 #if CONFIG_MISC_FIXES
-      if (counts)
-        ++counts->seg.tree_mispred[segment_id];
+      if (counts) ++counts->seg.tree_mispred[segment_id];
 #endif
     }
   } else {
     segment_id = read_segment_id(r, segp);
 #if CONFIG_MISC_FIXES
-    if (counts)
-      ++counts->seg.tree_total[segment_id];
+    if (counts) ++counts->seg.tree_total[segment_id];
 #endif
   }
   set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
   return segment_id;
 }
 
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd,
-                     int segment_id, vpx_reader *r) {
+static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+                     vpx_reader *r) {
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
     return 1;
   } else {
     const int ctx = vp10_get_skip_context(xd);
     const int skip = vpx_read(r, cm->fc->skip_probs[ctx]);
     FRAME_COUNTS *counts = xd->counts;
-    if (counts)
-      ++counts->skip[ctx][skip];
+    if (counts) ++counts->skip[ctx][skip];
     return skip;
   }
 }
 
 static void read_intra_frame_mode_info(VP10_COMMON *const cm,
-                                       MACROBLOCKD *const xd,
-                                       int mi_row, int mi_col, vpx_reader *r) {
+                                       MACROBLOCKD *const xd, int mi_row,
+                                       int mi_col, vpx_reader *r) {
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const MODE_INFO *above_mi = xd->above_mi;
-  const MODE_INFO *left_mi  = xd->left_mi;
+  const MODE_INFO *left_mi = xd->left_mi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
   int i;
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
@@ -291,20 +281,19 @@ static void read_intra_frame_mode_info(VP10_COMMON *const cm,
           read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 2));
       break;
     default:
-      mbmi->mode = read_intra_mode(r,
-          get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
+      mbmi->mode =
+          read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
   }
 
   mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode);
 
-  if (mbmi->tx_size < TX_32X32 &&
-      cm->base_qindex > 0 && !mbmi->skip &&
+  if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
     FRAME_COUNTS *counts = xd->counts;
     TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
-    mbmi->tx_type = vpx_read_tree(
-        r, vp10_ext_tx_tree,
-        cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
+    mbmi->tx_type =
+        vpx_read_tree(r, vp10_ext_tx_tree,
+                      cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
     if (counts)
       ++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
   } else {
@@ -312,8 +301,8 @@ static void read_intra_frame_mode_info(VP10_COMMON *const cm,
   }
 }
 
-static int read_mv_component(vpx_reader *r,
-                             const nmv_component *mvcomp, int usehp) {
+static int read_mv_component(vpx_reader *r, const nmv_component *mvcomp,
+                             int usehp) {
   int mag, d, fr, hp;
   const int sign = vpx_read(r, mvcomp->sign);
   const int mv_class = vpx_read_tree(r, vp10_mv_class_tree, mvcomp->classes);
@@ -328,18 +317,16 @@ static int read_mv_component(vpx_reader *r,
     const int n = mv_class + CLASS0_BITS - 1;  // number of bits
 
     d = 0;
-    for (i = 0; i < n; ++i)
-      d |= vpx_read(r, mvcomp->bits[i]) << i;
+    for (i = 0; i < n; ++i) d |= vpx_read(r, mvcomp->bits[i]) << i;
     mag = CLASS0_SIZE << (mv_class + 2);
   }
 
   // Fractional part
-  fr = vpx_read_tree(r, vp10_mv_fp_tree, class0 ? mvcomp->class0_fp[d]
-                                               : mvcomp->fp);
+  fr = vpx_read_tree(r, vp10_mv_fp_tree,
+                     class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
 
   // High precision part (if hp is not used, hp defaults to 1)
-  hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
-             : 1;
+  hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
 
   // Result
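+  // (integer part d in whole pels, 2-bit fractional part fr and
+  // high-precision bit hp, packed into a 1/8-pel magnitude)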
   mag += ((d << 3) | (fr << 1) | hp) + 1;
@@ -347,12 +334,12 @@ static int read_mv_component(vpx_reader *r,
 }
 
 static INLINE void read_mv(vpx_reader *r, MV *mv, const MV *ref,
-                           const nmv_context *ctx,
-                           nmv_context_counts *counts, int allow_hp) {
+                           const nmv_context *ctx, nmv_context_counts *counts,
+                           int allow_hp) {
   const MV_JOINT_TYPE joint_type =
       (MV_JOINT_TYPE)vpx_read_tree(r, vp10_mv_joint_tree, ctx->joints);
   const int use_hp = allow_hp && vp10_use_mv_hp(ref);
-  MV diff = {0, 0};
+  MV diff = { 0, 0 };
 
   if (mv_joint_vertical(joint_type))
     diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
@@ -374,8 +361,7 @@ static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm,
     const REFERENCE_MODE mode =
         (REFERENCE_MODE)vpx_read(r, cm->fc->comp_inter_prob[ctx]);
     FRAME_COUNTS *counts = xd->counts;
-    if (counts)
-      ++counts->comp_inter[ctx][mode];
+    if (counts) ++counts->comp_inter[ctx][mode];
     return mode;  // SINGLE_REFERENCE or COMPOUND_REFERENCE
   } else {
     return cm->reference_mode;
@@ -384,8 +370,8 @@ static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm,
 
 // Read the reference frame
 static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
-                            vpx_reader *r,
-                            int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
+                            vpx_reader *r, int segment_id,
+                            MV_REFERENCE_FRAME ref_frame[2]) {
   FRAME_CONTEXT *const fc = cm->fc;
   FRAME_COUNTS *counts = xd->counts;
 
@@ -400,20 +386,17 @@ static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
       const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
       const int ctx = vp10_get_pred_context_comp_ref_p(cm, xd);
       const int bit = vpx_read(r, fc->comp_ref_prob[ctx]);
-      if (counts)
-        ++counts->comp_ref[ctx][bit];
+      if (counts) ++counts->comp_ref[ctx][bit];
       ref_frame[idx] = cm->comp_fixed_ref;
       ref_frame[!idx] = cm->comp_var_ref[bit];
     } else if (mode == SINGLE_REFERENCE) {
       const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
       const int bit0 = vpx_read(r, fc->single_ref_prob[ctx0][0]);
-      if (counts)
-        ++counts->single_ref[ctx0][0][bit0];
+      if (counts) ++counts->single_ref[ctx0][0][bit0];
       if (bit0) {
         const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
         const int bit1 = vpx_read(r, fc->single_ref_prob[ctx1][1]);
-        if (counts)
-          ++counts->single_ref[ctx1][1][bit1];
+        if (counts) ++counts->single_ref[ctx1][1][bit1];
         ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
       } else {
         ref_frame[0] = LAST_FRAME;
@@ -426,17 +409,14 @@ static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
   }
 }
 
-
-static INLINE INTERP_FILTER read_switchable_interp_filter(
-    VP10_COMMON *const cm, MACROBLOCKD *const xd,
-    vpx_reader *r) {
+static INLINE INTERP_FILTER read_switchable_interp_filter(VP10_COMMON *const cm,
+                                                          MACROBLOCKD *const xd,
+                                                          vpx_reader *r) {
   const int ctx = vp10_get_pred_context_switchable_interp(xd);
-  const INTERP_FILTER type =
-      (INTERP_FILTER)vpx_read_tree(r, vp10_switchable_interp_tree,
-                                   cm->fc->switchable_interp_prob[ctx]);
+  const INTERP_FILTER type = (INTERP_FILTER)vpx_read_tree(
+      r, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
   FRAME_COUNTS *counts = xd->counts;
-  if (counts)
-    ++counts->switchable_interp[ctx][type];
+  if (counts) ++counts->switchable_interp[ctx][type];
   return type;
 }
 
@@ -457,14 +437,12 @@ static void read_intra_block_mode_info(VP10_COMMON *const cm,
       mbmi->mode = mi->bmi[3].as_mode;
       break;
     case BLOCK_4X8:
-      mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd,
-                                                                  r, 0);
+      mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, r, 0);
       mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode =
           read_intra_mode_y(cm, xd, r, 0);
       break;
     case BLOCK_8X4:
-      mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd,
-                                                                  r, 0);
+      mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, r, 0);
       mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode =
           read_intra_mode_y(cm, xd, r, 0);
       break;
@@ -476,15 +454,15 @@ static void read_intra_block_mode_info(VP10_COMMON *const cm,
 }
 
 static INLINE int is_mv_valid(const MV *mv) {
-  return mv->row > MV_LOW && mv->row < MV_UPP &&
-         mv->col > MV_LOW && mv->col < MV_UPP;
+  return mv->row > MV_LOW && mv->row < MV_UPP && mv->col > MV_LOW &&
+         mv->col < MV_UPP;
 }
 
 static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
-                            PREDICTION_MODE mode,
-                            int_mv mv[2], int_mv ref_mv[2],
-                            int_mv nearest_mv[2], int_mv near_mv[2],
-                            int is_compound, int allow_hp, vpx_reader *r) {
+                            PREDICTION_MODE mode, int_mv mv[2],
+                            int_mv ref_mv[2], int_mv nearest_mv[2],
+                            int_mv near_mv[2], int is_compound, int allow_hp,
+                            vpx_reader *r) {
   int i;
   int ret = 1;
 
@@ -501,25 +479,20 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
     }
     case NEARESTMV: {
       mv[0].as_int = nearest_mv[0].as_int;
-      if (is_compound)
-        mv[1].as_int = nearest_mv[1].as_int;
+      if (is_compound) mv[1].as_int = nearest_mv[1].as_int;
       break;
     }
     case NEARMV: {
       mv[0].as_int = near_mv[0].as_int;
-      if (is_compound)
-        mv[1].as_int = near_mv[1].as_int;
+      if (is_compound) mv[1].as_int = near_mv[1].as_int;
       break;
     }
     case ZEROMV: {
       mv[0].as_int = 0;
-      if (is_compound)
-        mv[1].as_int = 0;
+      if (is_compound) mv[1].as_int = 0;
       break;
     }
-    default: {
-      return 0;
-    }
+    default: { return 0; }
   }
   return ret;
 }
@@ -532,8 +505,7 @@ static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd,
     const int ctx = vp10_get_intra_inter_context(xd);
     const int is_inter = vpx_read(r, cm->fc->intra_inter_prob[ctx]);
     FRAME_COUNTS *counts = xd->counts;
-    if (counts)
-      ++counts->intra_inter[ctx][is_inter];
+    if (counts) ++counts->intra_inter[ctx][is_inter];
     return is_inter;
   }
 }
@@ -541,13 +513,13 @@ static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd,
 static void fpm_sync(void *const data, int mi_row) {
   VP10Decoder *const pbi = (VP10Decoder *)data;
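+  // Frame-parallel decode: block until the previous frame's worker has
+  // decoded at least this row.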
   vp10_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
-                       mi_row << MI_BLOCK_SIZE_LOG2);
+                        mi_row << MI_BLOCK_SIZE_LOG2);
 }
 
 static void read_inter_block_mode_info(VP10Decoder *const pbi,
                                        MACROBLOCKD *const xd,
-                                       MODE_INFO *const mi,
-                                       int mi_row, int mi_col, vpx_reader *r) {
+                                       MODE_INFO *const mi, int mi_row,
+                                       int mi_col, vpx_reader *r) {
   VP10_COMMON *const cm = &pbi->common;
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
@@ -568,23 +540,22 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
     if ((!vp10_is_valid_scale(&ref_buf->sf)))
       vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
                          "Reference frame has invalid dimensions");
-    vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col,
-                         &ref_buf->sf);
-    vp10_find_mv_refs(cm, xd, mi, frame, ref_mvs[frame],
-                     mi_row, mi_col, fpm_sync, (void *)pbi, inter_mode_ctx);
+    vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
+    vp10_find_mv_refs(cm, xd, mi, frame, ref_mvs[frame], mi_row, mi_col,
+                      fpm_sync, (void *)pbi, inter_mode_ctx);
   }
 
   if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
     mbmi->mode = ZEROMV;
     if (bsize < BLOCK_8X8) {
-        vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
-                           "Invalid usage of segement feature on small blocks");
-        return;
+      vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+                         "Invalid usage of segement feature on small blocks");
+      return;
     }
   } else {
     if (bsize >= BLOCK_8X8)
-      mbmi->mode = read_inter_mode(cm, xd, r,
-                                   inter_mode_ctx[mbmi->ref_frame[0]]);
+      mbmi->mode =
+          read_inter_mode(cm, xd, r, inter_mode_ctx[mbmi->ref_frame[0]]);
   }
 
   if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
@@ -595,8 +566,8 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
   }
 
   mbmi->interp_filter = (cm->interp_filter == SWITCHABLE)
-                      ? read_switchable_interp_filter(cm, xd, r)
-                      : cm->interp_filter;
+                            ? read_switchable_interp_filter(cm, xd, r)
+                            : cm->interp_filter;
 
   if (bsize < BLOCK_8X8) {
     const int num_4x4_w = 1 << xd->bmode_blocks_wl;
@@ -614,26 +585,21 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
           uint8_t dummy_mode_ctx[MAX_REF_FRAMES];
           for (ref = 0; ref < 1 + is_compound; ++ref)
             vp10_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
-                                          &nearest_sub8x8[ref],
-                                          &near_sub8x8[ref],
-                                          dummy_mode_ctx);
+                                           &nearest_sub8x8[ref],
+                                           &near_sub8x8[ref], dummy_mode_ctx);
         }
 
-        if (!assign_mv(cm, xd, b_mode, block, nearestmv,
-                       nearest_sub8x8, near_sub8x8,
-                       is_compound, allow_hp, r)) {
+        if (!assign_mv(cm, xd, b_mode, block, nearestmv, nearest_sub8x8,
+                       near_sub8x8, is_compound, allow_hp, r)) {
           xd->corrupted |= 1;
           break;
         };
 
         mi->bmi[j].as_mv[0].as_int = block[0].as_int;
-        if (is_compound)
-          mi->bmi[j].as_mv[1].as_int = block[1].as_int;
+        if (is_compound) mi->bmi[j].as_mv[1].as_int = block[1].as_int;
 
-        if (num_4x4_h == 2)
-          mi->bmi[j + 2] = mi->bmi[j];
-        if (num_4x4_w == 2)
-          mi->bmi[j + 1] = mi->bmi[j];
+        if (num_4x4_h == 2) mi->bmi[j + 2] = mi->bmi[j];
+        if (num_4x4_w == 2) mi->bmi[j + 1] = mi->bmi[j];
       }
     }
 
@@ -648,8 +614,8 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
 }
 
 static void read_inter_frame_mode_info(VP10Decoder *const pbi,
-                                       MACROBLOCKD *const xd,
-                                       int mi_row, int mi_col, vpx_reader *r) {
+                                       MACROBLOCKD *const xd, int mi_row,
+                                       int mi_col, vpx_reader *r) {
   VP10_COMMON *const cm = &pbi->common;
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
@@ -667,21 +633,18 @@ static void read_inter_frame_mode_info(VP10Decoder *const pbi,
   else
     read_intra_block_mode_info(cm, xd, mi, r);
 
-  if (mbmi->tx_size < TX_32X32 &&
-      cm->base_qindex > 0 && !mbmi->skip &&
+  if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
     FRAME_COUNTS *counts = xd->counts;
     if (inter_block) {
-      mbmi->tx_type = vpx_read_tree(
-          r, vp10_ext_tx_tree,
-          cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
-      if (counts)
-        ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
+      mbmi->tx_type = vpx_read_tree(r, vp10_ext_tx_tree,
+                                    cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
+      if (counts) ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
     } else {
       const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
-      mbmi->tx_type = vpx_read_tree(
-          r, vp10_ext_tx_tree,
-          cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
+      mbmi->tx_type =
+          vpx_read_tree(r, vp10_ext_tx_tree,
+                        cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
       if (counts)
         ++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
     }
@@ -690,12 +653,11 @@ static void read_inter_frame_mode_info(VP10Decoder *const pbi,
   }
 }
 
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd,
-                        int mi_row, int mi_col, vpx_reader *r,
-                        int x_mis, int y_mis) {
+void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
+                         int mi_col, vpx_reader *r, int x_mis, int y_mis) {
   VP10_COMMON *const cm = &pbi->common;
   MODE_INFO *const mi = xd->mi[0];
-  MV_REF* frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+  MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
   int w, h;
 
   if (frame_is_intra_only(cm)) {
diff --git a/vp10/decoder/decodemv.h b/vp10/decoder/decodemv.h
index 6653be5f69138d6a2800f41054cddbcf2ed88327..f7bc73882963d9316afc91ee9c60a5d87435e800 100644
--- a/vp10/decoder/decodemv.h
+++ b/vp10/decoder/decodemv.h
@@ -19,9 +19,8 @@
 extern "C" {
 #endif
 
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd,
-                        int mi_row, int mi_col, vpx_reader *r,
-                        int x_mis, int y_mis);
+void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
+                         int mi_col, vpx_reader *r, int x_mis, int y_mis);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/decoder/decoder.c b/vp10/decoder/decoder.c
index 8144ef5621b6045bdea5051553ab5fd95168a4e5..f979abf2f2e02460b09fa6de13e1b8d4472ac636 100644
--- a/vp10/decoder/decoder.c
+++ b/vp10/decoder/decoder.c
@@ -54,12 +54,10 @@ static void vp10_dec_setup_mi(VP10_COMMON *cm) {
 
 static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) {
   cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
-  if (!cm->mip)
-    return 1;
+  if (!cm->mip) return 1;
   cm->mi_alloc_size = mi_size;
-  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
-  if (!cm->mi_grid_base)
-    return 1;
+  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+  if (!cm->mi_grid_base) return 1;
   return 0;
 }
 
@@ -74,8 +72,7 @@ VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
   VP10Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
   VP10_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
 
-  if (!cm)
-    return NULL;
+  if (!cm) return NULL;
 
   vp10_zero(*pbi);
 
@@ -87,11 +84,10 @@ VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
 
   cm->error.setjmp = 1;
 
-  CHECK_MEM_ERROR(cm, cm->fc,
-                  (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
-  CHECK_MEM_ERROR(cm, cm->frame_contexts,
-                  (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS,
-                  sizeof(*cm->frame_contexts)));
+  CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+  CHECK_MEM_ERROR(
+      cm, cm->frame_contexts,
+      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
 
   pbi->need_resync = 1;
   once(initialize_dec);
@@ -123,8 +119,7 @@ VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
 void vp10_decoder_remove(VP10Decoder *pbi) {
   int i;
 
-  if (!pbi)
-    return;
+  if (!pbi) return;
 
   vpx_get_worker_interface()->end(&pbi->lf_worker);
   vpx_free(pbi->lf_worker.data1);
@@ -146,8 +141,8 @@ void vp10_decoder_remove(VP10Decoder *pbi) {
 
 static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
                             const YV12_BUFFER_CONFIG *b) {
-    return a->y_height == b->y_height && a->y_width == b->y_width &&
-           a->uv_height == b->uv_height && a->uv_width == b->uv_width;
+  return a->y_height == b->y_height && a->y_width == b->y_width &&
+         a->uv_height == b->uv_height && a->uv_width == b->uv_width;
 }
 
 vpx_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
@@ -173,17 +168,15 @@ vpx_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
     else
       vpx_yv12_copy_frame(cfg, sd);
   } else {
-    vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
-                       "Invalid reference frame");
+    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
   }
 
   return cm->error.error_code;
 }
 
-
 vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
-                                      VPX_REFFRAME ref_frame_flag,
-                                      YV12_BUFFER_CONFIG *sd) {
+                                       VPX_REFFRAME ref_frame_flag,
+                                       YV12_BUFFER_CONFIG *sd) {
   RefBuffer *ref_buf = NULL;
   RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
 
@@ -198,8 +191,7 @@ vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
   } else if (ref_frame_flag == VPX_ALT_FLAG) {
     ref_buf = &cm->frame_refs[2];
   } else {
-    vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
-                       "Invalid reference frame");
+    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
     return cm->error.error_code;
   }
 
@@ -211,8 +203,7 @@ vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
 
     // Find an empty frame buffer.
     const int free_fb = get_free_fb(cm);
-    if (cm->new_fb_idx == INVALID_IDX)
-      return VPX_CODEC_MEM_ERROR;
+    if (free_fb == INVALID_IDX) return VPX_CODEC_MEM_ERROR;
 
     // Decrease ref_count since it will be increased again in
     // ref_cnt_fb() below.
@@ -269,8 +260,8 @@ static void swap_frame_buffers(VP10Decoder *pbi) {
     cm->frame_refs[ref_index].idx = -1;
 }
 
-int vp10_receive_compressed_data(VP10Decoder *pbi,
-                                size_t size, const uint8_t **psource) {
+int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
+                                 const uint8_t **psource) {
   VP10_COMMON *volatile const cm = &pbi->common;
   BufferPool *volatile const pool = cm->buffer_pool;
   RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
@@ -297,14 +288,13 @@ int vp10_receive_compressed_data(VP10Decoder *pbi,
 
   // Check if the previous frame was a frame without any references to it.
   // Release frame buffer if not decoding in frame parallel mode.
-  if (!cm->frame_parallel_decode && cm->new_fb_idx >= 0
-      && frame_bufs[cm->new_fb_idx].ref_count == 0)
+  if (!cm->frame_parallel_decode && cm->new_fb_idx >= 0 &&
+      frame_bufs[cm->new_fb_idx].ref_count == 0)
     pool->release_fb_cb(pool->cb_priv,
                         &frame_bufs[cm->new_fb_idx].raw_frame_buffer);
   // Find a free frame buffer. Return an error if none can be found.
   cm->new_fb_idx = get_free_fb(cm);
-  if (cm->new_fb_idx == INVALID_IDX)
-    return VPX_CODEC_MEM_ERROR;
+  if (cm->new_fb_idx == INVALID_IDX) return VPX_CODEC_MEM_ERROR;
 
   // Assign a MV array to the frame buffer.
   cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
@@ -323,7 +313,6 @@ int vp10_receive_compressed_data(VP10Decoder *pbi,
     pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
   }
 
-
   if (setjmp(cm->error.jmp)) {
     const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
     int i;
@@ -414,14 +403,12 @@ int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
   VP10_COMMON *const cm = &pbi->common;
   int ret = -1;
 
-  if (pbi->ready_for_new_data == 1)
-    return ret;
+  if (pbi->ready_for_new_data == 1) return ret;
 
   pbi->ready_for_new_data = 1;
 
   /* no raw frame to show!!! */
-  if (!cm->show_frame)
-    return ret;
+  if (!cm->show_frame) return ret;
 
   pbi->ready_for_new_data = 1;
 
@@ -431,11 +418,10 @@ int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
   return ret;
 }
 
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
-                                           size_t data_sz,
-                                           uint32_t sizes[8], int *count,
-                                           vpx_decrypt_cb decrypt_cb,
-                                           void *decrypt_state) {
+vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
+                                            uint32_t sizes[8], int *count,
+                                            vpx_decrypt_cb decrypt_cb,
+                                            void *decrypt_state) {
   // A chunk ending with a byte matching 0xc0 is an invalid chunk unless
   // it is a super frame index. If the last byte of real video compression
   // data is 0xc0 the encoder must add a 0 byte. If we have the marker but
@@ -458,18 +444,16 @@ vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
 
     // This chunk is marked as having a superframe index but doesn't have
     // enough data for it, thus it's an invalid superframe index.
-    if (data_sz < index_sz)
-      return VPX_CODEC_CORRUPT_FRAME;
+    if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;
 
     {
-      const uint8_t marker2 = read_marker(decrypt_cb, decrypt_state,
-                                          data + data_sz - index_sz);
+      const uint8_t marker2 =
+          read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);
 
       // This chunk is marked as having a superframe index but doesn't have
       // the matching marker byte at the front of the index therefore it's an
       // invalid chunk.
-      if (marker != marker2)
-        return VPX_CODEC_CORRUPT_FRAME;
+      if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
     }
 
     {
@@ -488,8 +472,7 @@ vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
       for (i = 0; i < frames - CONFIG_MISC_FIXES; ++i) {
         uint32_t this_sz = 0;
 
-        for (j = 0; j < mag; ++j)
-          this_sz |= (*x++) << (j * 8);
+        for (j = 0; j < mag; ++j) this_sz |= (*x++) << (j * 8);
         this_sz += CONFIG_MISC_FIXES;
         sizes[i] = this_sz;
 #if CONFIG_MISC_FIXES
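
The superframe logic above follows the VP9-style index layout: a marker byte of the form 0b110MMFFF appears at both ends of the index, FFF + 1 gives the frame count, MM + 1 the bytes per size, and each size is stored little-endian (the this_sz |= (*x++) << (j * 8) loop). The sketch below walks that plain layout; it is a hypothetical helper that ignores the decryption callback and the CONFIG_MISC_FIXES variant, which stores one fewer size and biases each stored size by one:

#include <stddef.h>
#include <stdint.h>

static int sketch_parse_superframe(const uint8_t *data, size_t data_sz,
                                   uint32_t sizes[8], int *count) {
  const uint8_t marker = data[data_sz - 1];
  if ((marker & 0xe0) == 0xc0) {               /* top bits 0b110: an index? */
    const int frames = (marker & 0x7) + 1;     /* FFF + 1 frames */
    const int mag = ((marker >> 3) & 0x3) + 1; /* MM + 1 bytes per size */
    const size_t index_sz = 2 + (size_t)mag * frames;
    /* The same marker byte must bracket the index at the front, too. */
    if (data_sz >= index_sz && data[data_sz - index_sz] == marker) {
      const uint8_t *x = &data[data_sz - index_sz + 1];
      int i, j;
      for (i = 0; i < frames; ++i) {
        uint32_t this_sz = 0;
        for (j = 0; j < mag; ++j) this_sz |= (uint32_t)(*x++) << (j * 8);
        sizes[i] = this_sz; /* little-endian, mag bytes per frame size */
      }
      *count = frames;
      return 0;
    }
  }
  return -1; /* no superframe index: treat the chunk as a single frame */
}
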
diff --git a/vp10/decoder/decoder.h b/vp10/decoder/decoder.h
index 2dec2223a0365300884d5a6771007fb21706242d..45d4f3d3929a496480fcc66a58fa2ca6f88db70d 100644
--- a/vp10/decoder/decoder.h
+++ b/vp10/decoder/decoder.h
@@ -58,9 +58,9 @@ typedef struct VP10Decoder {
 
   // TODO(hkuang): Combine this with cur_buf in macroblockd as they are
   // the same.
-  RefCntBuffer *cur_buf;   //  Current decoding frame buffer.
+  RefCntBuffer *cur_buf;  //  Current decoding frame buffer.
 
-  VPxWorker *frame_worker_owner;   // frame_worker that owns this pbi.
+  VPxWorker *frame_worker_owner;  // frame_worker that owns this pbi.
   VPxWorker lf_worker;
   VPxWorker *tile_workers;
   TileWorkerData *tile_worker_data;
@@ -77,26 +77,25 @@ typedef struct VP10Decoder {
 
   int max_threads;
   int inv_tile_order;
-  int need_resync;  // wait for key/intra-only frame.
+  int need_resync;   // wait for key/intra-only frame.
   int hold_ref_buf;  // hold the reference buffer.
 } VP10Decoder;
 
-int vp10_receive_compressed_data(struct VP10Decoder *pbi,
-                                size_t size, const uint8_t **dest);
+int vp10_receive_compressed_data(struct VP10Decoder *pbi, size_t size,
+                                 const uint8_t **dest);
 
 int vp10_get_raw_frame(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd);
 
 vpx_codec_err_t vp10_copy_reference_dec(struct VP10Decoder *pbi,
-                                       VPX_REFFRAME ref_frame_flag,
-                                       YV12_BUFFER_CONFIG *sd);
+                                        VPX_REFFRAME ref_frame_flag,
+                                        YV12_BUFFER_CONFIG *sd);
 
 vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
-                                      VPX_REFFRAME ref_frame_flag,
-                                      YV12_BUFFER_CONFIG *sd);
+                                       VPX_REFFRAME ref_frame_flag,
+                                       YV12_BUFFER_CONFIG *sd);
 
 static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
-                                  void *decrypt_state,
-                                  const uint8_t *data) {
+                                  void *decrypt_state, const uint8_t *data) {
   if (decrypt_cb) {
     uint8_t marker;
     decrypt_cb(decrypt_state, data, &marker, 1);
@@ -107,11 +106,10 @@ static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
 
 // This function is exposed for use in tests, as well as the inlined function
 // "read_marker".
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
-                                           size_t data_sz,
-                                           uint32_t sizes[8], int *count,
-                                           vpx_decrypt_cb decrypt_cb,
-                                           void *decrypt_state);
+vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
+                                            uint32_t sizes[8], int *count,
+                                            vpx_decrypt_cb decrypt_cb,
+                                            void *decrypt_state);
 
 struct VP10Decoder *vp10_decoder_create(BufferPool *const pool);
 
diff --git a/vp10/decoder/detokenize.c b/vp10/decoder/detokenize.c
index 04bc0b80619b8834f7b0e3d9e06194fba4f50025..e57d1efde89c6fdfa318347c2572f9e7594ee4bf 100644
--- a/vp10/decoder/detokenize.c
+++ b/vp10/decoder/detokenize.c
@@ -20,33 +20,30 @@
 
 #include "vp10/decoder/detokenize.h"
 
-#define EOB_CONTEXT_NODE            0
-#define ZERO_CONTEXT_NODE           1
-#define ONE_CONTEXT_NODE            2
-#define LOW_VAL_CONTEXT_NODE        0
-#define TWO_CONTEXT_NODE            1
-#define THREE_CONTEXT_NODE          2
-#define HIGH_LOW_CONTEXT_NODE       3
-#define CAT_ONE_CONTEXT_NODE        4
-#define CAT_THREEFOUR_CONTEXT_NODE  5
-#define CAT_THREE_CONTEXT_NODE      6
-#define CAT_FIVE_CONTEXT_NODE       7
+#define EOB_CONTEXT_NODE 0
+#define ZERO_CONTEXT_NODE 1
+#define ONE_CONTEXT_NODE 2
+#define LOW_VAL_CONTEXT_NODE 0
+#define TWO_CONTEXT_NODE 1
+#define THREE_CONTEXT_NODE 2
+#define HIGH_LOW_CONTEXT_NODE 3
+#define CAT_ONE_CONTEXT_NODE 4
+#define CAT_THREEFOUR_CONTEXT_NODE 5
+#define CAT_THREE_CONTEXT_NODE 6
+#define CAT_FIVE_CONTEXT_NODE 7
 
-#define INCREMENT_COUNT(token)                              \
-  do {                                                      \
-     if (counts)                                            \
-       ++coef_counts[band][ctx][token];                     \
+#define INCREMENT_COUNT(token)                   \
+  do {                                           \
+    if (counts) ++coef_counts[band][ctx][token]; \
   } while (0)
 
 static INLINE int read_coeff(const vpx_prob *probs, int n, vpx_reader *r) {
   int i, val = 0;
-  for (i = 0; i < n; ++i)
-    val = (val << 1) | vpx_read(r, probs[i]);
+  for (i = 0; i < n; ++i) val = (val << 1) | vpx_read(r, probs[i]);
   return val;
 }
 
-static int decode_coefs(const MACROBLOCKD *xd,
-                        PLANE_TYPE type,
+static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
                         tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                         int ctx, const int16_t *scan, const int16_t *nb,
                         vpx_reader *r) {
@@ -55,11 +52,11 @@ static int decode_coefs(const MACROBLOCKD *xd,
   const FRAME_CONTEXT *const fc = xd->fc;
   const int ref = is_inter_block(&xd->mi[0]->mbmi);
   int band, c = 0;
-  const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+  const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
       fc->coef_probs[tx_size][type][ref];
   const vpx_prob *prob;
-  unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
-  unsigned int (*eob_branch_count)[COEFF_CONTEXTS];
+  unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
+  unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
   uint8_t token_cache[32 * 32];
   const uint8_t *band_translate = get_band_translate(tx_size);
   const int dq_shift = (tx_size == TX_32X32);
@@ -115,8 +112,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
     int val = -1;
     band = *band_translate++;
     prob = coef_probs[band][ctx];
-    if (counts)
-      ++eob_branch_count[band][ctx];
+    if (counts) ++eob_branch_count[band][ctx];
     if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) {
       INCREMENT_COUNT(EOB_MODEL_TOKEN);
       break;
@@ -127,8 +123,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
       dqv = dq[1];
       token_cache[scan[c]] = 0;
       ++c;
-      if (c >= max_eob)
-        return c;  // zero tokens at the end (no eob token)
+      if (c >= max_eob) return c;  // zero tokens at the end (no eob token)
       ctx = get_coef_context(nb, token_cache, c);
       band = *band_translate++;
       prob = coef_probs[band][ctx];
@@ -145,9 +140,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
       switch (token) {
         case TWO_TOKEN:
         case THREE_TOKEN:
-        case FOUR_TOKEN:
-          val = token;
-          break;
+        case FOUR_TOKEN: val = token; break;
         case CATEGORY1_TOKEN:
           val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, r);
           break;
@@ -181,9 +174,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
             case VPX_BITS_12:
               val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, r);
               break;
-            default:
-              assert(0);
-              return -1;
+            default: assert(0); return -1;
           }
 #else
           val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
@@ -195,8 +186,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
     v = (val * dqv) >> dq_shift;
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
 #if CONFIG_VPX_HIGHBITDEPTH
-    dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v),
-                                          xd->bd);
+    dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v), xd->bd);
 #else
     dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -212,12 +202,12 @@ static int decode_coefs(const MACROBLOCKD *xd,
   return c;
 }
 
-// TODO(slavarnway): Decode version of vp10_set_context.  Modify vp10_set_context
+// TODO(slavarnway): Decode version of vp10_set_context.  Modify
+// vp10_set_context
 // after testing is complete, then delete this version.
-static
-void dec_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
-                      TX_SIZE tx_size, int has_eob,
-                      int aoff, int loff) {
+static void dec_set_contexts(const MACROBLOCKD *xd,
+                             struct macroblockd_plane *pd, TX_SIZE tx_size,
+                             int has_eob, int aoff, int loff) {
   ENTROPY_CONTEXT *const a = pd->above_context + aoff;
   ENTROPY_CONTEXT *const l = pd->left_context + loff;
   const int tx_size_in_blocks = 1 << tx_size;
@@ -225,16 +215,14 @@ void dec_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
   // above
   if (has_eob && xd->mb_to_right_edge < 0) {
     int i;
-    const int blocks_wide = pd->n4_w +
-                            (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+    const int blocks_wide =
+        pd->n4_w + (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
     int above_contexts = tx_size_in_blocks;
     if (above_contexts + aoff > blocks_wide)
       above_contexts = blocks_wide - aoff;
 
-    for (i = 0; i < above_contexts; ++i)
-      a[i] = has_eob;
-    for (i = above_contexts; i < tx_size_in_blocks; ++i)
-      a[i] = 0;
+    for (i = 0; i < above_contexts; ++i) a[i] = has_eob;
+    for (i = above_contexts; i < tx_size_in_blocks; ++i) a[i] = 0;
   } else {
     memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
   }
@@ -242,35 +230,27 @@ void dec_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
   // left
   if (has_eob && xd->mb_to_bottom_edge < 0) {
     int i;
-    const int blocks_high = pd->n4_h +
-                            (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+    const int blocks_high =
+        pd->n4_h + (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
     int left_contexts = tx_size_in_blocks;
-    if (left_contexts + loff > blocks_high)
-      left_contexts = blocks_high - loff;
+    if (left_contexts + loff > blocks_high) left_contexts = blocks_high - loff;
 
-    for (i = 0; i < left_contexts; ++i)
-      l[i] = has_eob;
-    for (i = left_contexts; i < tx_size_in_blocks; ++i)
-      l[i] = 0;
+    for (i = 0; i < left_contexts; ++i) l[i] = has_eob;
+    for (i = left_contexts; i < tx_size_in_blocks; ++i) l[i] = 0;
   } else {
     memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
   }
 }
 
-int vp10_decode_block_tokens(MACROBLOCKD *xd,
-                            int plane, const scan_order *sc,
-                            int x, int y,
-                            TX_SIZE tx_size, vpx_reader *r,
-                            int seg_id) {
+int vp10_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
+                             int x, int y, TX_SIZE tx_size, vpx_reader *r,
+                             int seg_id) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const int16_t *const dequant = pd->seg_dequant[seg_id];
-  const int ctx = get_entropy_context(tx_size, pd->above_context + x,
-                                               pd->left_context + y);
-  const int eob = decode_coefs(xd, pd->plane_type,
-                               pd->dqcoeff, tx_size,
+  const int ctx =
+      get_entropy_context(tx_size, pd->above_context + x, pd->left_context + y);
+  const int eob = decode_coefs(xd, pd->plane_type, pd->dqcoeff, tx_size,
                                dequant, ctx, sc->scan, sc->neighbors, r);
   dec_set_contexts(xd, pd, tx_size, eob > 0, x, y);
   return eob;
 }
-
-
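
read_coeff above assembles an n-bit magnitude most-significant-bit first, one arithmetic-coded bit at a time, each bit with its own probability from the category tables (cat1_prob through the cat6 tables). decode_coefs then offsets the result by the category's minimum (CAT1_MIN_VAL through CAT6_MIN_VAL), so every token category covers a disjoint value range. A self-contained restatement, with read_bit standing in for vpx_read:

#include <stdint.h>

static int sketch_read_coeff(int (*read_bit)(void *r, uint8_t prob), void *r,
                             const uint8_t *probs, int n) {
  int i, val = 0;
  for (i = 0; i < n; ++i)
    val = (val << 1) | read_bit(r, probs[i]); /* MSB first */
  return val; /* n bits: a value in [0, 2^n - 1], before the CAT_x offset */
}
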
diff --git a/vp10/decoder/detokenize.h b/vp10/decoder/detokenize.h
index c3fd90a7282ddc0e9ebeeab16b154e867c221545..d7b459dcd946fb5ce518c98308fe07395f2d5be4 100644
--- a/vp10/decoder/detokenize.h
+++ b/vp10/decoder/detokenize.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_DECODER_DETOKENIZE_H_
 #define VP10_DECODER_DETOKENIZE_H_
 
@@ -20,11 +19,9 @@
 extern "C" {
 #endif
 
-int vp10_decode_block_tokens(MACROBLOCKD *xd,
-                            int plane, const scan_order *sc,
-                            int x, int y,
-                            TX_SIZE tx_size, vpx_reader *r,
-                            int seg_id);
+int vp10_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
+                             int x, int y, TX_SIZE tx_size, vpx_reader *r,
+                             int seg_id);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/decoder/dsubexp.c b/vp10/decoder/dsubexp.c
index 36c1917bc2b436c5684820d1c7067f245547dfdf..4e1ccc0f0aa2c4946be2987f874a2094849b17c6 100644
--- a/vp10/decoder/dsubexp.c
+++ b/vp10/decoder/dsubexp.c
@@ -15,8 +15,7 @@
 #include "vp10/decoder/dsubexp.h"
 
 static int inv_recenter_nonneg(int v, int m) {
-  if (v > 2 * m)
-    return v;
+  if (v > 2 * m) return v;
 
   return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
 }
@@ -25,10 +24,11 @@ static int decode_uniform(vpx_reader *r) {
   const int l = 8;
   const int m = (1 << l) - 191 + CONFIG_MISC_FIXES;
   const int v = vpx_read_literal(r, l - 1);
-  return v < m ?  v : (v << 1) - m + vpx_read_bit(r);
+  return v < m ? v : (v << 1) - m + vpx_read_bit(r);
 }
 
 static int inv_remap_prob(int v, int m) {
+  /* clang-format off */
   static uint8_t inv_map_table[MAX_PROB - CONFIG_MISC_FIXES] = {
       7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176, 189,
     202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,  10,  11,
@@ -50,7 +50,7 @@ static int inv_remap_prob(int v, int m) {
 #if !CONFIG_MISC_FIXES
     253
 #endif
-  };
+  };
+  /* clang-format on */
   assert(v < (int)(sizeof(inv_map_table) / sizeof(inv_map_table[0])));
   v = inv_map_table[v];
   m--;
@@ -62,16 +62,13 @@ static int inv_remap_prob(int v, int m) {
 }
 
 static int decode_term_subexp(vpx_reader *r) {
-  if (!vpx_read_bit(r))
-    return vpx_read_literal(r, 4);
-  if (!vpx_read_bit(r))
-    return vpx_read_literal(r, 4) + 16;
-  if (!vpx_read_bit(r))
-    return vpx_read_literal(r, 5) + 32;
+  if (!vpx_read_bit(r)) return vpx_read_literal(r, 4);
+  if (!vpx_read_bit(r)) return vpx_read_literal(r, 4) + 16;
+  if (!vpx_read_bit(r)) return vpx_read_literal(r, 5) + 32;
   return decode_uniform(r) + 64;
 }
 
-void vp10_diff_update_prob(vpx_reader *r, vpx_prob* p) {
+void vp10_diff_update_prob(vpx_reader *r, vpx_prob *p) {
   if (vpx_read(r, DIFF_UPDATE_PROB)) {
     const int delp = decode_term_subexp(r);
     *p = (vpx_prob)inv_remap_prob(delp, *p);
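
decode_term_subexp's three flag bits split the decodable probability deltas into 0-15 (4-bit literal), 16-31 (literal + 16), 32-63 (5-bit literal + 32), and 64 and up (decode_uniform + 64). The delta is then mapped through inv_map_table and recentered around the old probability by inv_recenter_nonneg, whose small inputs fan out alternately around m. That fan-out is easiest to see numerically; a small, self-contained check:

#include <stdio.h>

static int sketch_inv_recenter_nonneg(int v, int m) {
  if (v > 2 * m) return v;            /* large offsets pass through */
  return (v & 1) ? m - ((v + 1) >> 1) /* odd: step below m */
                 : m + (v >> 1);      /* even: step above m */
}

int main(void) {
  int v;
  for (v = 0; v <= 5; ++v) printf("%d ", sketch_inv_recenter_nonneg(v, 5));
  printf("\n"); /* prints "5 4 6 3 7 2": values fan out around m = 5 */
  return 0;
}
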
diff --git a/vp10/decoder/dsubexp.h b/vp10/decoder/dsubexp.h
index 1a7ed991049c9cef6a6f85b5f114ecfa280a45c5..8d47df47628e3208450ea190b2a472a4460d1028 100644
--- a/vp10/decoder/dsubexp.h
+++ b/vp10/decoder/dsubexp.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_DECODER_DSUBEXP_H_
 #define VP10_DECODER_DSUBEXP_H_
 
@@ -18,7 +17,7 @@
 extern "C" {
 #endif
 
-void vp10_diff_update_prob(vpx_reader *r, vpx_prob* p);
+void vp10_diff_update_prob(vpx_reader *r, vpx_prob *p);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/decoder/dthread.c b/vp10/decoder/dthread.c
index ac3cd2bb9f74cc945570344ead5207da6236a989..4dbcfbc7171a96dc71779d1afa01f057061d64ba 100644
--- a/vp10/decoder/dthread.c
+++ b/vp10/decoder/dthread.c
@@ -60,10 +60,9 @@ void vp10_frameworker_signal_stats(VPxWorker *const worker) {
 
 // TODO(hkuang): Remove worker parameter as it is only used in debug code.
 void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
-                          int row) {
+                           int row) {
 #if CONFIG_MULTITHREAD
-  if (!ref_buf)
-    return;
+  if (!ref_buf) return;
 
 #ifndef BUILDING_WITH_TSAN
   // The next line of code triggers a harmless tsan error but it is the key
@@ -135,7 +134,7 @@ void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row) {
 }
 
 void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
-                                  VPxWorker *const src_worker) {
+                                   VPxWorker *const src_worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
   FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
@@ -147,11 +146,12 @@ void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
   vp10_frameworker_lock_stats(src_worker);
   while (!src_worker_data->frame_context_ready) {
     pthread_cond_wait(&src_worker_data->stats_cond,
-        &src_worker_data->stats_mutex);
+                      &src_worker_data->stats_mutex);
   }
 
-  dst_cm->last_frame_seg_map = src_cm->seg.enabled ?
-      src_cm->current_frame_seg_map : src_cm->last_frame_seg_map;
+  dst_cm->last_frame_seg_map = src_cm->seg.enabled
+                                   ? src_cm->current_frame_seg_map
+                                   : src_cm->last_frame_seg_map;
   dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
   vp10_frameworker_unlock_stats(src_worker);
 
@@ -159,17 +159,18 @@ void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
 #if CONFIG_VPX_HIGHBITDEPTH
   dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
 #endif
-  dst_cm->prev_frame = src_cm->show_existing_frame ?
-                       src_cm->prev_frame : src_cm->cur_frame;
-  dst_cm->last_width = !src_cm->show_existing_frame ?
-                       src_cm->width : src_cm->last_width;
-  dst_cm->last_height = !src_cm->show_existing_frame ?
-                        src_cm->height : src_cm->last_height;
+  dst_cm->prev_frame =
+      src_cm->show_existing_frame ? src_cm->prev_frame : src_cm->cur_frame;
+  dst_cm->last_width =
+      !src_cm->show_existing_frame ? src_cm->width : src_cm->last_width;
+  dst_cm->last_height =
+      !src_cm->show_existing_frame ? src_cm->height : src_cm->last_height;
   dst_cm->subsampling_x = src_cm->subsampling_x;
   dst_cm->subsampling_y = src_cm->subsampling_y;
   dst_cm->frame_type = src_cm->frame_type;
-  dst_cm->last_show_frame = !src_cm->show_existing_frame ?
-                            src_cm->show_frame : src_cm->last_show_frame;
+  dst_cm->last_show_frame = !src_cm->show_existing_frame
+                                ? src_cm->show_frame
+                                : src_cm->last_show_frame;
   for (i = 0; i < REF_FRAMES; ++i)
     dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i];
 
@@ -183,7 +184,7 @@ void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
   memcpy(dst_cm->frame_contexts, src_cm->frame_contexts,
          FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0]));
 #else
-  (void) dst_worker;
-  (void) src_worker;
+  (void)dst_worker;
+  (void)src_worker;
 #endif  // CONFIG_MULTITHREAD
 }
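
vp10_frameworker_wait and vp10_frameworker_broadcast implement a row-granular progress monitor: the worker decoding a reference frame publishes how far it has gotten and wakes any waiters, while a dependent worker blocks until the row it needs is ready (fpm_sync in decodemv.c converts mi_row to that granularity with << MI_BLOCK_SIZE_LOG2). A minimal pthread sketch of the same pattern, with invented names and initialization left to the caller:

#include <pthread.h>

typedef struct {
  pthread_mutex_t mutex; /* e.g. PTHREAD_MUTEX_INITIALIZER */
  pthread_cond_t cond;   /* e.g. PTHREAD_COND_INITIALIZER */
  int rows_done;
} sketch_progress;

/* Producer: publish decoding progress and wake all waiters. */
static void sketch_publish(sketch_progress *p, int row) {
  pthread_mutex_lock(&p->mutex);
  if (row > p->rows_done) p->rows_done = row;
  pthread_cond_broadcast(&p->cond);
  pthread_mutex_unlock(&p->mutex);
}

/* Consumer: block until at least the requested number of rows is done. */
static void sketch_wait(sketch_progress *p, int row) {
  pthread_mutex_lock(&p->mutex);
  while (p->rows_done < row) /* loop guards against spurious wakeups */
    pthread_cond_wait(&p->cond, &p->mutex);
  pthread_mutex_unlock(&p->mutex);
}
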
diff --git a/vp10/decoder/dthread.h b/vp10/decoder/dthread.h
index 1b0dc0191d2b9e38c16abdac9ebacd34c1e9e0e8..cb78462ca563436607831abe19e90f9b5526bceb 100644
--- a/vp10/decoder/dthread.h
+++ b/vp10/decoder/dthread.h
@@ -57,7 +57,7 @@ void vp10_frameworker_signal_stats(VPxWorker *const worker);
 // start decoding the next frame. So we need to check whether the worker is
 // still decoding ref_buf.
 void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
-                          int row);
+                           int row);
 
 // FrameWorker broadcasts its decoding progress so other workers that are
 // waiting on it can resume decoding.
@@ -65,10 +65,10 @@ void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row);
 
 // Copy necessary decoding context from src worker to dst worker.
 void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
-                                  VPxWorker *const src_worker);
+                                   VPxWorker *const src_worker);
 
 #ifdef __cplusplus
-}    // extern "C"
+}  // extern "C"
 #endif
 
 #endif  // VP10_DECODER_DTHREAD_H_
diff --git a/vp10/encoder/aq_complexity.c b/vp10/encoder/aq_complexity.c
index 2506a4e5525414bf3d8acb48f9dbf901e4eb8d21..912babc3c0583ab96033e409216e2fd571884abc 100644
--- a/vp10/encoder/aq_complexity.c
+++ b/vp10/encoder/aq_complexity.c
@@ -19,25 +19,27 @@
 #include "vpx_dsp/vpx_dsp_common.h"
 #include "vpx_ports/system_state.h"
 
-#define AQ_C_SEGMENTS  5
-#define DEFAULT_AQ2_SEG 3   // Neutral Q segment
+#define AQ_C_SEGMENTS 5
+#define DEFAULT_AQ2_SEG 3  // Neutral Q segment
 #define AQ_C_STRENGTHS 3
-static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
-  { {1.75, 1.25, 1.05, 1.00, 0.90},
-    {2.00, 1.50, 1.15, 1.00, 0.85},
-    {2.50, 1.75, 1.25, 1.00, 0.80} };
-static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
-  { {0.15, 0.30, 0.55, 2.00, 100.0},
-    {0.20, 0.40, 0.65, 2.00, 100.0},
-    {0.25, 0.50, 0.75, 2.00, 100.0} };
-static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
-  { {-4.0, -3.0, -2.0, 100.00, 100.0},
-    {-3.5, -2.5, -1.5, 100.00, 100.0},
-    {-3.0, -2.0, -1.0, 100.00, 100.0} };
+static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+  { 1.75, 1.25, 1.05, 1.00, 0.90 },
+  { 2.00, 1.50, 1.15, 1.00, 0.85 },
+  { 2.50, 1.75, 1.25, 1.00, 0.80 }
+};
+static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+  { 0.15, 0.30, 0.55, 2.00, 100.0 },
+  { 0.20, 0.40, 0.65, 2.00, 100.0 },
+  { 0.25, 0.50, 0.75, 2.00, 100.0 }
+};
+static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+  { -4.0, -3.0, -2.0, 100.00, 100.0 },
+  { -3.5, -2.5, -1.5, 100.00, 100.0 },
+  { -3.0, -2.0, -1.0, 100.00, 100.0 }
+};
 
 #define DEFAULT_COMPLEXITY 64
 
-
 static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) {
   // Approximate base quantizer (truncated to int)
   const int base_quant = vp10_ac_quant(q_index, 0, bit_depth) / 4;
@@ -81,14 +83,11 @@ void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
     for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
       int qindex_delta;
 
-      if (segment == DEFAULT_AQ2_SEG)
-        continue;
-
-      qindex_delta =
-        vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
-                                   aq_c_q_adj_factor[aq_strength][segment],
-                                   cm->bit_depth);
+      if (segment == DEFAULT_AQ2_SEG) continue;
 
+      qindex_delta = vp10_compute_qdelta_by_rate(
+          &cpi->rc, cm->frame_type, cm->base_qindex,
+          aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
 
       // For AQ complexity mode, we don't allow Q0 in a segment if the base
       // Q is not 0. Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment
@@ -112,7 +111,7 @@ void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
 // The choice of segment for a block depends on the ratio of the projected
 // bits for the block vs a target average and its spatial complexity.
 void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
-                            int mi_row, int mi_col, int projected_rate) {
+                             int mi_row, int mi_col, int projected_rate) {
   VP10_COMMON *const cm = &cpi->common;
 
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
@@ -129,26 +128,25 @@ void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
   } else {
     // Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh).
     // It is converted to bits * 256 units.
-    const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) /
-                            (bw * bh);
+    const int target_rate =
+        (cpi->rc.sb64_target_rate * xmis * ymis * 256) / (bw * bh);
     double logvar;
     double low_var_thresh;
     const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
 
     vpx_clear_system_state();
-    low_var_thresh = (cpi->oxcf.pass == 2)
-      ? VPXMAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH)
-      : DEFAULT_LV_THRESH;
+    low_var_thresh = (cpi->oxcf.pass == 2) ? VPXMAX(cpi->twopass.mb_av_energy,
+                                                    MIN_DEFAULT_LV_THRESH)
+                                           : DEFAULT_LV_THRESH;
 
     vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
     logvar = vp10_log_block_var(cpi, mb, bs);
 
-    segment = AQ_C_SEGMENTS - 1;    // Just in case no break out below.
+    segment = AQ_C_SEGMENTS - 1;  // Just in case no break out below.
     for (i = 0; i < AQ_C_SEGMENTS; ++i) {
       // Test rate against a threshold value and variance against a threshold.
       // Increasing segment number (higher variance and complexity) = higher Q.
-      if ((projected_rate <
-           target_rate * aq_c_transitions[aq_strength][i]) &&
+      if ((projected_rate < target_rate * aq_c_transitions[aq_strength][i]) &&
           (logvar < (low_var_thresh + aq_c_var_thresholds[aq_strength][i]))) {
         segment = i;
         break;
diff --git a/vp10/encoder/aq_complexity.h b/vp10/encoder/aq_complexity.h
index f9de2ada3e6ea780d8c825e7ade9d6e471ce56f9..5ed461bc68bfafd181c337ebbc24f0d3d8689182 100644
--- a/vp10/encoder/aq_complexity.h
+++ b/vp10/encoder/aq_complexity.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_AQ_COMPLEXITY_H_
 #define VP10_ENCODER_AQ_COMPLEXITY_H_
 
@@ -23,8 +22,8 @@ struct macroblock;
 
 // Select a segment for the current Block.
 void vp10_caq_select_segment(struct VP10_COMP *cpi, struct macroblock *,
-                            BLOCK_SIZE bs,
-                            int mi_row, int mi_col, int projected_rate);
+                             BLOCK_SIZE bs, int mi_row, int mi_col,
+                             int projected_rate);
 
 // This function sets up a set of segments with delta Q values around
 // the baseline frame quantizer.
diff --git a/vp10/encoder/aq_cyclicrefresh.c b/vp10/encoder/aq_cyclicrefresh.c
index 7a36d6128bf0d85eb9d84352c42e04446a4c2b99..af49ee629acc6eaa43a8f1515585a1b637ec437d 100644
--- a/vp10/encoder/aq_cyclicrefresh.c
+++ b/vp10/encoder/aq_cyclicrefresh.c
@@ -59,8 +59,7 @@ struct CYCLIC_REFRESH {
 CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
   size_t last_coded_q_map_size;
   CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr));
-  if (cr == NULL)
-    return NULL;
+  if (cr == NULL) return NULL;
 
   cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map));
   if (cr->map == NULL) {
@@ -94,7 +93,7 @@ static int apply_cyclic_refresh_bitrate(const VP10_COMMON *cm,
   // Average bits available per frame = avg_frame_bandwidth
   // Number of (8x8) blocks in frame = mi_rows * mi_cols;
   const float factor = 0.25;
-  const int number_blocks = cm->mi_rows  * cm->mi_cols;
+  const int number_blocks = cm->mi_rows * cm->mi_cols;
   // The condition below corresponds to turning off at target bitrates:
   // (at 30fps), ~12kbps for CIF, 36kbps for VGA, 100kbps for HD/720p.
   // Also turn off at very small frame sizes, to avoid too large fraction of
@@ -111,10 +110,8 @@ static int apply_cyclic_refresh_bitrate(const VP10_COMMON *cm,
 // size of the coding block (i.e., below min_block size rejected), coding
 // mode, and rate/distortion.
 static int candidate_refresh_aq(const CYCLIC_REFRESH *cr,
-                                const MB_MODE_INFO *mbmi,
-                                int64_t rate,
-                                int64_t dist,
-                                int bsize) {
+                                const MB_MODE_INFO *mbmi, int64_t rate,
+                                int64_t dist, int bsize) {
   MV mv = mbmi->mv[0].as_mv;
   // Reject the block for lower-qp coding if projected distortion
   // is above the threshold, and any of the following is true:
@@ -126,11 +123,9 @@ static int candidate_refresh_aq(const CYCLIC_REFRESH *cr,
        mv.col > cr->motion_thresh || mv.col < -cr->motion_thresh ||
        !is_inter_block(mbmi)))
     return CR_SEGMENT_ID_BASE;
-  else  if (bsize >= BLOCK_16X16 &&
-            rate < cr->thresh_rate_sb &&
-            is_inter_block(mbmi) &&
-            mbmi->mv[0].as_int == 0 &&
-            cr->rate_boost_fac > 10)
+  else if (bsize >= BLOCK_16X16 && rate < cr->thresh_rate_sb &&
+           is_inter_block(mbmi) && mbmi->mv[0].as_int == 0 &&
+           cr->rate_boost_fac > 10)
     // More aggressive delta-q for bigger blocks with zero motion.
     return CR_SEGMENT_ID_BOOST2;
   else
@@ -141,9 +136,8 @@ static int candidate_refresh_aq(const CYCLIC_REFRESH *cr,
 static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
   const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   const RATE_CONTROL *const rc = &cpi->rc;
-  int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type,
-                                          q, rate_factor,
-                                          cpi->common.bit_depth);
+  int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
+                                           rate_factor, cpi->common.bit_depth);
   if ((-deltaq) > cr->max_qdelta_perc * q / 100) {
     deltaq = -cr->max_qdelta_perc * q / 100;
   }
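
The clamp just above caps the (negative) delta so the per-segment boost never exceeds max_qdelta_perc percent of the running q; the function may apply further adjustments below this hunk. Isolated, with invented example values:

static int sketch_clamp_deltaq(int deltaq, int q, int max_qdelta_perc) {
  if (-deltaq > max_qdelta_perc * q / 100)
    deltaq = -max_qdelta_perc * q / 100;
  return deltaq; /* e.g. q = 100, perc = 60: a raw -80 becomes -60 */
}
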
@@ -155,7 +149,7 @@ static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
 // (with different delta-q). Note this function is called in the postencode
 // (called from rc_update_rate_correction_factors()).
 int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
-                                          double correction_factor) {
+                                           double correction_factor) {
   const VP10_COMMON *const cm = &cpi->common;
   const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   int estimated_bits;
@@ -166,17 +160,18 @@ int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
   double weight_segment1 = (double)cr->actual_num_seg1_blocks / num8x8bl;
   double weight_segment2 = (double)cr->actual_num_seg2_blocks / num8x8bl;
   // Take segment weighted average for estimated bits.
-  estimated_bits = (int)((1.0 - weight_segment1 - weight_segment2) *
-      vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
-                             correction_factor, cm->bit_depth) +
-                             weight_segment1 *
-      vp10_estimate_bits_at_q(cm->frame_type,
-                             cm->base_qindex + cr->qindex_delta[1], mbs,
-                             correction_factor, cm->bit_depth) +
-                             weight_segment2 *
-      vp10_estimate_bits_at_q(cm->frame_type,
-                             cm->base_qindex + cr->qindex_delta[2], mbs,
-                             correction_factor, cm->bit_depth));
+  estimated_bits =
+      (int)((1.0 - weight_segment1 - weight_segment2) *
+                vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
+                                        correction_factor, cm->bit_depth) +
+            weight_segment1 *
+                vp10_estimate_bits_at_q(cm->frame_type,
+                                        cm->base_qindex + cr->qindex_delta[1],
+                                        mbs, correction_factor, cm->bit_depth) +
+            weight_segment2 *
+                vp10_estimate_bits_at_q(cm->frame_type,
+                                        cm->base_qindex + cr->qindex_delta[2],
+                                        mbs, correction_factor, cm->bit_depth));
   return estimated_bits;
 }
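
vp10_cyclic_refresh_estimate_bits_at_q blends three per-segment bit estimates by each segment's share of the frame's 8x8 blocks: estimated = (1 - w1 - w2) * R(q) + w1 * R(q + dq1) + w2 * R(q + dq2). With invented round numbers (20% of blocks in segment 1, 5% in segment 2, per-segment estimates 100, 150, 200) the blend is 0.75*100 + 0.20*150 + 0.05*200 = 115:

static int sketch_weighted_bits(double w1, double w2, int bits_base,
                                int bits_seg1, int bits_seg2) {
  return (int)((1.0 - w1 - w2) * bits_base + w1 * bits_seg1 + w2 * bits_seg2);
}
/* sketch_weighted_bits(0.20, 0.05, 100, 150, 200) == 115 */
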
 
@@ -186,24 +181,28 @@ int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
 // Note: the segment map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or
 // to 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock, prior to encoding.
 int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
-                                      double correction_factor) {
+                                       double correction_factor) {
   const VP10_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   int bits_per_mb;
   int num8x8bl = cm->MBs << 2;
   // Weight for segment prior to encoding: take the average of the target
   // number for the frame to be encoded and the actual from the previous frame.
-  double weight_segment = (double)((cr->target_num_seg_blocks +
-      cr->actual_num_seg1_blocks + cr->actual_num_seg2_blocks) >> 1) /
+  double weight_segment =
+      (double)((cr->target_num_seg_blocks + cr->actual_num_seg1_blocks +
+                cr->actual_num_seg2_blocks) >>
+               1) /
       num8x8bl;
   // Compute delta-q corresponding to qindex i.
   int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta);
   // Take segment weighted average for bits per mb.
-  bits_per_mb = (int)((1.0 - weight_segment) *
-      vp10_rc_bits_per_mb(cm->frame_type, i, correction_factor, cm->bit_depth) +
-      weight_segment *
-      vp10_rc_bits_per_mb(cm->frame_type, i + deltaq, correction_factor,
-                         cm->bit_depth));
+  bits_per_mb =
+      (int)((1.0 - weight_segment) * vp10_rc_bits_per_mb(cm->frame_type, i,
+                                                         correction_factor,
+                                                         cm->bit_depth) +
+            weight_segment * vp10_rc_bits_per_mb(cm->frame_type, i + deltaq,
+                                                 correction_factor,
+                                                 cm->bit_depth));
   return bits_per_mb;
 }
 
@@ -211,12 +210,9 @@ int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
 // check if we should reset the segment_id, and update the cyclic_refresh map
 // and segmentation map.
 void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
-                                       MB_MODE_INFO *const mbmi,
-                                       int mi_row, int mi_col,
-                                       BLOCK_SIZE bsize,
-                                       int64_t rate,
-                                       int64_t dist,
-                                       int skip) {
+                                        MB_MODE_INFO *const mbmi, int mi_row,
+                                        int mi_col, BLOCK_SIZE bsize,
+                                        int64_t rate, int64_t dist, int skip) {
   const VP10_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   const int bw = num_8x8_blocks_wide_lookup[bsize];
@@ -224,19 +220,19 @@ void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
   const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
   const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
   const int block_index = mi_row * cm->mi_cols + mi_col;
-  const int refresh_this_block = candidate_refresh_aq(cr, mbmi, rate, dist,
-                                                      bsize);
+  const int refresh_this_block =
+      candidate_refresh_aq(cr, mbmi, rate, dist, bsize);
   // Default is to not update the refresh map.
   int new_map_value = cr->map[block_index];
-  int x = 0; int y = 0;
+  int x = 0;
+  int y = 0;
 
   // If this block is labeled for refresh, check if we should reset the
   // segment_id.
   if (cyclic_refresh_segment_id_boosted(mbmi->segment_id)) {
     mbmi->segment_id = refresh_this_block;
     // Reset segment_id if the block will be skipped.
-    if (skip)
-      mbmi->segment_id = CR_SEGMENT_ID_BASE;
+    if (skip) mbmi->segment_id = CR_SEGMENT_ID_BASE;
   }
 
   // Update the cyclic refresh map, to be used for setting segmentation map
@@ -249,8 +245,7 @@ void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
     // Else if it is accepted as candidate for refresh, and has not already
     // been refreshed (marked as 1) then mark it as a candidate for cleanup
     // for future time (marked as 0), otherwise don't update it.
-    if (cr->map[block_index] == 1)
-      new_map_value = 0;
+    if (cr->map[block_index] == 1) new_map_value = 0;
   } else {
     // Leave it marked as block that is not candidate for refresh.
     new_map_value = 1;
@@ -283,11 +278,12 @@ void vp10_cyclic_refresh_postencode(VP10_COMP *const cpi) {
   cr->actual_num_seg2_blocks = 0;
   for (mi_row = 0; mi_row < cm->mi_rows; mi_row++)
     for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
-      if (cyclic_refresh_segment_id(
-          seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST1)
+      if (cyclic_refresh_segment_id(seg_map[mi_row * cm->mi_cols + mi_col]) ==
+          CR_SEGMENT_ID_BOOST1)
         cr->actual_num_seg1_blocks++;
       else if (cyclic_refresh_segment_id(
-          seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST2)
+                   seg_map[mi_row * cm->mi_cols + mi_col]) ==
+               CR_SEGMENT_ID_BOOST2)
         cr->actual_num_seg2_blocks++;
     }
 }
@@ -324,22 +320,22 @@ void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
 
   for (mi_row = 0; mi_row < rows; mi_row++) {
     for (mi_col = 0; mi_col < cols; mi_col++) {
-      int16_t abs_mvr = mi[0]->mbmi.mv[0].as_mv.row >= 0 ?
-          mi[0]->mbmi.mv[0].as_mv.row : -1 * mi[0]->mbmi.mv[0].as_mv.row;
-      int16_t abs_mvc = mi[0]->mbmi.mv[0].as_mv.col >= 0 ?
-          mi[0]->mbmi.mv[0].as_mv.col : -1 * mi[0]->mbmi.mv[0].as_mv.col;
+      int16_t abs_mvr = mi[0]->mbmi.mv[0].as_mv.row >= 0
+                            ? mi[0]->mbmi.mv[0].as_mv.row
+                            : -1 * mi[0]->mbmi.mv[0].as_mv.row;
+      int16_t abs_mvc = mi[0]->mbmi.mv[0].as_mv.col >= 0
+                            ? mi[0]->mbmi.mv[0].as_mv.col
+                            : -1 * mi[0]->mbmi.mv[0].as_mv.col;
 
       // Calculate the motion of the background.
       if (abs_mvr <= 16 && abs_mvc <= 16) {
         cnt1++;
-        if (abs_mvr == 0 && abs_mvc == 0)
-          cnt2++;
+        if (abs_mvr == 0 && abs_mvc == 0) cnt2++;
       }
       mi++;
 
       // Accumulate low_content_frame.
-      if (cr->map[mi_row * cols + mi_col] < 1)
-        low_content_frame++;
+      if (cr->map[mi_row * cols + mi_col] < 1) low_content_frame++;
     }
     mi += 8;
   }
@@ -350,7 +346,7 @@ void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
   // Also, force this frame as a golden update frame if this frame will change
   // the resolution (resize_pending != 0).
   if (cpi->resize_pending != 0 ||
-     (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
+      (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
     vp10_cyclic_refresh_set_golden_update(cpi);
     rc->frames_till_gf_update_due = rc->baseline_gf_interval;
 
@@ -360,8 +356,7 @@ void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
     force_gf_refresh = 1;
   }
 
-  fraction_low =
-      (double)low_content_frame / (rows * cols);
+  fraction_low = (double)low_content_frame / (rows * cols);
   // Update average.
   cr->low_content_avg = (fraction_low + 3 * cr->low_content_avg) / 4;
   if (!force_gf_refresh && cpi->refresh_golden_frame == 1) {
@@ -425,8 +420,7 @@ static void cyclic_refresh_update_map(VP10_COMP *const cpi) {
         // for possible boost/refresh (segment 1). The segment id may get
         // reset to 0 later if block gets coded anything other than ZEROMV.
         if (cr->map[bl_index2] == 0) {
-          if (cr->last_coded_q_map[bl_index2] > qindex_thresh)
-            sum_map++;
+          if (cr->last_coded_q_map[bl_index2] > qindex_thresh) sum_map++;
         } else if (cr->map[bl_index2] < 0) {
           cr->map[bl_index2]++;
         }
@@ -459,14 +453,12 @@ void vp10_cyclic_refresh_update_parameters(VP10_COMP *const cpi) {
   cr->time_for_refresh = 0;
   // Use larger delta-qp (increase rate_ratio_qdelta) for first few (~4)
   // periods of the refresh cycle, after a key frame.
-  if (rc->frames_since_key <  4 * cr->percent_refresh)
+  if (rc->frames_since_key < 4 * cr->percent_refresh)
     cr->rate_ratio_qdelta = 3.0;
   else
     cr->rate_ratio_qdelta = 2.0;
   // Adjust some parameters for low resolutions at low bitrates.
-  if (cm->width <= 352 &&
-      cm->height <= 288 &&
-      rc->avg_frame_bandwidth < 3400) {
+  if (cm->width <= 352 && cm->height <= 288 && rc->avg_frame_bandwidth < 3400) {
     cr->motion_thresh = 4;
     cr->rate_boost_fac = 10;
   } else {
@@ -481,9 +473,8 @@ void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
   const RATE_CONTROL *const rc = &cpi->rc;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   struct segmentation *const seg = &cm->seg;
-  const int apply_cyclic_refresh  = apply_cyclic_refresh_bitrate(cm, rc);
-  if (cm->current_video_frame == 0)
-    cr->low_content_avg = 0.0;
+  const int apply_cyclic_refresh = apply_cyclic_refresh_bitrate(cm, rc);
+  if (cm->current_video_frame == 0) cr->low_content_avg = 0.0;
   // Don't apply refresh on key frame or enhancement layer frames.
   if (!apply_cyclic_refresh || cm->frame_type == KEY_FRAME) {
     // Set segmentation map to 0 and disable.
@@ -517,7 +508,8 @@ void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
     seg->abs_delta = SEGMENT_DELTADATA;
 
     // Note: setting temporal_update has no effect, as the seg-map coding method
-    // (temporal or spatial) is determined in vp10_choose_segmap_coding_method(),
+    // (temporal or spatial) is determined in
+    // vp10_choose_segmap_coding_method(),
     // based on the coding cost of each method. For error_resilient mode on the
     // last_frame_seg_map is set to 0, so if temporal coding is used, it is
     // relative to 0 previous map.
diff --git a/vp10/encoder/aq_cyclicrefresh.h b/vp10/encoder/aq_cyclicrefresh.h
index f6714c5c8de42761a011dfd54ebc46e76b4111ab..649d714882a52260a92fe29a9886b70237264bc6 100644
--- a/vp10/encoder/aq_cyclicrefresh.h
+++ b/vp10/encoder/aq_cyclicrefresh.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_AQ_CYCLICREFRESH_H_
 #define VP10_ENCODER_AQ_CYCLICREFRESH_H_
 
@@ -20,9 +19,9 @@ extern "C" {
 
 // The segment ids used in cyclic refresh: from base (no boost) to increasing
 // boost (higher delta-qp).
-#define CR_SEGMENT_ID_BASE    0
-#define CR_SEGMENT_ID_BOOST1  1
-#define CR_SEGMENT_ID_BOOST2  2
+#define CR_SEGMENT_ID_BASE 0
+#define CR_SEGMENT_ID_BOOST1 1
+#define CR_SEGMENT_ID_BOOST2 2
 
 // Maximum rate target ratio for setting segment delta-qp.
 #define CR_MAX_RATE_TARGET_RATIO 4.0
@@ -39,20 +38,20 @@ void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr);
 // Estimate the bits, incorporating the delta-q from segment 1, after encoding
 // the frame.
 int vp10_cyclic_refresh_estimate_bits_at_q(const struct VP10_COMP *cpi,
-                                          double correction_factor);
+                                           double correction_factor);
 
 // Estimate the bits per mb, for a given q = i and a corresponding delta-q
 // (for segment 1), prior to encoding the frame.
 int vp10_cyclic_refresh_rc_bits_per_mb(const struct VP10_COMP *cpi, int i,
-                                      double correction_factor);
+                                       double correction_factor);
 
 // Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
 // check if we should reset the segment_id, and update the cyclic_refresh map
 // and segmentation map.
 void vp10_cyclic_refresh_update_segment(struct VP10_COMP *const cpi,
-                                       MB_MODE_INFO *const mbmi,
-                                       int mi_row, int mi_col, BLOCK_SIZE bsize,
-                                       int64_t rate, int64_t dist, int skip);
+                                        MB_MODE_INFO *const mbmi, int mi_row,
+                                        int mi_col, BLOCK_SIZE bsize,
+                                        int64_t rate, int64_t dist, int skip);
 
 // Update the segmentation map, and related quantities: cyclic refresh map,
 // refresh sb_index, and target number of blocks to be refreshed.
diff --git a/vp10/encoder/aq_variance.c b/vp10/encoder/aq_variance.c
index 15ab6b42b415746d64670aa64c00e33ed36ea72e..520640a321aba4e1ff136845c1571f39cbcecb82 100644
--- a/vp10/encoder/aq_variance.c
+++ b/vp10/encoder/aq_variance.c
@@ -22,19 +22,19 @@
 
 #define ENERGY_MIN (-4)
 #define ENERGY_MAX (1)
-#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN +  1)
-#define ENERGY_IN_BOUNDS(energy)\
+#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
+#define ENERGY_IN_BOUNDS(energy) \
   assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX)
 
-static const double rate_ratio[MAX_SEGMENTS] =
-  {2.5, 2.0, 1.5, 1.0, 0.75, 1.0, 1.0, 1.0};
-static const int segment_id[ENERGY_SPAN] = {0, 1, 1, 2, 3, 4};
+static const double rate_ratio[MAX_SEGMENTS] = { 2.5,  2.0, 1.5, 1.0,
+                                                 0.75, 1.0, 1.0, 1.0 };
+static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 };
 
-#define SEGMENT_ID(i) segment_id[(i) - ENERGY_MIN]
+#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
 
-DECLARE_ALIGNED(16, static const uint8_t, vp10_64_zeros[64]) = {0};
+DECLARE_ALIGNED(16, static const uint8_t, vp10_64_zeros[64]) = { 0 };
 #if CONFIG_VPX_HIGHBITDEPTH
-DECLARE_ALIGNED(16, static const uint16_t, vp10_highbd_64_zeros[64]) = {0};
+DECLARE_ALIGNED(16, static const uint16_t, vp10_highbd_64_zeros[64]) = { 0 };
 #endif
 
 unsigned int vp10_vaq_segment_id(int energy) {
@@ -60,7 +60,7 @@ void vp10_vaq_frame_setup(VP10_COMP *cpi) {
     for (i = 0; i < MAX_SEGMENTS; ++i) {
       int qindex_delta =
           vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
-                                     rate_ratio[i], cm->bit_depth);
+                                      rate_ratio[i], cm->bit_depth);
 
       // We don't allow qindex 0 in a segment if the base value is not 0.
       // Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment
@@ -84,9 +84,9 @@ void vp10_vaq_frame_setup(VP10_COMP *cpi) {
 /* TODO(agrange, paulwilkins): block_variance() calls the unoptimized
  * versions of variance() and highbd_8_variance(). It should not.
  */
-static void aq_variance(const uint8_t *a, int  a_stride,
-                        const uint8_t *b, int  b_stride,
-                        int  w, int  h, unsigned int *sse, int *sum) {
+static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b,
+                        int b_stride, int w, int h, unsigned int *sse,
+                        int *sum) {
   int i, j;
 
   *sum = 0;
@@ -105,9 +105,9 @@ static void aq_variance(const uint8_t *a, int  a_stride,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static void aq_highbd_variance64(const uint8_t *a8, int  a_stride,
-                                 const uint8_t *b8, int  b_stride,
-                                 int w, int h, uint64_t *sse, uint64_t *sum) {
+static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
+                                 const uint8_t *b8, int b_stride, int w, int h,
+                                 uint64_t *sse, uint64_t *sum) {
   int i, j;
 
   uint16_t *a = CONVERT_TO_SHORTPTR(a8);
@@ -126,9 +126,9 @@ static void aq_highbd_variance64(const uint8_t *a8, int  a_stride,
   }
 }
 
-static void aq_highbd_8_variance(const uint8_t *a8, int  a_stride,
-                                 const uint8_t *b8, int  b_stride,
-                                 int w, int h, unsigned int *sse, int *sum) {
+static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
+                                 const uint8_t *b8, int b_stride, int w, int h,
+                                 unsigned int *sse, int *sum) {
   uint64_t sse_long = 0;
   uint64_t sum_long = 0;
   aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
@@ -141,10 +141,10 @@ static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x,
                                    BLOCK_SIZE bs) {
   MACROBLOCKD *xd = &x->e_mbd;
   unsigned int var, sse;
-  int right_overflow = (xd->mb_to_right_edge < 0) ?
-      ((-xd->mb_to_right_edge) >> 3) : 0;
-  int bottom_overflow = (xd->mb_to_bottom_edge < 0) ?
-      ((-xd->mb_to_bottom_edge) >> 3) : 0;
+  int right_overflow =
+      (xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0;
+  int bottom_overflow =
+      (xd->mb_to_bottom_edge < 0) ? ((-xd->mb_to_bottom_edge) >> 3) : 0;
 
   if (right_overflow || bottom_overflow) {
     const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
@@ -158,30 +158,27 @@ static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x,
       sse >>= 2 * (xd->bd - 8);
       avg >>= (xd->bd - 8);
     } else {
-      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride,
-                  vp10_64_zeros, 0, bw, bh, &sse, &avg);
+      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_64_zeros, 0,
+                  bw, bh, &sse, &avg);
     }
 #else
-    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride,
-                vp10_64_zeros, 0, bw, bh, &sse, &avg);
+    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_64_zeros, 0,
+                bw, bh, &sse, &avg);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
     var = sse - (((int64_t)avg * avg) / (bw * bh));
     return (256 * var) / (bw * bh);
   } else {
 #if CONFIG_VPX_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
-                               x->plane[0].src.stride,
-                               CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros),
-                               0, &sse);
+      var =
+          cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+                             CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros), 0, &sse);
     } else {
-      var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
-                               x->plane[0].src.stride,
+      var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                                vp10_64_zeros, 0, &sse);
     }
 #else
-    var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
-                             x->plane[0].src.stride,
+    var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                              vp10_64_zeros, 0, &sse);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
     return (256 * var) >> num_pels_log2_lookup[bs];
@@ -200,7 +197,7 @@ int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
   double energy_midpoint;
   vpx_clear_system_state();
   energy_midpoint =
-    (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
+      (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
   energy = vp10_log_block_var(cpi, x, bs) - energy_midpoint;
   return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
 }
diff --git a/vp10/encoder/aq_variance.h b/vp10/encoder/aq_variance.h
index 318f5f27f124090fff29df1e74e7f2bf7fd4eaeb..cfb5b86a2e67349d89d70c7635ade5e43e679065 100644
--- a/vp10/encoder/aq_variance.h
+++ b/vp10/encoder/aq_variance.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_AQ_VARIANCE_H_
 #define VP10_ENCODER_AQ_VARIANCE_H_
 
diff --git a/vp10/encoder/arm/neon/dct_neon.c b/vp10/encoder/arm/neon/dct_neon.c
index b37a2ff3a9fc7d1451c495cbeba21da363de7ae4..e83853fa52dac3299747b46f17682217ea70af41 100644
--- a/vp10/encoder/arm/neon/dct_neon.c
+++ b/vp10/encoder/arm/neon/dct_neon.c
@@ -17,20 +17,17 @@
 #include "vp10/common/blockd.h"
 #include "vpx_dsp/txfm_common.h"
 
-void vp10_fdct8x8_quant_neon(const int16_t *input, int stride,
-                            int16_t* coeff_ptr, intptr_t n_coeffs,
-                            int skip_block, const int16_t* zbin_ptr,
-                            const int16_t* round_ptr, const int16_t* quant_ptr,
-                            const int16_t* quant_shift_ptr,
-                            int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr,
-                            const int16_t* dequant_ptr, uint16_t* eob_ptr,
-                            const int16_t* scan_ptr,
-                            const int16_t* iscan_ptr) {
+void vp10_fdct8x8_quant_neon(
+    const int16_t* input, int stride, int16_t* coeff_ptr, intptr_t n_coeffs,
+    int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr,
+    const int16_t* quant_ptr, const int16_t* quant_shift_ptr,
+    int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
+    uint16_t* eob_ptr, const int16_t* scan_ptr, const int16_t* iscan_ptr) {
   int16_t temp_buffer[64];
   (void)coeff_ptr;
 
   vpx_fdct8x8_neon(input, temp_buffer, stride);
   vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
-                       quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
-                       dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
+                        quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
+                        dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
 }
diff --git a/vp10/encoder/arm/neon/error_neon.c b/vp10/encoder/arm/neon/error_neon.c
index 009520aeed8ea070263c38f337d77121f39c60a6..34805d316a814a760dfe23deee1796ad41e6b945 100644
--- a/vp10/encoder/arm/neon/error_neon.c
+++ b/vp10/encoder/arm/neon/error_neon.c
@@ -14,7 +14,7 @@
 #include "./vp10_rtcd.h"
 
 int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
-                                int block_size) {
+                                 int block_size) {
   int64x2_t error = vdupq_n_s64(0);
 
   assert(block_size >= 8);
diff --git a/vp10/encoder/arm/neon/quantize_neon.c b/vp10/encoder/arm/neon/quantize_neon.c
index 9354ced69937264e89e3d6872d0d4e7397418986..7b8e447c02a1f4f072bf4afcbadfb979a1d73660 100644
--- a/vp10/encoder/arm/neon/quantize_neon.c
+++ b/vp10/encoder/arm/neon/quantize_neon.c
@@ -22,12 +22,12 @@
 #include "vp10/encoder/rd.h"
 
 void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
-                          int skip_block, const int16_t *zbin_ptr,
-                          const int16_t *round_ptr, const int16_t *quant_ptr,
-                          const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
-                          int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
-                          uint16_t *eob_ptr,
-                          const int16_t *scan, const int16_t *iscan) {
+                           int skip_block, const int16_t *zbin_ptr,
+                           const int16_t *round_ptr, const int16_t *quant_ptr,
+                           const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+                           int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                           uint16_t *eob_ptr, const int16_t *scan,
+                           const int16_t *iscan) {
   // TODO(jingning): Decide whether these arguments are needed after the
   // quantization process is completed.
   (void)zbin_ptr;
@@ -54,12 +54,12 @@ void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
       const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]);
       const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
       const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
-      const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
-                                           vget_low_s16(v_quant));
-      const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
-                                           vget_high_s16(v_quant));
-      const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
-                                            vshrn_n_s32(v_tmp_hi, 16));
+      const int32x4_t v_tmp_lo =
+          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
+      const int32x4_t v_tmp_hi =
+          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
+      const int16x8_t v_tmp2 =
+          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
       const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
       const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
       const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
@@ -79,12 +79,12 @@ void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
       const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]);
       const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
       const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
-      const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
-                                           vget_low_s16(v_quant));
-      const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
-                                           vget_high_s16(v_quant));
-      const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
-                                            vshrn_n_s32(v_tmp_hi, 16));
+      const int32x4_t v_tmp_lo =
+          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
+      const int32x4_t v_tmp_hi =
+          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
+      const int16x8_t v_tmp2 =
+          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
       const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
       const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
       const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
@@ -96,9 +96,8 @@ void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
       vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff);
     }
     {
-      const int16x4_t v_eobmax_3210 =
-          vmax_s16(vget_low_s16(v_eobmax_76543210),
-                   vget_high_s16(v_eobmax_76543210));
+      const int16x4_t v_eobmax_3210 = vmax_s16(
+          vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210));
       const int64x1_t v_eobmax_xx32 =
           vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
       const int16x4_t v_eobmax_tmp =
diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c
index 0b2645849a221d37a94d6f29141cb4ec0380d700..0fd3d5a4ede91be3c15e62479dc7ffebee64ae94 100644
--- a/vp10/encoder/bitstream.c
+++ b/vp10/encoder/bitstream.c
@@ -36,20 +36,22 @@
 #include "vp10/encoder/tokenize.h"
 
 static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
-  {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
-  {62, 6}, {2, 2}};
+  { 0, 1 },  { 6, 3 },   { 28, 5 },  { 30, 5 }, { 58, 6 },
+  { 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
+};
 static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
-  {{0, 1}, {2, 2}, {3, 2}};
-static const struct vp10_token partition_encodings[PARTITION_TYPES] =
-  {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
-static const struct vp10_token inter_mode_encodings[INTER_MODES] =
-  {{2, 2}, {6, 3}, {0, 1}, {7, 3}};
+    { { 0, 1 }, { 2, 2 }, { 3, 2 } };
+static const struct vp10_token partition_encodings[PARTITION_TYPES] = {
+  { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
+};
+static const struct vp10_token inter_mode_encodings[INTER_MODES] = {
+  { 2, 2 }, { 6, 3 }, { 0, 1 }, { 7, 3 }
+};
 
 static INLINE void write_uniform(vpx_writer *w, int n, int v) {
   int l = get_unsigned_bits(n);
   int m = (1 << l) - n;
-  if (l == 0)
-    return;
+  if (l == 0) return;
   if (v < m) {
     vpx_write_literal(w, v, l - 1);
   } else {
@@ -73,18 +75,18 @@ static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode,
                              const vpx_prob *probs) {
   assert(is_inter_mode(mode));
   vp10_write_token(w, vp10_inter_mode_tree, probs,
-                  &inter_mode_encodings[INTER_OFFSET(mode)]);
+                   &inter_mode_encodings[INTER_OFFSET(mode)]);
 }
 
-static void encode_unsigned_max(struct vpx_write_bit_buffer *wb,
-                                int data, int max) {
+static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data,
+                                int max) {
   vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
 }
 
 static void prob_diff_update(const vpx_tree_index *tree,
                              vpx_prob probs[/*n - 1*/],
-                             const unsigned int counts[/*n - 1*/],
-                             int n, vpx_writer *w) {
+                             const unsigned int counts[/*n - 1*/], int n,
+                             vpx_writer *w) {
   int i;
   unsigned int branch_ct[32][2];
 
@@ -108,19 +110,18 @@ static int prob_diff_update_savings(const vpx_tree_index *tree,
   assert(n <= 32);
   vp10_tree_probs_from_distribution(tree, branch_ct, counts);
   for (i = 0; i < n - 1; ++i) {
-    savings += vp10_cond_prob_diff_update_savings(&probs[i],
-                                                  branch_ct[i]);
+    savings += vp10_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
   }
   return savings;
 }
 
-static void write_selected_tx_size(const VP10_COMMON *cm,
-                                   const MACROBLOCKD *xd, vpx_writer *w) {
+static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+                                   vpx_writer *w) {
   TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
-  const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
-                                                 &cm->fc->tx_probs);
+  const vpx_prob *const tx_probs =
+      get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
   vpx_write(w, tx_size != TX_4X4, tx_probs[0]);
   if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
     vpx_write(w, tx_size != TX_8X8, tx_probs[1]);
@@ -175,36 +176,32 @@ static void update_ext_tx_probs(VP10_COMMON *cm, vpx_writer *w) {
   if (do_update) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
       for (j = 0; j < TX_TYPES; ++j)
-        prob_diff_update(vp10_ext_tx_tree,
-                         cm->fc->intra_ext_tx_prob[i][j],
-                         cm->counts.intra_ext_tx[i][j],
-                         TX_TYPES, w);
+        prob_diff_update(vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+                         cm->counts.intra_ext_tx[i][j], TX_TYPES, w);
     }
   }
   savings = 0;
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-    savings += prob_diff_update_savings(
-        vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
-        cm->counts.inter_ext_tx[i], TX_TYPES);
+    savings +=
+        prob_diff_update_savings(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+                                 cm->counts.inter_ext_tx[i], TX_TYPES);
   }
   do_update = savings > savings_thresh;
   vpx_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
   if (do_update) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-      prob_diff_update(vp10_ext_tx_tree,
-                       cm->fc->inter_ext_tx_prob[i],
-                       cm->counts.inter_ext_tx[i],
-                       TX_TYPES, w);
+      prob_diff_update(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+                       cm->counts.inter_ext_tx[i], TX_TYPES, w);
     }
   }
 }
 
-static void pack_mb_tokens(vpx_writer *w,
-                           TOKENEXTRA **tp, const TOKENEXTRA *const stop,
+static void pack_mb_tokens(vpx_writer *w, TOKENEXTRA **tp,
+                           const TOKENEXTRA *const stop,
                            vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
   TOKENEXTRA *p = *tp;
 #if !CONFIG_MISC_FIXES
-  (void) tx;
+  (void)tx;
 #endif
 
   while (p < stop && p->token != EOSB_TOKEN) {
@@ -223,7 +220,7 @@ static void pack_mb_tokens(vpx_writer *w,
       b = &vp10_extra_bits[t];
 #else
     const vp10_extra_bit *const b = &vp10_extra_bits[t];
-    (void) bit_depth;
+    (void)bit_depth;
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
     /* skip one or two nodes */
@@ -245,8 +242,8 @@ static void pack_mb_tokens(vpx_writer *w,
       int bits = v >> (n - len);
       vp10_write_tree(w, vp10_coef_tree, p->context_tree, bits, len, i);
       vp10_write_tree(w, vp10_coef_con_tree,
-                     vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
-                     v, n - len, 0);
+                      vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
+                      n - len, 0);
     } else {
       vp10_write_tree(w, vp10_coef_tree, p->context_tree, v, n, i);
     }
@@ -254,8 +251,7 @@ static void pack_mb_tokens(vpx_writer *w,
     if (b->base_val) {
       const int e = p->extra, l = b->len;
 #if CONFIG_MISC_FIXES
-      int skip_bits =
-          (b->base_val == CAT6_MIN_VAL) ? TX_SIZES - 1 - tx : 0;
+      int skip_bits = (b->base_val == CAT6_MIN_VAL) ? TX_SIZES - 1 - tx : 0;
 #else
       int skip_bits = 0;
 #endif
@@ -263,7 +259,7 @@ static void pack_mb_tokens(vpx_writer *w,
       if (l) {
         const unsigned char *pb = b->prob;
         int v = e >> 1;
-        int n = l;              /* number of bits in v, assumed nonzero */
+        int n = l; /* number of bits in v, assumed nonzero */
         int i = 0;
 
         do {
@@ -305,7 +301,7 @@ static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
     assert(!is_compound);
     assert(mbmi->ref_frame[0] ==
-               get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
+           get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
   } else {
     // Does the feature use compound prediction or not
     // (if not specified at the frame/segment level)?
@@ -356,8 +352,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
       const int pred_flag = mbmi->seg_id_predicted;
       vpx_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
       vpx_write(w, pred_flag, pred_prob);
-      if (!pred_flag)
-        write_segment_id(w, seg, segp, segment_id);
+      if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
     } else {
       write_segment_id(w, seg, segp, segment_id);
     }
@@ -403,8 +398,8 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
     if (cm->interp_filter == SWITCHABLE) {
       const int ctx = vp10_get_pred_context_switchable_interp(xd);
       vp10_write_token(w, vp10_switchable_interp_tree,
-                      cm->fc->switchable_interp_prob[ctx],
-                      &switchable_interp_encodings[mbmi->interp_filter]);
+                       cm->fc->switchable_interp_prob[ctx],
+                       &switchable_interp_encodings[mbmi->interp_filter]);
       ++cpi->interp_filter_selected[0][mbmi->interp_filter];
     } else {
       assert(mbmi->interp_filter == cm->interp_filter);
@@ -422,8 +417,8 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
           if (b_mode == NEWMV) {
             for (ref = 0; ref < 1 + is_compound; ++ref)
               vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
-                            &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
-                            nmvc, allow_hp);
+                             &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+                             nmvc, allow_hp);
           }
         }
       }
@@ -431,19 +426,17 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
       if (mode == NEWMV) {
         for (ref = 0; ref < 1 + is_compound; ++ref)
           vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
-                        &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
-                        allow_hp);
+                         &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+                         nmvc, allow_hp);
       }
     }
   }
-  if (mbmi->tx_size < TX_32X32 &&
-      cm->base_qindex > 0 && !mbmi->skip &&
+  if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
     if (is_inter) {
-      vp10_write_token(
-          w, vp10_ext_tx_tree,
-          cm->fc->inter_ext_tx_prob[mbmi->tx_size],
-          &ext_tx_encodings[mbmi->tx_type]);
+      vp10_write_token(w, vp10_ext_tx_tree,
+                       cm->fc->inter_ext_tx_prob[mbmi->tx_size],
+                       &ext_tx_encodings[mbmi->tx_type]);
     } else {
       vp10_write_token(
           w, vp10_ext_tx_tree,
@@ -452,8 +445,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
           &ext_tx_encodings[mbmi->tx_type]);
     }
   } else {
-    if (!mbmi->skip)
-      assert(mbmi->tx_type == DCT_DCT);
+    if (!mbmi->skip) assert(mbmi->tx_type == DCT_DCT);
   }
 }
 
@@ -471,8 +463,7 @@ static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
   const MB_MODE_INFO *const mbmi = &mi->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
 
-  if (seg->update_map)
-    write_segment_id(w, seg, segp, mbmi->segment_id);
+  if (seg->update_map) write_segment_id(w, seg, segp, mbmi->segment_id);
 
   write_skip(cm, xd, mbmi->segment_id, mi, w);
 
@@ -499,8 +490,7 @@ static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
 
   write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mbmi->mode]);
 
-  if (mbmi->tx_size < TX_32X32 &&
-      cm->base_qindex > 0 && !mbmi->skip &&
+  if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
     vp10_write_token(
         w, vp10_ext_tx_tree,
@@ -512,8 +502,8 @@ static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
 
 static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
                           vpx_writer *w, TOKENEXTRA **tok,
-                          const TOKENEXTRA *const tok_end,
-                          int mi_row, int mi_col) {
+                          const TOKENEXTRA *const tok_end, int mi_row,
+                          int mi_col) {
   const VP10_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
   MODE_INFO *m;
@@ -524,8 +514,7 @@ static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
 
   cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
 
-  set_mi_row_col(xd, tile,
-                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
+  set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                  mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                  cm->mi_rows, cm->mi_cols);
   if (frame_is_intra_only(cm)) {
@@ -537,8 +526,8 @@ static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
   if (!m->mbmi.skip) {
     assert(*tok < tok_end);
     for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-      TX_SIZE tx = plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane])
-                         : m->mbmi.tx_size;
+      TX_SIZE tx =
+          plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane]) : m->mbmi.tx_size;
       pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx);
       assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
       (*tok)++;
@@ -547,9 +536,9 @@ static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
 }
 
 static void write_partition(const VP10_COMMON *const cm,
-                            const MACROBLOCKD *const xd,
-                            int hbs, int mi_row, int mi_col,
-                            PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) {
+                            const MACROBLOCKD *const xd, int hbs, int mi_row,
+                            int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
+                            vpx_writer *w) {
   const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
   const vpx_prob *const probs = cm->fc->partition_prob[ctx];
   const int has_rows = (mi_row + hbs) < cm->mi_rows;
@@ -568,10 +557,10 @@ static void write_partition(const VP10_COMMON *const cm,
   }
 }
 
-static void write_modes_sb(VP10_COMP *cpi,
-                           const TileInfo *const tile, vpx_writer *w,
-                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
-                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
+static void write_modes_sb(VP10_COMP *cpi, const TileInfo *const tile,
+                           vpx_writer *w, TOKENEXTRA **tok,
+                           const TOKENEXTRA *const tok_end, int mi_row,
+                           int mi_col, BLOCK_SIZE bsize) {
   const VP10_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
@@ -581,8 +570,7 @@ static void write_modes_sb(VP10_COMP *cpi,
   BLOCK_SIZE subsize;
   const MODE_INFO *m = NULL;
 
-  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
-    return;
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
 
   m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];
 
@@ -615,8 +603,7 @@ static void write_modes_sb(VP10_COMP *cpi,
         write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                        subsize);
         break;
-      default:
-        assert(0);
+      default: assert(0);
     }
   }
 
@@ -626,9 +613,9 @@ static void write_modes_sb(VP10_COMP *cpi,
     update_partition_context(xd, mi_row, mi_col, subsize, bsize);
 }
 
-static void write_modes(VP10_COMP *cpi,
-                        const TileInfo *const tile, vpx_writer *w,
-                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
+static void write_modes(VP10_COMP *cpi, const TileInfo *const tile,
+                        vpx_writer *w, TOKENEXTRA **tok,
+                        const TOKENEXTRA *const tok_end) {
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
   int mi_row, mi_col;
 
@@ -637,8 +624,7 @@ static void write_modes(VP10_COMP *cpi,
     vp10_zero(xd->left_seg_context);
     for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
          mi_col += MI_BLOCK_SIZE)
-      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
-                     BLOCK_64X64);
+      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
   }
 }
 
@@ -646,7 +632,7 @@ static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
                                     vp10_coeff_stats *coef_branch_ct,
                                     vp10_coeff_probs_model *coef_probs) {
   vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
-  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
+  unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
       cpi->common.counts.eob_branch[tx_size];
   int i, j, k, l, m;
 
@@ -655,21 +641,21 @@ static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
       for (k = 0; k < COEF_BANDS; ++k) {
         for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
           vp10_tree_probs_from_distribution(vp10_coef_tree,
-                                           coef_branch_ct[i][j][k][l],
-                                           coef_counts[i][j][k][l]);
-          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
-                                             coef_branch_ct[i][j][k][l][0][0];
+                                            coef_branch_ct[i][j][k][l],
+                                            coef_counts[i][j][k][l]);
+          coef_branch_ct[i][j][k][l][0][1] =
+              eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
           for (m = 0; m < UNCONSTRAINED_NODES; ++m)
-            coef_probs[i][j][k][l][m] = get_binary_prob(
-                                            coef_branch_ct[i][j][k][l][m][0],
-                                            coef_branch_ct[i][j][k][l][m][1]);
+            coef_probs[i][j][k][l][m] =
+                get_binary_prob(coef_branch_ct[i][j][k][l][m][0],
+                                coef_branch_ct[i][j][k][l][m][1]);
         }
       }
     }
   }
 }
 
-static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
+static void update_coef_probs_common(vpx_writer *const bc, VP10_COMP *cpi,
                                      TX_SIZE tx_size,
                                      vp10_coeff_stats *frame_branch_ct,
                                      vp10_coeff_probs_model *new_coef_probs) {
@@ -683,7 +669,7 @@ static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
     case TWO_LOOP: {
       /* dry run to see if there is any update at all needed */
       int savings = 0;
-      int update[2] = {0, 0};
+      int update[2] = { 0, 0 };
       for (i = 0; i < PLANE_TYPES; ++i) {
         for (j = 0; j < REF_TYPES; ++j) {
           for (k = 0; k < COEF_BANDS; ++k) {
@@ -700,8 +686,7 @@ static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
                 else
                   s = vp10_prob_diff_update_savings_search(
                       frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
-                if (s > 0 && newp != oldp)
-                  u = 1;
+                if (s > 0 && newp != oldp) u = 1;
                 if (u)
                   savings += s - (int)(vp10_cost_zero(upd));
                 else
@@ -737,10 +722,8 @@ static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
                       old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                 else
                   s = vp10_prob_diff_update_savings_search(
-                      frame_branch_ct[i][j][k][l][t],
-                      *oldp, &newp, upd);
-                if (s > 0 && newp != *oldp)
-                  u = 1;
+                      frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
+                if (s > 0 && newp != *oldp) u = 1;
                 vpx_write(bc, u, upd);
                 if (u) {
                   /* send/use new probability */
@@ -775,12 +758,10 @@ static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
                       old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                 } else {
                   s = vp10_prob_diff_update_savings_search(
-                      frame_branch_ct[i][j][k][l][t],
-                      *oldp, &newp, upd);
+                      frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
                 }
 
-                if (s > 0 && newp != *oldp)
-                  u = 1;
+                if (s > 0 && newp != *oldp) u = 1;
                 updates += u;
                 if (u == 0 && updates == 0) {
                   noupdates_before_first++;
@@ -809,12 +790,11 @@ static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
       }
       return;
     }
-    default:
-      assert(0);
+    default: assert(0);
   }
 }
 
-static void update_coef_probs(VP10_COMP *cpi, vpx_writer* w) {
+static void update_coef_probs(VP10_COMP *cpi, vpx_writer *w) {
   const TX_MODE tx_mode = cpi->common.tx_mode;
   const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
   TX_SIZE tx_size;
@@ -825,8 +805,7 @@ static void update_coef_probs(VP10_COMP *cpi, vpx_writer* w) {
         (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
       vpx_write_bit(w, 0);
     } else {
-      build_tree_distribution(cpi, tx_size, frame_branch_ct,
-                              frame_coef_probs);
+      build_tree_distribution(cpi, tx_size, frame_branch_ct, frame_coef_probs);
       update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
                                frame_coef_probs);
     }
@@ -898,8 +877,7 @@ static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
 #endif
 
   vpx_wb_write_bit(wb, seg->enabled);
-  if (!seg->enabled)
-    return;
+  if (!seg->enabled) return;
 
   // Segmentation map
   if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
@@ -916,8 +894,7 @@ static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
       const int prob = segp->tree_probs[i];
       const int update = prob != MAX_PROB;
       vpx_wb_write_bit(wb, update);
-      if (update)
-        vpx_wb_write_literal(wb, prob, 8);
+      if (update) vpx_wb_write_literal(wb, prob, 8);
     }
 #endif
 
@@ -934,8 +911,7 @@ static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
         const int prob = segp->pred_probs[i];
         const int update = prob != MAX_PROB;
         vpx_wb_write_bit(wb, update);
-        if (update)
-          vpx_wb_write_literal(wb, prob, 8);
+        if (update) vpx_wb_write_literal(wb, prob, 8);
       }
     }
 #endif
@@ -970,48 +946,42 @@ static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
 static void update_seg_probs(VP10_COMP *cpi, vpx_writer *w) {
   VP10_COMMON *cm = &cpi->common;
 
-  if (!cpi->common.seg.enabled)
-    return;
+  if (!cpi->common.seg.enabled) return;
 
   if (cpi->common.seg.temporal_update) {
     int i;
 
     for (i = 0; i < PREDICTION_PROBS; i++)
       vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
-          cm->counts.seg.pred[i]);
+                                 cm->counts.seg.pred[i]);
 
     prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
-        cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
+                     cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
   } else {
     prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
-        cm->counts.seg.tree_total, MAX_SEGMENTS, w);
+                     cm->counts.seg.tree_total, MAX_SEGMENTS, w);
   }
 }
 
 static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) {
   vpx_wb_write_bit(wb, mode == TX_MODE_SELECT);
-  if (mode != TX_MODE_SELECT)
-    vpx_wb_write_literal(wb, mode, 2);
+  if (mode != TX_MODE_SELECT) vpx_wb_write_literal(wb, mode, 2);
 }
 #else
 static void write_txfm_mode(TX_MODE mode, struct vpx_writer *wb) {
   vpx_write_literal(wb, VPXMIN(mode, ALLOW_32X32), 2);
-  if (mode >= ALLOW_32X32)
-    vpx_write_bit(wb, mode == TX_MODE_SELECT);
+  if (mode >= ALLOW_32X32) vpx_write_bit(wb, mode == TX_MODE_SELECT);
 }
 #endif
 
-
 static void update_txfm_probs(VP10_COMMON *cm, vpx_writer *w,
                               FRAME_COUNTS *counts) {
-
   if (cm->tx_mode == TX_MODE_SELECT) {
     int i, j;
     unsigned int ct_8x8p[TX_SIZES - 3][2];
     unsigned int ct_16x16p[TX_SIZES - 2][2];
     unsigned int ct_32x32p[TX_SIZES - 1][2];
 
-
     for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
       vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
       for (j = 0; j < TX_SIZES - 3; j++)
@@ -1022,14 +992,14 @@ static void update_txfm_probs(VP10_COMMON *cm, vpx_writer *w,
       vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
       for (j = 0; j < TX_SIZES - 2; j++)
         vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
-                                  ct_16x16p[j]);
+                                   ct_16x16p[j]);
     }
 
     for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
       vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
       for (j = 0; j < TX_SIZES - 1; j++)
         vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
-                                  ct_32x32p[j]);
+                                   ct_32x32p[j]);
     }
   }
 }
@@ -1037,8 +1007,7 @@ static void update_txfm_probs(VP10_COMMON *cm, vpx_writer *w,
 static void write_interp_filter(INTERP_FILTER filter,
                                 struct vpx_write_bit_buffer *wb) {
   vpx_wb_write_bit(wb, filter == SWITCHABLE);
-  if (filter != SWITCHABLE)
-    vpx_wb_write_literal(wb, filter, 2);
+  if (filter != SWITCHABLE) vpx_wb_write_literal(wb, filter, 2);
 }
 
 static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
@@ -1071,16 +1040,13 @@ static void write_tile_info(const VP10_COMMON *const cm,
 
   // columns
   ones = cm->log2_tile_cols - min_log2_tile_cols;
-  while (ones--)
-    vpx_wb_write_bit(wb, 1);
+  while (ones--) vpx_wb_write_bit(wb, 1);
 
-  if (cm->log2_tile_cols < max_log2_tile_cols)
-    vpx_wb_write_bit(wb, 0);
+  if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0);
 
   // rows
   vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
-  if (cm->log2_tile_rows != 0)
-    vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
+  if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
 }
 
 static int get_refresh_mask(VP10_COMP *cpi) {
@@ -1129,15 +1095,15 @@ static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
       TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
 
       tok_end = cpi->tile_tok[tile_row][tile_col] +
-          cpi->tok_count[tile_row][tile_col];
+                cpi->tok_count[tile_row][tile_col];
 
       if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
         vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
       else
         vpx_start_encode(&residual_bc, data_ptr + total_size);
 
-      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
-                  &residual_bc, &tok, tok_end);
+      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info, &residual_bc, &tok,
+                  tok_end);
       assert(tok == tok_end);
       vpx_stop_encode(&residual_bc);
       if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
@@ -1161,8 +1127,8 @@ static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
 
 static void write_render_size(const VP10_COMMON *cm,
                               struct vpx_write_bit_buffer *wb) {
-  const int scaling_active = cm->width != cm->render_width ||
-                             cm->height != cm->render_height;
+  const int scaling_active =
+      cm->width != cm->render_width || cm->height != cm->render_height;
   vpx_wb_write_bit(wb, scaling_active);
   if (scaling_active) {
     vpx_wb_write_literal(wb, cm->render_width - 1, 16);
@@ -1188,8 +1154,8 @@ static void write_frame_size_with_refs(VP10_COMP *cpi,
     YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
 
     if (cfg != NULL) {
-      found = cm->width == cfg->y_crop_width &&
-              cm->height == cfg->y_crop_height;
+      found =
+          cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height;
 #if CONFIG_MISC_FIXES
       found &= cm->render_width == cfg->render_width &&
                cm->render_height == cfg->render_height;
@@ -1224,20 +1190,11 @@ static void write_sync_code(struct vpx_write_bit_buffer *wb) {
 static void write_profile(BITSTREAM_PROFILE profile,
                           struct vpx_write_bit_buffer *wb) {
   switch (profile) {
-    case PROFILE_0:
-      vpx_wb_write_literal(wb, 0, 2);
-      break;
-    case PROFILE_1:
-      vpx_wb_write_literal(wb, 2, 2);
-      break;
-    case PROFILE_2:
-      vpx_wb_write_literal(wb, 1, 2);
-      break;
-    case PROFILE_3:
-      vpx_wb_write_literal(wb, 6, 3);
-      break;
-    default:
-      assert(0);
+    case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
+    case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
+    case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
+    case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
+    default: assert(0);
   }
 }
 
@@ -1284,8 +1241,7 @@ static void write_uncompressed_header(VP10_COMP *cpi,
     write_bitdepth_colorspace_sampling(cm, wb);
     write_frame_size(cm, wb);
   } else {
-    if (!cm->show_frame)
-      vpx_wb_write_bit(wb, cm->intra_only);
+    if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only);
 
     if (!cm->error_resilient_mode) {
 #if CONFIG_MISC_FIXES
@@ -1302,8 +1258,8 @@ static void write_uncompressed_header(VP10_COMP *cpi,
 #else
       static const int reset_frame_context_conv_tbl[3] = { 0, 2, 3 };
 
-      vpx_wb_write_literal(wb,
-          reset_frame_context_conv_tbl[cm->reset_frame_context], 2);
+      vpx_wb_write_literal(
+          wb, reset_frame_context_conv_tbl[cm->reset_frame_context], 2);
 #endif
     }
 
@@ -1346,8 +1302,8 @@ static void write_uncompressed_header(VP10_COMP *cpi,
 #if CONFIG_MISC_FIXES
     if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF)
 #endif
-      vpx_wb_write_bit(wb, cm->refresh_frame_context !=
-                               REFRESH_FRAME_CONTEXT_BACKWARD);
+      vpx_wb_write_bit(
+          wb, cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD);
   }
 
   vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
@@ -1365,8 +1321,7 @@ static void write_uncompressed_header(VP10_COMP *cpi,
     const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
 
     vpx_wb_write_bit(wb, use_hybrid_pred);
-    if (!use_hybrid_pred)
-      vpx_wb_write_bit(wb, use_compound_pred);
+    if (!use_hybrid_pred) vpx_wb_write_bit(wb, use_compound_pred);
   }
 #endif
 
@@ -1427,7 +1382,7 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
 
     for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
       vp10_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
-                                counts->intra_inter[i]);
+                                 counts->intra_inter[i]);
 
     if (cpi->allow_comp_inter_inter) {
       const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
@@ -1440,7 +1395,7 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
         if (use_hybrid_pred)
           for (i = 0; i < COMP_INTER_CONTEXTS; i++)
             vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
-                                      counts->comp_inter[i]);
+                                       counts->comp_inter[i]);
       }
 #else
       if (use_hybrid_pred)
@@ -1453,16 +1408,16 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
     if (cm->reference_mode != COMPOUND_REFERENCE) {
       for (i = 0; i < REF_CONTEXTS; i++) {
         vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
-                                  counts->single_ref[i][0]);
+                                   counts->single_ref[i][0]);
         vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
-                                  counts->single_ref[i][1]);
+                                   counts->single_ref[i][1]);
       }
     }
 
     if (cm->reference_mode != SINGLE_REFERENCE)
       for (i = 0; i < REF_CONTEXTS; i++)
         vp10_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
-                                  counts->comp_ref[i]);
+                                   counts->comp_ref[i]);
 
     for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
       prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
@@ -1475,7 +1430,7 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
 #endif
 
     vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
-                        &counts->mv);
+                         &counts->mv);
     update_ext_tx_probs(cm, &header_bc);
   }
 
@@ -1486,8 +1441,8 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
 }
 
 #if CONFIG_MISC_FIXES
-static int remux_tiles(uint8_t *dest, const int sz,
-                       const int n_tiles, const int mag) {
+static int remux_tiles(uint8_t *dest, const int sz, const int n_tiles,
+                       const int mag) {
   int rpos = 0, wpos = 0, n;
 
   for (n = 0; n < n_tiles; n++) {
@@ -1499,18 +1454,11 @@ static int remux_tiles(uint8_t *dest, const int sz,
       tile_sz = mem_get_le32(&dest[rpos]) + 1;
       rpos += 4;
       switch (mag) {
-        case 0:
-          dest[wpos] = tile_sz - 1;
-          break;
-        case 1:
-          mem_put_le16(&dest[wpos], tile_sz - 1);
-          break;
-        case 2:
-          mem_put_le24(&dest[wpos], tile_sz - 1);
-          break;
+        case 0: dest[wpos] = tile_sz - 1; break;
+        case 1: mem_put_le16(&dest[wpos], tile_sz - 1); break;
+        case 2: mem_put_le24(&dest[wpos], tile_sz - 1); break;
         case 3:  // remuxing should only happen if mag < 3
-        default:
-          assert("Invalid value for tile size magnitude" && 0);
+        default: assert("Invalid value for tile size magnitude" && 0);
       }
       wpos += mag + 1;
     }
@@ -1530,7 +1478,7 @@ static int remux_tiles(uint8_t *dest, const int sz,
 void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size) {
   uint8_t *data = dest;
   size_t first_part_size, uncompressed_hdr_size, data_sz;
-  struct vpx_write_bit_buffer wb = {data, 0};
+  struct vpx_write_bit_buffer wb = { data, 0 };
   struct vpx_write_bit_buffer saved_wb;
   unsigned int max_tile;
 #if CONFIG_MISC_FIXES
@@ -1563,8 +1511,7 @@ void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size) {
 
     // Choose the (tile size) magnitude
     for (mag = 0, mask = 0xff; mag < 4; mag++) {
-      if (max_tile <= mask)
-        break;
+      if (max_tile <= mask) break;
       mask <<= 8;
       mask |= 0xff;
     }
diff --git a/vp10/encoder/bitstream.h b/vp10/encoder/bitstream.h
index b1da89f1d770dd6693cfda2f994f4f4bae65b722..ab692e85f6c2bc4c638aa7c244e8756ed09edc00 100644
--- a/vp10/encoder/bitstream.h
+++ b/vp10/encoder/bitstream.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_BITSTREAM_H_
 #define VP10_ENCODER_BITSTREAM_H_
 
diff --git a/vp10/encoder/block.h b/vp10/encoder/block.h
index ab0252baaef3b7666120904b004e7c5bf03c3b8a..3d35485071eb44d64cf92133448d4aaa58d71c54 100644
--- a/vp10/encoder/block.h
+++ b/vp10/encoder/block.h
@@ -45,7 +45,7 @@ struct macroblock_plane {
 /* The [2] dimension is for whether we skip the EOB node (i.e., whether the
  * previous coefficient in this block was zero) or not. */
 typedef unsigned int vp10_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
-                                   [COEFF_CONTEXTS][ENTROPY_TOKENS];
+                                    [COEFF_CONTEXTS][ENTROPY_TOKENS];
 
 typedef struct {
   int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
@@ -70,8 +70,8 @@ struct macroblock {
   int rddiv;
   int rdmult;
   int mb_energy;
-  int * m_search_count_ptr;
-  int * ex_search_count_ptr;
+  int *m_search_count_ptr;
+  int *ex_search_count_ptr;
 
   // These are set to their default values at the beginning, and then adjusted
   // further in the encoding process.
@@ -122,9 +122,9 @@ struct macroblock {
 
   // skip forward transform and quantization
   uint8_t skip_txfm[MAX_MB_PLANE << 2];
-  #define SKIP_TXFM_NONE 0
-  #define SKIP_TXFM_AC_DC 1
-  #define SKIP_TXFM_AC_ONLY 2
+#define SKIP_TXFM_NONE 0
+#define SKIP_TXFM_AC_DC 1
+#define SKIP_TXFM_AC_ONLY 2
 
   int64_t bsse[MAX_MB_PLANE << 2];
 
diff --git a/vp10/encoder/blockiness.c b/vp10/encoder/blockiness.c
index ede13e0e597596f408c2b86b99edb5e09e6f073c..c57e4efea66d7a0f3e4ceef445ab3a60dcd992fd 100644
--- a/vp10/encoder/blockiness.c
+++ b/vp10/encoder/blockiness.c
@@ -70,9 +70,9 @@ static int blockiness_vertical(const uint8_t *s, int sp, const uint8_t *r,
     s_blockiness += horizontal_filter(s);
     r_blockiness += horizontal_filter(r);
     sum_0 += s[0];
-    sum_sq_0 += s[0]*s[0];
+    sum_sq_0 += s[0] * s[0];
     sum_1 += s[-1];
-    sum_sq_1 += s[-1]*s[-1];
+    sum_sq_1 += s[-1] * s[-1];
   }
   var_0 = variance(sum_0, sum_sq_0, size);
   var_1 = variance(sum_1, sum_sq_1, size);
@@ -120,19 +120,19 @@ static int blockiness_horizontal(const uint8_t *s, int sp, const uint8_t *r,
 // This function currently computes the blockiness for the entire frame by
 // looking at all borders in steps of 4.
 double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
-                          const unsigned char *img2, int img2_pitch,
-                          int width, int height ) {
+                           const unsigned char *img2, int img2_pitch, int width,
+                           int height) {
   double blockiness = 0;
   int i, j;
   vpx_clear_system_state();
-  for (i = 0; i < height; i += 4, img1 += img1_pitch * 4,
-       img2 += img2_pitch * 4) {
+  for (i = 0; i < height;
+       i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
     for (j = 0; j < width; j += 4) {
       if (i > 0 && i < height && j > 0 && j < width) {
-        blockiness += blockiness_vertical(img1 + j, img1_pitch,
-                                          img2 + j, img2_pitch, 4);
-        blockiness += blockiness_horizontal(img1 + j, img1_pitch,
-                                            img2 + j, img2_pitch, 4);
+        blockiness +=
+            blockiness_vertical(img1 + j, img1_pitch, img2 + j, img2_pitch, 4);
+        blockiness += blockiness_horizontal(img1 + j, img1_pitch, img2 + j,
+                                            img2_pitch, 4);
       }
     }
   }
diff --git a/vp10/encoder/context_tree.c b/vp10/encoder/context_tree.c
index 6c056d28e1a0b54a255f37b325134ed5082c9f89..07ae254aea93145597109e5437428d2255ff3e6a 100644
--- a/vp10/encoder/context_tree.c
+++ b/vp10/encoder/context_tree.c
@@ -12,10 +12,7 @@
 #include "vp10/encoder/encoder.h"
 
 static const BLOCK_SIZE square[] = {
-  BLOCK_8X8,
-  BLOCK_16X16,
-  BLOCK_32X32,
-  BLOCK_64X64,
+  BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64,
 };
 
 static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
@@ -25,8 +22,7 @@ static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
   int i, k;
   ctx->num_4x4_blk = num_blk;
 
-  CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
-                  vpx_calloc(num_blk, sizeof(uint8_t)));
+  CHECK_MEM_ERROR(cm, ctx->zcoeff_blk, vpx_calloc(num_blk, sizeof(uint8_t)));
   for (i = 0; i < MAX_MB_PLANE; ++i) {
     for (k = 0; k < 3; ++k) {
       CHECK_MEM_ERROR(cm, ctx->coeff[i][k],
@@ -37,10 +33,10 @@ static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
                       vpx_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k])));
       CHECK_MEM_ERROR(cm, ctx->eobs[i][k],
                       vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k])));
-      ctx->coeff_pbuf[i][k]   = ctx->coeff[i][k];
-      ctx->qcoeff_pbuf[i][k]  = ctx->qcoeff[i][k];
+      ctx->coeff_pbuf[i][k] = ctx->coeff[i][k];
+      ctx->qcoeff_pbuf[i][k] = ctx->qcoeff[i][k];
       ctx->dqcoeff_pbuf[i][k] = ctx->dqcoeff[i][k];
-      ctx->eobs_pbuf[i][k]    = ctx->eobs[i][k];
+      ctx->eobs_pbuf[i][k] = ctx->eobs[i][k];
     }
   }
 }
@@ -71,12 +67,12 @@ static void free_mode_context(PICK_MODE_CONTEXT *ctx) {
 static void alloc_tree_contexts(VP10_COMMON *cm, PC_TREE *tree,
                                 int num_4x4_blk) {
   alloc_mode_context(cm, num_4x4_blk, &tree->none);
-  alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[0]);
-  alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[0]);
+  alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[0]);
+  alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[0]);
 
   if (num_4x4_blk > 4) {
-    alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[1]);
-    alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[1]);
+    alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[1]);
+    alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[1]);
   } else {
     memset(&tree->horizontal[1], 0, sizeof(tree->horizontal[1]));
     memset(&tree->vertical[1], 0, sizeof(tree->vertical[1]));
@@ -106,19 +102,18 @@ void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
   int nodes;
 
   vpx_free(td->leaf_tree);
-  CHECK_MEM_ERROR(cm, td->leaf_tree, vpx_calloc(leaf_nodes,
-                                                sizeof(*td->leaf_tree)));
+  CHECK_MEM_ERROR(cm, td->leaf_tree,
+                  vpx_calloc(leaf_nodes, sizeof(*td->leaf_tree)));
   vpx_free(td->pc_tree);
-  CHECK_MEM_ERROR(cm, td->pc_tree, vpx_calloc(tree_nodes,
-                                              sizeof(*td->pc_tree)));
+  CHECK_MEM_ERROR(cm, td->pc_tree,
+                  vpx_calloc(tree_nodes, sizeof(*td->pc_tree)));
 
   this_pc = &td->pc_tree[0];
   this_leaf = &td->leaf_tree[0];
 
   // Blocks smaller than 8x8 that fall within the same 8x8 block share the
   // same context, so we only need to allocate 1 for each 8x8 block.
-  for (i = 0; i < leaf_nodes; ++i)
-    alloc_mode_context(cm, 1, &td->leaf_tree[i]);
+  for (i = 0; i < leaf_nodes; ++i) alloc_mode_context(cm, 1, &td->leaf_tree[i]);
 
   // Sets up all the leaf nodes in the tree.
   for (pc_tree_index = 0; pc_tree_index < leaf_nodes; ++pc_tree_index) {
@@ -126,8 +121,7 @@ void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
     tree->block_size = square[0];
     alloc_tree_contexts(cm, tree, 4);
     tree->leaf_split[0] = this_leaf++;
-    for (j = 1; j < 4; j++)
-      tree->leaf_split[j] = tree->leaf_split[0];
+    for (j = 1; j < 4; j++) tree->leaf_split[j] = tree->leaf_split[0];
   }
 
   // Each node has 4 leaf nodes, fill each block_size level of the tree
@@ -137,8 +131,7 @@ void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
       PC_TREE *const tree = &td->pc_tree[pc_tree_index];
       alloc_tree_contexts(cm, tree, 4 << (2 * square_index));
       tree->block_size = square[square_index];
-      for (j = 0; j < 4; j++)
-        tree->split[j] = this_pc++;
+      for (j = 0; j < 4; j++) tree->split[j] = this_pc++;
       ++pc_tree_index;
     }
     ++square_index;
@@ -152,12 +145,10 @@ void vp10_free_pc_tree(ThreadData *td) {
   int i;
 
   // Free all 4x4 mode contexts
-  for (i = 0; i < 64; ++i)
-    free_mode_context(&td->leaf_tree[i]);
+  for (i = 0; i < 64; ++i) free_mode_context(&td->leaf_tree[i]);
 
   // Free the tree contexts of all internal nodes.
-  for (i = 0; i < tree_nodes; ++i)
-    free_tree_contexts(&td->pc_tree[i]);
+  for (i = 0; i < tree_nodes; ++i) free_tree_contexts(&td->pc_tree[i]);
 
   vpx_free(td->pc_tree);
   td->pc_tree = NULL;
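For scale, the counts behind these loops follow from the 64x64 superblock geometry: the free loop above walks exactly 64 leaf contexts (one per 8x8 block), and the PC_TREE array holds one node per square block at each level. A back-of-envelope sketch; the tree_nodes total is an assumption, since its definition sits outside these hunks:

/* Assumed node counts for a 64x64 superblock. */
enum {
  LEAF_NODES_SKETCH = (64 / 8) * (64 / 8), /* = 64, matches the loop above */
  TREE_NODES_SKETCH = 64 + 16 + 4 + 1      /* = 85; 8x8..64x64 levels, assumed */
};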
diff --git a/vp10/encoder/cost.c b/vp10/encoder/cost.c
index aab826322be48dbb71822bd0e75e7076f9e0778b..a4cd43aa2dab83de0b50863733cb2664108e61ab 100644
--- a/vp10/encoder/cost.c
+++ b/vp10/encoder/cost.c
@@ -12,31 +12,30 @@
 #include "vp10/encoder/cost.h"
 
 const unsigned int vp10_prob_cost[256] = {
-  2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161,
-  1129, 1099, 1072, 1046, 1023, 1000, 979,  959,  940,  922,  905,  889,
-  873,  858,  843,  829,  816,  803,  790,  778,  767,  755,  744,  733,
-  723,  713,  703,  693,  684,  675,  666,  657,  649,  641,  633,  625,
-  617,  609,  602,  594,  587,  580,  573,  567,  560,  553,  547,  541,
-  534,  528,  522,  516,  511,  505,  499,  494,  488,  483,  477,  472,
-  467,  462,  457,  452,  447,  442,  437,  433,  428,  424,  419,  415,
-  410,  406,  401,  397,  393,  389,  385,  381,  377,  373,  369,  365,
-  361,  357,  353,  349,  346,  342,  338,  335,  331,  328,  324,  321,
-  317,  314,  311,  307,  304,  301,  297,  294,  291,  288,  285,  281,
-  278,  275,  272,  269,  266,  263,  260,  257,  255,  252,  249,  246,
-  243,  240,  238,  235,  232,  229,  227,  224,  221,  219,  216,  214,
-  211,  208,  206,  203,  201,  198,  196,  194,  191,  189,  186,  184,
-  181,  179,  177,  174,  172,  170,  168,  165,  163,  161,  159,  156,
-  154,  152,  150,  148,  145,  143,  141,  139,  137,  135,  133,  131,
-  129,  127,  125,  123,  121,  119,  117,  115,  113,  111,  109,  107,
-  105,  103,  101,  99,   97,   95,   93,   92,   90,   88,   86,   84,
-  82,   81,   79,   77,   75,   73,   72,   70,   68,   66,   65,   63,
-  61,   60,   58,   56,   55,   53,   51,   50,   48,   46,   45,   43,
-  41,   40,   38,   37,   35,   33,   32,   30,   29,   27,   25,   24,
-  22,   21,   19,   18,   16,   15,   13,   12,   10,   9,    7,    6,
-  4,    3,    1,    1};
-
-static void cost(int *costs, vpx_tree tree, const vpx_prob *probs,
-                 int i, int c) {
+  2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129,
+  1099, 1072, 1046, 1023, 1000, 979,  959,  940,  922,  905,  889,  873,  858,
+  843,  829,  816,  803,  790,  778,  767,  755,  744,  733,  723,  713,  703,
+  693,  684,  675,  666,  657,  649,  641,  633,  625,  617,  609,  602,  594,
+  587,  580,  573,  567,  560,  553,  547,  541,  534,  528,  522,  516,  511,
+  505,  499,  494,  488,  483,  477,  472,  467,  462,  457,  452,  447,  442,
+  437,  433,  428,  424,  419,  415,  410,  406,  401,  397,  393,  389,  385,
+  381,  377,  373,  369,  365,  361,  357,  353,  349,  346,  342,  338,  335,
+  331,  328,  324,  321,  317,  314,  311,  307,  304,  301,  297,  294,  291,
+  288,  285,  281,  278,  275,  272,  269,  266,  263,  260,  257,  255,  252,
+  249,  246,  243,  240,  238,  235,  232,  229,  227,  224,  221,  219,  216,
+  214,  211,  208,  206,  203,  201,  198,  196,  194,  191,  189,  186,  184,
+  181,  179,  177,  174,  172,  170,  168,  165,  163,  161,  159,  156,  154,
+  152,  150,  148,  145,  143,  141,  139,  137,  135,  133,  131,  129,  127,
+  125,  123,  121,  119,  117,  115,  113,  111,  109,  107,  105,  103,  101,
+  99,   97,   95,   93,   92,   90,   88,   86,   84,   82,   81,   79,   77,
+  75,   73,   72,   70,   68,   66,   65,   63,   61,   60,   58,   56,   55,
+  53,   51,   50,   48,   46,   45,   43,   41,   40,   38,   37,   35,   33,
+  32,   30,   29,   27,   25,   24,   22,   21,   19,   18,   16,   15,   13,
+  12,   10,   9,    7,    6,    4,    3,    1,    1
+};
+
+static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, int i,
+                 int c) {
   const vpx_prob prob = probs[i / 2];
   int b;
 
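The reflowed vp10_prob_cost table stores, to within about one unit of rounding, the cost in 1/256-bit units of coding a zero that has probability p/256: entry 128 is 255 (roughly one bit) and entry 16 is 1023 (roughly four bits). A hedged generator for comparison; the table's exact rounding may differ slightly:

#include <math.h>

/* Approximate vp10_prob_cost entry: -256 * log2(p / 256), p clamped to
 * at least 1 (entry 0 mirrors entry 1). Link with -lm. */
static unsigned int approx_prob_cost(int p) {
  if (p < 1) p = 1;
  return (unsigned int)(-256.0 * log2(p / 256.0) + 0.5);
}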
diff --git a/vp10/encoder/cost.h b/vp10/encoder/cost.h
index b9619c6b1b6d261bc3da2b5aad82f6dd48f4f951..702f0a4f221bd7823653fae6ac83abc6dab60018 100644
--- a/vp10/encoder/cost.h
+++ b/vp10/encoder/cost.h
@@ -23,16 +23,16 @@ extern const unsigned int vp10_prob_cost[256];
 
 #define vp10_cost_one(prob) vp10_cost_zero(vpx_complement(prob))
 
-#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? vpx_complement(prob) \
-                                                    : (prob))
+#define vp10_cost_bit(prob, bit) \
+  vp10_cost_zero((bit) ? vpx_complement(prob) : (prob))
 
 static INLINE unsigned int cost_branch256(const unsigned int ct[2],
                                           vpx_prob p) {
   return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
 }
 
-static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs,
-                             int bits, int len) {
+static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, int bits,
+                             int len) {
   int cost = 0;
   vpx_tree_index i = 0;
 
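Since cost_branch256() is fully visible above, a short usage sketch (assuming cost.h is included): given the zero/one counts gathered for a branch, it returns the total cost of those decisions at probability p, in the same scaled units as vp10_prob_cost.

/* Hypothetical example: 10 zeros and 30 ones coded with P(zero) = 64/256. */
static unsigned int example_branch_cost(void) {
  const unsigned int ct[2] = { 10, 30 };
  return cost_branch256(ct, 64); /* 10 * cost(zero) + 30 * cost(one) */
}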
diff --git a/vp10/encoder/dct.c b/vp10/encoder/dct.c
index 6fb7870e510bcd54ae31c8ae9d38c4e03ec1e866..2a7ba7ef4ed497f0c7aa62836d856453750e0025 100644
--- a/vp10/encoder/dct.c
+++ b/vp10/encoder/dct.c
@@ -165,11 +165,11 @@ static void fdct16(const tran_low_t in[16], tran_low_t out[16]) {
   input[3] = in[3] + in[12];
   input[4] = in[4] + in[11];
   input[5] = in[5] + in[10];
-  input[6] = in[6] + in[ 9];
-  input[7] = in[7] + in[ 8];
+  input[6] = in[6] + in[9];
+  input[7] = in[7] + in[8];
 
-  step1[0] = in[7] - in[ 8];
-  step1[1] = in[6] - in[ 9];
+  step1[0] = in[7] - in[8];
+  step1[1] = in[6] - in[9];
   step1[2] = in[5] - in[10];
   step1[3] = in[4] - in[11];
   step1[4] = in[3] - in[12];
@@ -292,7 +292,6 @@ static void fdct16(const tran_low_t in[16], tran_low_t out[16]) {
   out[15] = (tran_low_t)fdct_round_shift(temp2);
 }
 
-
 /* TODO(angiebird): Unify this with vp10_fwd_txfm.c: vp10_fdct32
 static void fdct32(const tran_low_t *input, tran_low_t *output) {
   tran_high_t temp;
@@ -746,14 +745,14 @@ static void fadst8(const tran_low_t *input, tran_low_t *output) {
   tran_high_t x7 = input[6];
 
   // stage 1
-  s0 = cospi_2_64  * x0 + cospi_30_64 * x1;
-  s1 = cospi_30_64 * x0 - cospi_2_64  * x1;
+  s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+  s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
   s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
   s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
   s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
   s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
-  s6 = cospi_26_64 * x6 + cospi_6_64  * x7;
-  s7 = cospi_6_64  * x6 - cospi_26_64 * x7;
+  s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+  s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
 
   x0 = fdct_round_shift(s0 + s4);
   x1 = fdct_round_shift(s1 + s5);
@@ -769,10 +768,10 @@ static void fadst8(const tran_low_t *input, tran_low_t *output) {
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 = cospi_8_64  * x4 + cospi_24_64 * x5;
-  s5 = cospi_24_64 * x4 - cospi_8_64  * x5;
-  s6 = - cospi_24_64 * x6 + cospi_8_64  * x7;
-  s7 =   cospi_8_64  * x6 + cospi_24_64 * x7;
+  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
 
   x0 = s0 + s2;
   x1 = s1 + s3;
@@ -826,11 +825,11 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) {
   tran_high_t x15 = input[14];
 
   // stage 1
-  s0 = x0 * cospi_1_64  + x1 * cospi_31_64;
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
   s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
-  s2 = x2 * cospi_5_64  + x3 * cospi_27_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
   s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
-  s4 = x4 * cospi_9_64  + x5 * cospi_23_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
   s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
   s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
   s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -839,9 +838,9 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) {
   s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
   s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
   s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
-  s13 = x12 * cospi_7_64  - x13 * cospi_25_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
   s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
-  s15 = x14 * cospi_3_64  - x15 * cospi_29_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
 
   x0 = fdct_round_shift(s0 + s8);
   x1 = fdct_round_shift(s1 + s9);
@@ -851,8 +850,8 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) {
   x5 = fdct_round_shift(s5 + s13);
   x6 = fdct_round_shift(s6 + s14);
   x7 = fdct_round_shift(s7 + s15);
-  x8  = fdct_round_shift(s0 - s8);
-  x9  = fdct_round_shift(s1 - s9);
+  x8 = fdct_round_shift(s0 - s8);
+  x9 = fdct_round_shift(s1 - s9);
   x10 = fdct_round_shift(s2 - s10);
   x11 = fdct_round_shift(s3 - s11);
   x12 = fdct_round_shift(s4 - s12);
@@ -869,14 +868,14 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) {
   s5 = x5;
   s6 = x6;
   s7 = x7;
-  s8 =    x8 * cospi_4_64   + x9 * cospi_28_64;
-  s9 =    x8 * cospi_28_64  - x9 * cospi_4_64;
-  s10 =   x10 * cospi_20_64 + x11 * cospi_12_64;
-  s11 =   x10 * cospi_12_64 - x11 * cospi_20_64;
-  s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
-  s13 =   x12 * cospi_4_64  + x13 * cospi_28_64;
-  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
-  s15 =   x14 * cospi_20_64 + x15 * cospi_12_64;
+  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
 
   x0 = s0 + s4;
   x1 = s1 + s5;
@@ -900,18 +899,18 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) {
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 = x4 * cospi_8_64  + x5 * cospi_24_64;
+  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
   s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
-  s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
-  s7 =   x6 * cospi_8_64  + x7 * cospi_24_64;
+  s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
   s8 = x8;
   s9 = x9;
   s10 = x10;
   s11 = x11;
-  s12 = x12 * cospi_8_64  + x13 * cospi_24_64;
+  s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
   s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
-  s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
-  s15 =   x14 * cospi_8_64  + x15 * cospi_24_64;
+  s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+  s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
 
   x0 = s0 + s2;
   x1 = s1 + s3;
@@ -931,13 +930,13 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) {
   x15 = fdct_round_shift(s13 - s15);
 
   // stage 4
-  s2 = (- cospi_16_64) * (x2 + x3);
+  s2 = (-cospi_16_64) * (x2 + x3);
   s3 = cospi_16_64 * (x2 - x3);
   s6 = cospi_16_64 * (x6 + x7);
-  s7 = cospi_16_64 * (- x6 + x7);
+  s7 = cospi_16_64 * (-x6 + x7);
   s10 = cospi_16_64 * (x10 + x11);
-  s11 = cospi_16_64 * (- x10 + x11);
-  s14 = (- cospi_16_64) * (x14 + x15);
+  s11 = cospi_16_64 * (-x10 + x11);
+  s14 = (-cospi_16_64) * (x14 + x15);
   s15 = cospi_16_64 * (x14 - x15);
 
   x2 = fdct_round_shift(s2);
@@ -968,28 +967,28 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) {
 }
 
 static const transform_2d FHT_4[] = {
-  { fdct4,  fdct4  },  // DCT_DCT  = 0
-  { fadst4, fdct4  },  // ADST_DCT = 1
-  { fdct4,  fadst4 },  // DCT_ADST = 2
-  { fadst4, fadst4 }   // ADST_ADST = 3
+  { fdct4, fdct4 },   // DCT_DCT  = 0
+  { fadst4, fdct4 },  // ADST_DCT = 1
+  { fdct4, fadst4 },  // DCT_ADST = 2
+  { fadst4, fadst4 }  // ADST_ADST = 3
 };
 
 static const transform_2d FHT_8[] = {
-  { fdct8,  fdct8  },  // DCT_DCT  = 0
-  { fadst8, fdct8  },  // ADST_DCT = 1
-  { fdct8,  fadst8 },  // DCT_ADST = 2
-  { fadst8, fadst8 }   // ADST_ADST = 3
+  { fdct8, fdct8 },   // DCT_DCT  = 0
+  { fadst8, fdct8 },  // ADST_DCT = 1
+  { fdct8, fadst8 },  // DCT_ADST = 2
+  { fadst8, fadst8 }  // ADST_ADST = 3
 };
 
 static const transform_2d FHT_16[] = {
-  { fdct16,  fdct16  },  // DCT_DCT  = 0
-  { fadst16, fdct16  },  // ADST_DCT = 1
-  { fdct16,  fadst16 },  // DCT_ADST = 2
-  { fadst16, fadst16 }   // ADST_ADST = 3
+  { fdct16, fdct16 },   // DCT_DCT  = 0
+  { fadst16, fdct16 },  // ADST_DCT = 1
+  { fdct16, fadst16 },  // DCT_ADST = 2
+  { fadst16, fadst16 }  // ADST_ADST = 3
 };
 
-void vp10_fht4x4_c(const int16_t *input, tran_low_t *output,
-                  int stride, int tx_type) {
+void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+                   int tx_type) {
   if (tx_type == DCT_DCT) {
     vpx_fdct4x4_c(input, output, stride);
   } else {
@@ -1000,36 +999,29 @@ void vp10_fht4x4_c(const int16_t *input, tran_low_t *output,
 
     // Columns
     for (i = 0; i < 4; ++i) {
-      for (j = 0; j < 4; ++j)
-        temp_in[j] = input[j * stride + i] * 16;
-      if (i == 0 && temp_in[0])
-        temp_in[0] += 1;
+      for (j = 0; j < 4; ++j) temp_in[j] = input[j * stride + i] * 16;
+      if (i == 0 && temp_in[0]) temp_in[0] += 1;
       ht.cols(temp_in, temp_out);
-      for (j = 0; j < 4; ++j)
-        out[j * 4 + i] = temp_out[j];
+      for (j = 0; j < 4; ++j) out[j * 4 + i] = temp_out[j];
     }
 
     // Rows
     for (i = 0; i < 4; ++i) {
-      for (j = 0; j < 4; ++j)
-        temp_in[j] = out[j + i * 4];
+      for (j = 0; j < 4; ++j) temp_in[j] = out[j + i * 4];
       ht.rows(temp_in, temp_out);
-      for (j = 0; j < 4; ++j)
-        output[j + i * 4] = (temp_out[j] + 1) >> 2;
+      for (j = 0; j < 4; ++j) output[j + i * 4] = (temp_out[j] + 1) >> 2;
     }
   }
 }
 
 void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
-                         tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                         int skip_block,
-                         const int16_t *zbin_ptr, const int16_t *round_ptr,
-                         const int16_t *quant_ptr,
-                         const int16_t *quant_shift_ptr,
-                         tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                         const int16_t *dequant_ptr,
-                         uint16_t *eob_ptr,
-                         const int16_t *scan, const int16_t *iscan) {
+                          tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                          int skip_block, const int16_t *zbin_ptr,
+                          const int16_t *round_ptr, const int16_t *quant_ptr,
+                          const int16_t *quant_shift_ptr,
+                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                          const int16_t *scan, const int16_t *iscan) {
   int eob = -1;
 
   int i, j;
@@ -1061,8 +1053,8 @@ void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
       x3 = s0 - s3;
       t0 = (x0 + x1) * cospi_16_64;
       t1 = (x0 - x1) * cospi_16_64;
-      t2 =  x2 * cospi_24_64 + x3 *  cospi_8_64;
-      t3 = -x2 * cospi_8_64  + x3 * cospi_24_64;
+      t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+      t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
       output[0 * 8] = (tran_low_t)fdct_round_shift(t0);
       output[2 * 8] = (tran_low_t)fdct_round_shift(t2);
       output[4 * 8] = (tran_low_t)fdct_round_shift(t1);
@@ -1081,10 +1073,10 @@ void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
       x3 = s7 + t3;
 
       // stage 4
-      t0 = x0 * cospi_28_64 + x3 *   cospi_4_64;
-      t1 = x1 * cospi_12_64 + x2 *  cospi_20_64;
+      t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+      t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
       t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
-      t3 = x3 * cospi_28_64 + x0 *  -cospi_4_64;
+      t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
       output[1 * 8] = (tran_low_t)fdct_round_shift(t0);
       output[3 * 8] = (tran_low_t)fdct_round_shift(t2);
       output[5 * 8] = (tran_low_t)fdct_round_shift(t1);
@@ -1097,8 +1089,7 @@ void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
   // Rows
   for (i = 0; i < 8; ++i) {
     fdct8(&intermediate[i * 8], &coeff_ptr[i * 8]);
-    for (j = 0; j < 8; ++j)
-      coeff_ptr[j + i * 8] /= 2;
+    for (j = 0; j < 8; ++j) coeff_ptr[j + i * 8] /= 2;
   }
 
   // TODO(jingning) Decide the need of these arguments after the
@@ -1125,15 +1116,14 @@ void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
       qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
       dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
 
-      if (tmp)
-        eob = i;
+      if (tmp) eob = i;
     }
   }
   *eob_ptr = eob + 1;
 }
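The quantize loop above restores the coefficient's sign with the (tmp ^ coeff_sign) - coeff_sign idiom. Assuming coeff_sign is the arithmetic-shift sign mask (coeff >> 31, i.e. 0 or -1 on two's-complement machines), XOR with -1 is bitwise NOT, and NOT plus one is negation:

/* Sketch of the sign-restoring idiom; sign_mask is 0 for non-negative
 * coefficients and -1 for negative ones. */
static int apply_sign(int magnitude, int sign_mask) {
  return (magnitude ^ sign_mask) - sign_mask; /* x when 0, -x when -1 */
}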
 
-void vp10_fht8x8_c(const int16_t *input, tran_low_t *output,
-                  int stride, int tx_type) {
+void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+                   int tx_type) {
   if (tx_type == DCT_DCT) {
     vpx_fdct8x8_c(input, output, stride);
   } else {
@@ -1144,17 +1134,14 @@ void vp10_fht8x8_c(const int16_t *input, tran_low_t *output,
 
     // Columns
     for (i = 0; i < 8; ++i) {
-      for (j = 0; j < 8; ++j)
-        temp_in[j] = input[j * stride + i] * 4;
+      for (j = 0; j < 8; ++j) temp_in[j] = input[j * stride + i] * 4;
       ht.cols(temp_in, temp_out);
-      for (j = 0; j < 8; ++j)
-        out[j * 8 + i] = temp_out[j];
+      for (j = 0; j < 8; ++j) out[j * 8 + i] = temp_out[j];
     }
 
     // Rows
     for (i = 0; i < 8; ++i) {
-      for (j = 0; j < 8; ++j)
-        temp_in[j] = out[j + i * 8];
+      for (j = 0; j < 8; ++j) temp_in[j] = out[j + i * 8];
       ht.rows(temp_in, temp_out);
       for (j = 0; j < 8; ++j)
         output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
@@ -1218,8 +1205,8 @@ void vp10_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   }
 }
 
-void vp10_fht16x16_c(const int16_t *input, tran_low_t *output,
-                    int stride, int tx_type) {
+void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
   if (tx_type == DCT_DCT) {
     vpx_fdct16x16_c(input, output, stride);
   } else {
@@ -1230,8 +1217,7 @@ void vp10_fht16x16_c(const int16_t *input, tran_low_t *output,
 
     // Columns
     for (i = 0; i < 16; ++i) {
-      for (j = 0; j < 16; ++j)
-        temp_in[j] = input[j * stride + i] * 4;
+      for (j = 0; j < 16; ++j) temp_in[j] = input[j * stride + i] * 4;
       ht.cols(temp_in, temp_out);
       for (j = 0; j < 16; ++j)
         out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
@@ -1239,33 +1225,31 @@ void vp10_fht16x16_c(const int16_t *input, tran_low_t *output,
 
     // Rows
     for (i = 0; i < 16; ++i) {
-      for (j = 0; j < 16; ++j)
-        temp_in[j] = out[j + i * 16];
+      for (j = 0; j < 16; ++j) temp_in[j] = out[j + i * 16];
       ht.rows(temp_in, temp_out);
-      for (j = 0; j < 16; ++j)
-        output[j + i * 16] = temp_out[j];
+      for (j = 0; j < 16; ++j) output[j + i * 16] = temp_out[j];
     }
   }
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output,
-                         int stride, int tx_type) {
+void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+                          int tx_type) {
   vp10_fht4x4_c(input, output, stride, tx_type);
 }
 
-void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output,
-                         int stride, int tx_type) {
+void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+                          int tx_type) {
   vp10_fht8x8_c(input, output, stride, tx_type);
 }
 
 void vp10_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
-                          int stride) {
+                           int stride) {
   vp10_fwht4x4_c(input, output, stride);
 }
 
 void vp10_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
-                           int stride, int tx_type) {
+                            int stride, int tx_type) {
   vp10_fht16x16_c(input, output, stride, tx_type);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
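Throughout dct.c the butterflies multiply by cospi_k_64 constants and then call fdct_round_shift(). Assuming the usual libvpx fixed-point convention (cosines scaled by 2^14, DCT_CONST_BITS = 14; both defined outside this diff), the pair behaves like this sketch:

#include <math.h>
#include <stdint.h>

#define DCT_CONST_BITS_SKETCH 14 /* assumed value of DCT_CONST_BITS */

/* Rounding shift that drops the 14 fractional bits after a butterfly. */
static int64_t fdct_round_shift_sketch(int64_t x) {
  return (x + ((int64_t)1 << (DCT_CONST_BITS_SKETCH - 1))) >>
         DCT_CONST_BITS_SKETCH;
}

/* Hypothetical cospi_k_64 generator: round(cos(k * pi / 64) * 2^14);
 * e.g. k = 16 gives 11585 (~0.7071 * 16384). Link with -lm. */
static int cospi_sketch(int k) {
  return (int)lrint(cos(k * acos(-1.0) / 64.0) * (1 << DCT_CONST_BITS_SKETCH));
}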
diff --git a/vp10/encoder/encodeframe.c b/vp10/encoder/encodeframe.c
index 99c8d79e66db6d294ea76efd12a3427f797207c4..f9fa1041df26ee70c4981893588cf9e1819789b7 100644
--- a/vp10/encoder/encodeframe.c
+++ b/vp10/encoder/encodeframe.c
@@ -46,90 +46,86 @@
 #include "vp10/encoder/segmentation.h"
 #include "vp10/encoder/tokenize.h"
 
-static void encode_superblock(VP10_COMP *cpi, ThreadData * td,
-                              TOKENEXTRA **t, int output_enabled,
-                              int mi_row, int mi_col, BLOCK_SIZE bsize,
-                              PICK_MODE_CONTEXT *ctx);
+static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                              int output_enabled, int mi_row, int mi_col,
+                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
 
 // This is used as a reference when computing the source variance for the
 //  purposes of activity masking.
 // Eventually this should be replaced by custom no-reference routines,
 //  which will be faster.
 static const uint8_t VP9_VAR_OFFS[64] = {
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
 };
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128,
-    128, 128, 128, 128, 128, 128, 128, 128
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
 };
 
 static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
-    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
-    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
-    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
-    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
-    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
-    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
-    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
-    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
 };
 
 static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
-    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
-    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
-    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
-    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
-    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
-    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
-    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
-    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16
 };
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
-                                           const struct buf_2d *ref,
-                                           BLOCK_SIZE bs) {
+                                            const struct buf_2d *ref,
+                                            BLOCK_SIZE bs) {
   unsigned int sse;
-  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
-                                              VP9_VAR_OFFS, 0, &sse);
+  const unsigned int var =
+      cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP9_VAR_OFFS, 0, &sse);
   return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(
-    VP10_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
+unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
+                                                 const struct buf_2d *ref,
+                                                 BLOCK_SIZE bs, int bd) {
   unsigned int var, sse;
   switch (bd) {
     case 10:
-      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
-                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
-                               0, &sse);
+      var =
+          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+                             CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), 0, &sse);
       break;
     case 12:
-      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
-                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
-                               0, &sse);
+      var =
+          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+                             CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), 0, &sse);
       break;
     case 8:
     default:
-      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
-                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
-                               0, &sse);
+      var =
+          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+                             CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), 0, &sse);
       break;
   }
   return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
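Both per-pixel variance helpers above end with ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]), a rounded divide by the block's pixel count; and because VP9_VAR_OFFS and its high-bit-depth variants are flat buffers of 128s, the vf() call effectively measures the source block's own variance. A sketch of the assumed normalisation (num_pels_log2_lookup[BLOCK_64X64] would be 12, since 64 * 64 = 4096):

/* Assumed behaviour of ROUND_POWER_OF_TWO(value, n): divide by 2^n with
 * rounding; log2_pels >= 1 here. */
static unsigned int per_pixel_sketch(unsigned int var, int log2_pels) {
  return (var + (1u << (log2_pels - 1))) >> log2_pels;
}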
@@ -152,12 +148,10 @@ static unsigned int get_sby_perpixel_diff_variance(VP10_COMP *cpi,
 }
 
 static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
-                                                   MACROBLOCK *x,
-                                                   int mi_row,
+                                                   MACROBLOCK *x, int mi_row,
                                                    int mi_col) {
-  unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
-                                                    mi_row, mi_col,
-                                                    BLOCK_64X64);
+  unsigned int var = get_sby_perpixel_diff_variance(
+      cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
   if (var < 8)
     return BLOCK_64X64;
   else if (var < 128)
@@ -172,8 +166,7 @@ static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
 // pointers.
 static INLINE void set_mode_info_offsets(VP10_COMP *const cpi,
                                          MACROBLOCK *const x,
-                                         MACROBLOCKD *const xd,
-                                         int mi_row,
+                                         MACROBLOCKD *const xd, int mi_row,
                                          int mi_col) {
   VP10_COMMON *const cm = &cpi->common;
   const int idx_str = xd->mi_stride * mi_row + mi_col;
@@ -210,8 +203,8 @@ static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
 
   // Set up distance of MB to edge of frame in 1/8th pel units.
   assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
-  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
-                 cm->mi_rows, cm->mi_cols);
+  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
+                 cm->mi_cols);
 
   // Set up source buffers.
   vp10_setup_src_planes(x, cpi->Source, mi_row, mi_col);
@@ -223,8 +216,8 @@ static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
   // Setup segment ID.
   if (seg->enabled) {
     if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
-      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
-                                                 : cm->last_frame_seg_map;
+      const uint8_t *const map =
+          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
       mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
     }
     vp10_init_plane_quantizers(cpi, x);
@@ -239,10 +232,8 @@ static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
   xd->tile = *tile;
 }
 
-static void set_block_size(VP10_COMP * const cpi,
-                           MACROBLOCK *const x,
-                           MACROBLOCKD *const xd,
-                           int mi_row, int mi_col,
+static void set_block_size(VP10_COMP *const cpi, MACROBLOCK *const x,
+                           MACROBLOCKD *const xd, int mi_row, int mi_col,
                            BLOCK_SIZE bsize) {
   if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
     set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
@@ -304,38 +295,37 @@ static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
   node->part_variances = NULL;
   switch (bsize) {
     case BLOCK_64X64: {
-      v64x64 *vt = (v64x64 *) data;
+      v64x64 *vt = (v64x64 *)data;
       node->part_variances = &vt->part_variances;
       for (i = 0; i < 4; i++)
         node->split[i] = &vt->split[i].part_variances.none;
       break;
     }
     case BLOCK_32X32: {
-      v32x32 *vt = (v32x32 *) data;
+      v32x32 *vt = (v32x32 *)data;
       node->part_variances = &vt->part_variances;
       for (i = 0; i < 4; i++)
         node->split[i] = &vt->split[i].part_variances.none;
       break;
     }
     case BLOCK_16X16: {
-      v16x16 *vt = (v16x16 *) data;
+      v16x16 *vt = (v16x16 *)data;
       node->part_variances = &vt->part_variances;
       for (i = 0; i < 4; i++)
         node->split[i] = &vt->split[i].part_variances.none;
       break;
     }
     case BLOCK_8X8: {
-      v8x8 *vt = (v8x8 *) data;
+      v8x8 *vt = (v8x8 *)data;
       node->part_variances = &vt->part_variances;
       for (i = 0; i < 4; i++)
         node->split[i] = &vt->split[i].part_variances.none;
       break;
     }
     case BLOCK_4X4: {
-      v4x4 *vt = (v4x4 *) data;
+      v4x4 *vt = (v4x4 *)data;
       node->part_variances = &vt->part_variances;
-      for (i = 0; i < 4; i++)
-        node->split[i] = &vt->split[i];
+      for (i = 0; i < 4; i++) node->split[i] = &vt->split[i];
       break;
     }
     default: {
@@ -353,8 +343,10 @@ static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
 }
 
 static void get_variance(var *v) {
-  v->variance = (int)(256 * (v->sum_square_error -
-      ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
+  v->variance =
+      (int)(256 * (v->sum_square_error -
+                   ((v->sum_error * v->sum_error) >> v->log2_count)) >>
+            v->log2_count);
 }
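Unrolled, the reflowed expression in get_variance() is the same sums-to-variance identity used by the blockiness metric earlier, here in fixed point and kept scaled by 256 for precision. An equivalent closed form, integer truncation aside:

#include <stdint.h>

/* Equivalent form with n = 1 << log2_count:
 * 256 * (sum_square_error - sum_error^2 / n) / n. */
static int get_variance_sketch(int64_t sum_sq_err, int64_t sum_err,
                               int log2_count) {
  const int64_t n = (int64_t)1 << log2_count;
  return (int)(256 * (sum_sq_err - (sum_err * sum_err) / n) / n);
}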
 
 static void sum_2_variances(const var *a, const var *b, var *r) {
@@ -375,17 +367,12 @@ static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
                   &node.part_variances->none);
 }
 
-static int set_vt_partitioning(VP10_COMP *cpi,
-                               MACROBLOCK *const x,
-                               MACROBLOCKD *const xd,
-                               void *data,
-                               BLOCK_SIZE bsize,
-                               int mi_row,
-                               int mi_col,
-                               int64_t threshold,
-                               BLOCK_SIZE bsize_min,
+static int set_vt_partitioning(VP10_COMP *cpi, MACROBLOCK *const x,
+                               MACROBLOCKD *const xd, void *data,
+                               BLOCK_SIZE bsize, int mi_row, int mi_col,
+                               int64_t threshold, BLOCK_SIZE bsize_min,
                                int force_split) {
-  VP10_COMMON * const cm = &cpi->common;
+  VP10_COMMON *const cm = &cpi->common;
   variance_node vt;
   const int block_width = num_8x8_blocks_wide_lookup[bsize];
   const int block_height = num_8x8_blocks_high_lookup[bsize];
@@ -394,8 +381,7 @@ static int set_vt_partitioning(VP10_COMP *cpi,
   assert(block_height == block_width);
   tree_to_node(data, bsize, &vt);
 
-  if (force_split == 1)
-    return 0;
+  if (force_split == 1) return 0;
 
   // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select this
   // size if the variance is below threshold; otherwise split is selected.
@@ -418,7 +404,7 @@ static int set_vt_partitioning(VP10_COMP *cpi,
     // For key frame: take split for bsize above 32X32 or very high variance.
     if (cm->frame_type == KEY_FRAME &&
         (bsize > BLOCK_32X32 ||
-        vt.part_variances->none.variance > (threshold << 4))) {
+         vt.part_variances->none.variance > (threshold << 4))) {
       return 0;
     }
     // If variance is low, take the bsize (no split).
@@ -469,8 +455,8 @@ static void set_vbp_thresholds(VP10_COMP *cpi, int64_t thresholds[], int q) {
   VP10_COMMON *const cm = &cpi->common;
   const int is_key_frame = (cm->frame_type == KEY_FRAME);
   const int threshold_multiplier = is_key_frame ? 20 : 1;
-  const int64_t threshold_base = (int64_t)(threshold_multiplier *
-      cpi->y_dequant[q][1]);
+  const int64_t threshold_base =
+      (int64_t)(threshold_multiplier * cpi->y_dequant[q][1]);
   if (is_key_frame) {
     thresholds[0] = threshold_base;
     thresholds[1] = threshold_base >> 2;
@@ -508,8 +494,9 @@ void vp10_set_variance_partition_thresholds(VP10_COMP *cpi, int q) {
       if (cm->width <= 352 && cm->height <= 288)
         cpi->vbp_threshold_sad = 100;
       else
-        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ?
-            (cpi->y_dequant[q][1] << 1) : 1000;
+        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000
+                                     ? (cpi->y_dequant[q][1] << 1)
+                                     : 1000;
       cpi->vbp_bsize_min = BLOCK_16X16;
     }
     cpi->vbp_threshold_minmax = 15 + (q >> 3);
@@ -522,8 +509,7 @@ static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
 #if CONFIG_VPX_HIGHBITDEPTH
                               int highbd_flag,
 #endif
-                              int pixels_wide,
-                              int pixels_high) {
+                              int pixels_wide, int pixels_high) {
   int k;
   int minmax_max = 0;
   int minmax_min = 255;
@@ -537,22 +523,17 @@ static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
 #if CONFIG_VPX_HIGHBITDEPTH
       if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
         vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
-                              d + y8_idx * dp + x8_idx, dp,
-                              &min, &max);
+                              d + y8_idx * dp + x8_idx, dp, &min, &max);
       } else {
-        vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
-                       d + y8_idx * dp + x8_idx, dp,
-                       &min, &max);
+        vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx,
+                       dp, &min, &max);
       }
 #else
-      vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
-                     d + y8_idx * dp + x8_idx, dp,
+      vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp,
                      &min, &max);
 #endif
-      if ((max - min) > minmax_max)
-        minmax_max = (max - min);
-      if ((max - min) < minmax_min)
-        minmax_min = (max - min);
+      if ((max - min) > minmax_max) minmax_max = (max - min);
+      if ((max - min) < minmax_min) minmax_min = (max - min);
     }
   }
   return (minmax_max - minmax_min);
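compute_minmax_8x8() therefore returns the spread between the largest and smallest per-8x8 dynamic range, not a pixel range itself. A worked example of the same arithmetic:

/* Per-8x8 (max - min) ranges of 40, 10, 25 and 30 give a spread of
 * 40 - 10 = 30, which callers compare against cpi->vbp_threshold_minmax
 * (15 + (q >> 3), as set above). */
static int minmax_spread_example(void) {
  const int range[4] = { 40, 10, 25, 30 };
  int k, hi = 0, lo = 255;
  for (k = 0; k < 4; ++k) {
    if (range[k] > hi) hi = range[k];
    if (range[k] < lo) lo = range[k];
  }
  return hi - lo; /* 30 */
}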
@@ -563,8 +544,7 @@ static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
 #if CONFIG_VPX_HIGHBITDEPTH
                                  int highbd_flag,
 #endif
-                                 int pixels_wide,
-                                 int pixels_high,
+                                 int pixels_wide, int pixels_high,
                                  int is_key_frame) {
   int k;
   for (k = 0; k < 4; k++) {
@@ -582,13 +562,11 @@ static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
           d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
       } else {
         s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
-        if (!is_key_frame)
-          d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
+        if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
       }
 #else
       s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
-      if (!is_key_frame)
-        d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
+      if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
 #endif
       sum = s_avg - d_avg;
       sse = sum * sum;
@@ -602,8 +580,7 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
 #if CONFIG_VPX_HIGHBITDEPTH
                                  int highbd_flag,
 #endif
-                                 int pixels_wide,
-                                 int pixels_high,
+                                 int pixels_wide, int pixels_high,
                                  int is_key_frame) {
   int k;
   for (k = 0; k < 4; k++) {
@@ -621,13 +598,11 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
           d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
       } else {
         s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
-        if (!is_key_frame)
-          d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+        if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
       }
 #else
       s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
-      if (!is_key_frame)
-        d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+      if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
 #endif
       sum = s_avg - d_avg;
       sse = sum * sum;
@@ -638,11 +613,9 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
 
 // This function chooses partitioning based on the variance between the source
 // and the reconstructed last frame, computed on down-sampled inputs.
-static int choose_partitioning(VP10_COMP *cpi,
-                                const TileInfo *const tile,
-                                MACROBLOCK *x,
-                                int mi_row, int mi_col) {
-  VP10_COMMON * const cm = &cpi->common;
+static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+                               MACROBLOCK *x, int mi_row, int mi_col) {
+  VP10_COMMON *const cm = &cpi->common;
   MACROBLOCKD *xd = &x->e_mbd;
   int i, j, k, m;
   v64x64 vt;
@@ -653,8 +626,8 @@ static int choose_partitioning(VP10_COMP *cpi,
   int sp;
   int dp;
   int pixels_wide = 64, pixels_high = 64;
-  int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
-      cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};
+  int64_t thresholds[4] = { cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
+                            cpi->vbp_thresholds[2], cpi->vbp_thresholds[3] };
 
   // Always use 4x4 partition for key frame.
   const int is_key_frame = (cm->frame_type == KEY_FRAME);
@@ -664,8 +637,8 @@ static int choose_partitioning(VP10_COMP *cpi,
 
   int segment_id = CR_SEGMENT_ID_BASE;
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
-    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
-                                                    cm->last_frame_seg_map;
+    const uint8_t *const map =
+        cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
     segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
 
     if (cyclic_refresh_segment_id_boosted(segment_id)) {
@@ -676,10 +649,8 @@ static int choose_partitioning(VP10_COMP *cpi,
 
   set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
 
-  if (xd->mb_to_right_edge < 0)
-    pixels_wide += (xd->mb_to_right_edge >> 3);
-  if (xd->mb_to_bottom_edge < 0)
-    pixels_high += (xd->mb_to_bottom_edge >> 3);
+  if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3);
+  if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);
 
   s = x->plane[0].src.buf;
   sp = x->plane[0].src.stride;
@@ -691,25 +662,24 @@ static int choose_partitioning(VP10_COMP *cpi,
 
     const YV12_BUFFER_CONFIG *yv12_g = NULL;
     unsigned int y_sad, y_sad_g;
-    const BLOCK_SIZE bsize = BLOCK_32X32
-        + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows);
+    const BLOCK_SIZE bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 +
+                             (mi_row + 4 < cm->mi_rows);
 
     assert(yv12 != NULL);
     yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
 
     if (yv12_g && yv12_g != yv12) {
       vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
-                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
-      y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
-                                       x->plane[0].src.stride,
-                                       xd->plane[0].pre[0].buf,
-                                       xd->plane[0].pre[0].stride);
+                            &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+      y_sad_g = cpi->fn_ptr[bsize].sdf(
+          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
+          xd->plane[0].pre[0].stride);
     } else {
       y_sad_g = UINT_MAX;
     }
 
     vp10_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
-                         &cm->frame_refs[LAST_FRAME - 1].sf);
+                          &cm->frame_refs[LAST_FRAME - 1].sf);
     mbmi->ref_frame[0] = LAST_FRAME;
     mbmi->ref_frame[1] = NONE;
     mbmi->sb_type = BLOCK_64X64;
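The bsize expression at the top of this hunk builds the SAD block size arithmetically: starting from BLOCK_32X32, it adds 2 when four more mi columns fit (widening to 64) and 1 when four more mi rows fit. That only works if the enum places BLOCK_32X32, BLOCK_32X64, BLOCK_64X32 and BLOCK_64X64 consecutively, which is assumed in this sketch:

#include <assert.h>

/* Hypothetical mirror of the BLOCK_SIZE ordering the expression relies on. */
static void check_bsize_arith(void) {
  enum { B32X32, B32X64, B64X32, B64X64 };
  assert(B32X32 + 1 * 2 + 1 * 1 == B64X64); /* both directions fit */
  assert(B32X32 + 1 * 2 == B64X32);         /* only more columns fit */
  assert(B32X32 + 1 * 1 == B32X64);         /* only more rows fit */
}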
@@ -719,7 +689,7 @@ static int choose_partitioning(VP10_COMP *cpi,
     y_sad = vp10_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
     if (y_sad_g < y_sad) {
       vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
-                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+                            &cm->frame_refs[GOLDEN_FRAME - 1].sf);
       mbmi->ref_frame[0] = GOLDEN_FRAME;
       mbmi->mv[0].as_int = 0;
       y_sad = y_sad_g;
@@ -730,15 +700,15 @@ static int choose_partitioning(VP10_COMP *cpi,
     vp10_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
 
     for (i = 1; i <= 2; ++i) {
-      struct macroblock_plane  *p = &x->plane[i];
+      struct macroblock_plane *p = &x->plane[i];
       struct macroblockd_plane *pd = &xd->plane[i];
       const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
 
       if (bs == BLOCK_INVALID)
         uv_sad = UINT_MAX;
       else
-        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
-                                     pd->dst.buf, pd->dst.stride);
+        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf,
+                                     pd->dst.stride);
 
       x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
     }
@@ -748,8 +718,7 @@ static int choose_partitioning(VP10_COMP *cpi,
 
     // If the y_sad is very small, take 64x64 as partition and exit.
     // Don't check on boosted segment for now, as 64x64 is suppressed there.
-    if (segment_id == CR_SEGMENT_ID_BASE &&
-        y_sad < cpi->vbp_threshold_sad) {
+    if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) {
       const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
       const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
       if (mi_col + block_width / 2 < cm->mi_cols &&
@@ -764,16 +733,10 @@ static int choose_partitioning(VP10_COMP *cpi,
 #if CONFIG_VPX_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       switch (xd->bd) {
-        case 10:
-          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
-          break;
-        case 12:
-          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
-          break;
+        case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break;
+        case 12: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); break;
         case 8:
-        default:
-          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
-          break;
+        default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
       }
     }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -799,22 +762,19 @@ static int choose_partitioning(VP10_COMP *cpi,
       if (!is_key_frame) {
         fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
 #if CONFIG_VPX_HIGHBITDEPTH
-                            xd->cur_buf->flags,
+                             xd->cur_buf->flags,
 #endif
-                            pixels_wide,
-                            pixels_high,
-                            is_key_frame);
+                             pixels_wide, pixels_high, is_key_frame);
         fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
         get_variance(&vt.split[i].split[j].part_variances.none);
-        if (vt.split[i].split[j].part_variances.none.variance >
-            thresholds[2]) {
+        if (vt.split[i].split[j].part_variances.none.variance > thresholds[2]) {
           // 16X16 variance is above threshold for split, so force split to 8x8
           // for this 16x16 block (this also forces splits for upper levels).
           force_split[split_index] = 1;
           force_split[i + 1] = 1;
           force_split[0] = 1;
         } else if (vt.split[i].split[j].part_variances.none.variance >
-                   thresholds[1] &&
+                       thresholds[1] &&
                    !cyclic_refresh_segment_id_boosted(segment_id)) {
           // We have some nominal amount of 16x16 variance (based on average),
           // compute the minmax over the 8x8 sub-blocks, and if above threshold,
@@ -832,23 +792,20 @@ static int choose_partitioning(VP10_COMP *cpi,
         }
       }
       if (is_key_frame || (low_res &&
-          vt.split[i].split[j].part_variances.none.variance >
-          (thresholds[1] << 1))) {
+                           vt.split[i].split[j].part_variances.none.variance >
+                               (thresholds[1] << 1))) {
         force_split[split_index] = 0;
         // Go down to 4x4 down-sampling for variance.
         variance4x4downsample[i2 + j] = 1;
         for (k = 0; k < 4; k++) {
           int x8_idx = x16_idx + ((k & 1) << 3);
           int y8_idx = y16_idx + ((k >> 1) << 3);
-          v8x8 *vst2 = is_key_frame ? &vst->split[k] :
-              &vt2[i2 + j].split[k];
+          v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
           fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
 #if CONFIG_VPX_HIGHBITDEPTH
                                xd->cur_buf->flags,
 #endif
-                               pixels_wide,
-                               pixels_high,
-                               is_key_frame);
+                               pixels_wide, pixels_high, is_key_frame);
         }
       }
     }
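The force_split[] indices used around here (force_split[0] and force_split[i + 1] above, force_split[5 + i2 + j] further down) imply a flat 21-entry layout: one flag for the 64x64, four for the 32x32s, sixteen for the 16x16s. A sketch of that mapping, inferred from the indices rather than from the array's declaration, which sits outside these hunks:

/* Inferred layout: [0] the 64x64, [1 + i] 32x32 #i, [5 + 4*i + j]
 * 16x16 #j inside 32x32 #i. */
static int force_split_index(int level, int i, int j) {
  return level == 0 ? 0 : level == 1 ? 1 + i : 5 + (i << 2) + j;
}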
@@ -859,10 +816,8 @@ static int choose_partitioning(VP10_COMP *cpi,
     const int i2 = i << 2;
     for (j = 0; j < 4; j++) {
       if (variance4x4downsample[i2 + j] == 1) {
-        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
-            &vt.split[i].split[j];
-        for (m = 0; m < 4; m++)
-          fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
+        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : &vt.split[i].split[j];
+        for (m = 0; m < 4; m++) fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
         fill_variance_tree(vtemp, BLOCK_16X16);
       }
     }
@@ -884,7 +839,7 @@ static int choose_partitioning(VP10_COMP *cpi,
 
   // Now go through the entire structure, splitting every block size until
   // we get to one that's got a variance lower than our threshold.
-  if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
+  if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
       !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                            thresholds[0], BLOCK_16X16, force_split[0])) {
     for (i = 0; i < 4; ++i) {
@@ -901,15 +856,13 @@ static int choose_partitioning(VP10_COMP *cpi,
           // For inter frames: if variance4x4downsample[] == 1 for this 16x16
           // block, then the variance is based on 4x4 down-sampling, so use vt2
           // in set_vt_partitioning(), otherwise use vt.
-          v16x16 *vtemp = (!is_key_frame &&
-                           variance4x4downsample[i2 + j] == 1) ?
-                           &vt2[i2 + j] : &vt.split[i].split[j];
-          if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16,
-                                   mi_row + y32_idx + y16_idx,
-                                   mi_col + x32_idx + x16_idx,
-                                   thresholds[2],
-                                   cpi->vbp_bsize_min,
-                                   force_split[5 + i2  + j])) {
+          v16x16 *vtemp = (!is_key_frame && variance4x4downsample[i2 + j] == 1)
+                              ? &vt2[i2 + j]
+                              : &vt.split[i].split[j];
+          if (!set_vt_partitioning(
+                  cpi, x, xd, vtemp, BLOCK_16X16, mi_row + y32_idx + y16_idx,
+                  mi_col + x32_idx + x16_idx, thresholds[2], cpi->vbp_bsize_min,
+                  force_split[5 + i2 + j])) {
             for (k = 0; k < 4; ++k) {
               const int x8_idx = (k & 1);
               const int y8_idx = (k >> 1);
@@ -919,16 +872,14 @@ static int choose_partitioning(VP10_COMP *cpi,
                                          mi_row + y32_idx + y16_idx + y8_idx,
                                          mi_col + x32_idx + x16_idx + x8_idx,
                                          thresholds[3], BLOCK_8X8, 0)) {
-                  set_block_size(cpi, x, xd,
-                                 (mi_row + y32_idx + y16_idx + y8_idx),
-                                 (mi_col + x32_idx + x16_idx + x8_idx),
-                                 BLOCK_4X4);
+                  set_block_size(
+                      cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
+                      (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_4X4);
                 }
               } else {
-                set_block_size(cpi, x, xd,
-                               (mi_row + y32_idx + y16_idx + y8_idx),
-                               (mi_col + x32_idx + x16_idx + x8_idx),
-                               BLOCK_8X8);
+                set_block_size(
+                    cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
+                    (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_8X8);
               }
             }
           }
@@ -939,8 +890,7 @@ static int choose_partitioning(VP10_COMP *cpi,
   return 0;
 }
 
-static void update_state(VP10_COMP *cpi, ThreadData *td,
-                         PICK_MODE_CONTEXT *ctx,
+static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
                          int mi_row, int mi_col, BLOCK_SIZE bsize,
                          int output_enabled) {
   int i, x_idx, y;
@@ -958,8 +908,7 @@ static void update_state(VP10_COMP *cpi, ThreadData *td,
   const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
   const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
   const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
-  MV_REF *const frame_mvs =
-      cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+  MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
   int w, h;
 
   const int mis = cm->mi_stride;
@@ -976,17 +925,15 @@ static void update_state(VP10_COMP *cpi, ThreadData *td,
   if (seg->enabled) {
     // For in-frame complexity AQ, copy the segment id from the segment map.
     if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
-      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
-                                                 : cm->last_frame_seg_map;
-      mi_addr->mbmi.segment_id =
-        get_segment_id(cm, map, bsize, mi_row, mi_col);
+      const uint8_t *const map =
+          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
+      mi_addr->mbmi.segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
     }
     // Else for cyclic refresh mode update the segment map, set the segment id
     // and then update the quantizer.
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
-      vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row,
-                                        mi_col, bsize, ctx->rate, ctx->dist,
-                                        x->skip);
+      vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+                                         bsize, ctx->rate, ctx->dist, x->skip);
     }
   }
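
The complexity-AQ path above derives one segment id for the whole block from a per-8x8 segment map via get_segment_id(). A sketch of that lookup, assuming the helper behaves like its vp9 counterpart and takes the minimum id over the mi units the block covers (bounds clamping omitted here):

#include <stdint.h>

/* Hypothetical reduced signature; the real helper derives bw/bh from the
 * block-size lookup tables and clamps against cm->mi_rows/mi_cols. */
static int get_segment_id_sketch(const uint8_t *map, int mi_stride, int mi_row,
                                 int mi_col, int bw, int bh) {
  int segment_id = 7; /* MAX_SEGMENTS - 1 */
  int x, y;
  for (y = 0; y < bh; ++y)
    for (x = 0; x < bw; ++x) {
      const uint8_t id = map[(mi_row + y) * mi_stride + (mi_col + x)];
      if (id < segment_id) segment_id = id;
    }
  return segment_id;
}
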
 
@@ -1005,20 +952,18 @@ static void update_state(VP10_COMP *cpi, ThreadData *td,
     p[i].eobs = ctx->eobs_pbuf[i][2];
   }
 
-  for (i = 0; i < 2; ++i)
-    pd[i].color_index_map = ctx->color_index_map[i];
+  for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i];
 
   // Restore the coding context of the MB to the one that was in place
   // when the mode was picked for it.
   for (y = 0; y < mi_height; y++)
     for (x_idx = 0; x_idx < mi_width; x_idx++)
-      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
-        && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
+      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
+          (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
         xd->mi[x_idx + y * mis] = mi_addr;
       }
 
-  if (cpi->oxcf.aq_mode)
-    vp10_init_plane_quantizers(cpi, x);
+  if (cpi->oxcf.aq_mode) vp10_init_plane_quantizers(cpi, x);
 
   if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
     mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
@@ -1029,22 +974,16 @@ static void update_state(VP10_COMP *cpi, ThreadData *td,
   memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
          sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
 
-  if (!output_enabled)
-    return;
+  if (!output_enabled) return;
 
 #if CONFIG_INTERNAL_STATS
   if (frame_is_intra_only(cm)) {
     static const int kf_mode_index[] = {
-      THR_DC        /*DC_PRED*/,
-      THR_V_PRED    /*V_PRED*/,
-      THR_H_PRED    /*H_PRED*/,
-      THR_D45_PRED  /*D45_PRED*/,
-      THR_D135_PRED /*D135_PRED*/,
-      THR_D117_PRED /*D117_PRED*/,
-      THR_D153_PRED /*D153_PRED*/,
-      THR_D207_PRED /*D207_PRED*/,
-      THR_D63_PRED  /*D63_PRED*/,
-      THR_TM        /*TM_PRED*/,
+      THR_DC /*DC_PRED*/,          THR_V_PRED /*V_PRED*/,
+      THR_H_PRED /*H_PRED*/,       THR_D45_PRED /*D45_PRED*/,
+      THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
+      THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
+      THR_D63_PRED /*D63_PRED*/,   THR_TM /*TM_PRED*/,
     };
     ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
   } else {
@@ -1083,9 +1022,9 @@ static void update_state(VP10_COMP *cpi, ThreadData *td,
 }
 
 void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
-                          int mi_row, int mi_col) {
-  uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
-  const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride };
+                           int mi_row, int mi_col) {
+  uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
+  const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
   int i;
 
   // Set current frame pointer.
@@ -1097,24 +1036,20 @@ void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                      x->e_mbd.plane[i].subsampling_y);
 }
 
-static int set_segment_rdmult(VP10_COMP *const cpi,
-                               MACROBLOCK *const x,
-                               int8_t segment_id) {
+static int set_segment_rdmult(VP10_COMP *const cpi, MACROBLOCK *const x,
+                              int8_t segment_id) {
   int segment_qindex;
   VP10_COMMON *const cm = &cpi->common;
   vp10_init_plane_quantizers(cpi, x);
   vpx_clear_system_state();
-  segment_qindex = vp10_get_qindex(&cm->seg, segment_id,
-                                  cm->base_qindex);
+  segment_qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
   return vp10_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
 }
 
-static void rd_pick_sb_modes(VP10_COMP *cpi,
-                             TileDataEnc *tile_data,
-                             MACROBLOCK *const x,
-                             int mi_row, int mi_col, RD_COST *rd_cost,
-                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
-                             int64_t best_rd) {
+static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
+                             MACROBLOCK *const x, int mi_row, int mi_col,
+                             RD_COST *rd_cost, BLOCK_SIZE bsize,
+                             PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
   VP10_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -1140,8 +1075,7 @@ static void rd_pick_sb_modes(VP10_COMP *cpi,
     p[i].eobs = ctx->eobs_pbuf[i][0];
   }
 
-  for (i = 0; i < 2; ++i)
-    pd[i].color_index_map = ctx->color_index_map[i];
+  for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i];
 
   ctx->is_coded = 0;
   ctx->skippable = 0;
@@ -1153,39 +1087,37 @@ static void rd_pick_sb_modes(VP10_COMP *cpi,
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    x->source_variance =
-        vp10_high_get_sby_perpixel_variance(cpi, &x->plane[0].src,
-                                            bsize, xd->bd);
+    x->source_variance = vp10_high_get_sby_perpixel_variance(
+        cpi, &x->plane[0].src, bsize, xd->bd);
   } else {
     x->source_variance =
-      vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+        vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
   }
 #else
   x->source_variance =
-    vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+      vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
   // Save rdmult before it might be changed, so it can be restored later.
   orig_rdmult = x->rdmult;
 
   if (aq_mode == VARIANCE_AQ) {
-    const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
-                                            : vp10_block_energy(cpi, x, bsize);
-    if (cm->frame_type == KEY_FRAME ||
-        cpi->refresh_alt_ref_frame ||
+    const int energy =
+        bsize <= BLOCK_16X16 ? x->mb_energy : vp10_block_energy(cpi, x, bsize);
+    if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
         (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
       mbmi->segment_id = vp10_vaq_segment_id(energy);
     } else {
-      const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
-                                                    : cm->last_frame_seg_map;
+      const uint8_t *const map =
+          cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
       mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
     }
     x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
   } else if (aq_mode == COMPLEXITY_AQ) {
     x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
   } else if (aq_mode == CYCLIC_REFRESH_AQ) {
-    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
-                                                  : cm->last_frame_seg_map;
+    const uint8_t *const map =
+        cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
     // If segment is boosted, use rdmult for that segment.
     if (cyclic_refresh_segment_id_boosted(
             get_segment_id(cm, map, bsize, mi_row, mi_col)))
@@ -1200,22 +1132,20 @@ static void rd_pick_sb_modes(VP10_COMP *cpi,
     if (bsize >= BLOCK_8X8) {
       if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
         vp10_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
-                                           ctx, best_rd);
+                                            ctx, best_rd);
       else
-        vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
-                                  rd_cost, bsize, ctx, best_rd);
+        vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+                                   bsize, ctx, best_rd);
     } else {
-      vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
-                                    rd_cost, bsize, ctx, best_rd);
+      vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+                                     bsize, ctx, best_rd);
     }
   }
 
-
   // Examine the resulting rate and, for AQ mode 2, make a segment choice.
-  if ((rd_cost->rate != INT_MAX) &&
-      (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
-      (cm->frame_type == KEY_FRAME ||
-       cpi->refresh_alt_ref_frame ||
+  if ((rd_cost->rate != INT_MAX) && (aq_mode == COMPLEXITY_AQ) &&
+      (bsize >= BLOCK_16X16) &&
+      (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
     vp10_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
   }
@@ -1224,8 +1154,7 @@ static void rd_pick_sb_modes(VP10_COMP *cpi,
 
   // TODO(jingning) The rate-distortion optimization flow needs to be
   // refactored to provide proper exit/return handling.
-  if (rd_cost->rate == INT_MAX)
-    rd_cost->rdcost = INT64_MAX;
+  if (rd_cost->rate == INT_MAX) rd_cost->rdcost = INT64_MAX;
 
   ctx->rate = rd_cost->rate;
   ctx->dist = rd_cost->dist;
@@ -1242,8 +1171,8 @@ static void update_stats(VP10_COMMON *cm, ThreadData *td) {
   if (!frame_is_intra_only(cm)) {
     FRAME_COUNTS *const counts = td->counts;
     const int inter_block = is_inter_block(mbmi);
-    const int seg_ref_active = segfeature_active(&cm->seg, mbmi->segment_id,
-                                                 SEG_LVL_REF_FRAME);
+    const int seg_ref_active =
+        segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_REF_FRAME);
     if (!seg_ref_active) {
       counts->intra_inter[vp10_get_intra_inter_context(xd)][inter_block]++;
       // If the segment reference feature is enabled we have only a single
@@ -1252,18 +1181,18 @@ static void update_stats(VP10_COMMON *cm, ThreadData *td) {
       if (inter_block) {
         const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
         if (cm->reference_mode == REFERENCE_MODE_SELECT)
-          counts->comp_inter[vp10_get_reference_mode_context(cm, xd)]
-                            [has_second_ref(mbmi)]++;
+          counts->comp_inter[vp10_get_reference_mode_context(
+              cm, xd)][has_second_ref(mbmi)]++;
 
         if (has_second_ref(mbmi)) {
-          counts->comp_ref[vp10_get_pred_context_comp_ref_p(cm, xd)]
-                          [ref0 == GOLDEN_FRAME]++;
+          counts->comp_ref[vp10_get_pred_context_comp_ref_p(
+              cm, xd)][ref0 == GOLDEN_FRAME]++;
         } else {
-          counts->single_ref[vp10_get_pred_context_single_ref_p1(xd)][0]
-                            [ref0 != LAST_FRAME]++;
+          counts->single_ref[vp10_get_pred_context_single_ref_p1(
+              xd)][0][ref0 != LAST_FRAME]++;
           if (ref0 != LAST_FRAME)
-            counts->single_ref[vp10_get_pred_context_single_ref_p2(xd)][1]
-                              [ref0 != GOLDEN_FRAME]++;
+            counts->single_ref[vp10_get_pred_context_single_ref_p2(
+                xd)][1][ref0 != GOLDEN_FRAME]++;
         }
       }
     }
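
Every counter bumped above has the same shape, counts->table[context][outcome]++: the context, derived from already-coded neighbors, selects which conditional probability the outcome adapts, and the histograms feed the backward probability updates after the frame. The pattern in isolation (names hypothetical):

/* One two-bin histogram per context; p(outcome = 1 | ctx) is later
 * estimated from counts[ctx][1] / (counts[ctx][0] + counts[ctx][1]). */
static unsigned intra_inter_counts[4][2];

static void count_intra_inter(int ctx, int is_inter) {
  ++intra_inter_counts[ctx][is_inter];
}
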
@@ -1301,17 +1230,15 @@ static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
   int mi_width = num_8x8_blocks_wide_lookup[bsize];
   int mi_height = num_8x8_blocks_high_lookup[bsize];
   for (p = 0; p < MAX_MB_PLANE; p++) {
-    memcpy(
-        xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
-        a + num_4x4_blocks_wide * p,
-        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
-        xd->plane[p].subsampling_x);
-    memcpy(
-        xd->left_context[p]
-            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
-        l + num_4x4_blocks_high * p,
-        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
-        xd->plane[p].subsampling_y);
+    memcpy(xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
+           a + num_4x4_blocks_wide * p,
+           (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+               xd->plane[p].subsampling_x);
+    memcpy(xd->left_context[p] +
+               ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+           l + num_4x4_blocks_high * p,
+           (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+               xd->plane[p].subsampling_y);
   }
   memcpy(xd->above_seg_context + mi_col, sa,
          sizeof(*xd->above_seg_context) * mi_width);
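
The >> subsampling shifts above account for chroma planes holding fewer 4x4 entropy contexts: with 4:2:0 subsampling a chroma plane covers half the width and height of luma, so half as many above/left entries are saved and restored. A worked one-liner, assuming num_4x4_blocks_wide is 16 for a 64x64 block:

/* Luma (subsampling 0): 16 contexts. 4:2:0 chroma (subsampling 1): 8. */
static int contexts_to_copy(int num_4x4_blocks_wide, int subsampling) {
  return num_4x4_blocks_wide >> subsampling;
}
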
@@ -1333,17 +1260,15 @@ static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
 
   // buffer the above/left context information of the block being searched.
   for (p = 0; p < MAX_MB_PLANE; ++p) {
-    memcpy(
-        a + num_4x4_blocks_wide * p,
-        xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
-        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
-        xd->plane[p].subsampling_x);
-    memcpy(
-        l + num_4x4_blocks_high * p,
-        xd->left_context[p]
-            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
-        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
-        xd->plane[p].subsampling_y);
+    memcpy(a + num_4x4_blocks_wide * p,
+           xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
+           (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+               xd->plane[p].subsampling_x);
+    memcpy(l + num_4x4_blocks_high * p,
+           xd->left_context[p] +
+               ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+           (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+               xd->plane[p].subsampling_y);
   }
   memcpy(sa, xd->above_seg_context + mi_col,
          sizeof(*xd->above_seg_context) * mi_width);
@@ -1351,8 +1276,7 @@ static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
          sizeof(xd->left_seg_context[0]) * mi_height);
 }
 
-static void encode_b(VP10_COMP *cpi, const TileInfo *const tile,
-                     ThreadData *td,
+static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, ThreadData *td,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize,
                      PICK_MODE_CONTEXT *ctx) {
@@ -1367,9 +1291,8 @@ static void encode_b(VP10_COMP *cpi, const TileInfo *const tile,
 }
 
 static void encode_sb(VP10_COMP *cpi, ThreadData *td,
-                      const TileInfo *const tile,
-                      TOKENEXTRA **tp, int mi_row, int mi_col,
-                      int output_enabled, BLOCK_SIZE bsize,
+                      const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
+                      int mi_col, int output_enabled, BLOCK_SIZE bsize,
                       PC_TREE *pc_tree) {
   VP10_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
@@ -1380,8 +1303,7 @@ static void encode_sb(VP10_COMP *cpi, ThreadData *td,
   PARTITION_TYPE partition;
   BLOCK_SIZE subsize = bsize;
 
-  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
-    return;
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
 
   if (bsize >= BLOCK_8X8) {
     ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
@@ -1431,9 +1353,7 @@ static void encode_sb(VP10_COMP *cpi, ThreadData *td,
                   subsize, pc_tree->split[3]);
       }
       break;
-    default:
-      assert(0 && "Invalid partition type.");
-      break;
+    default: assert(0 && "Invalid partition type."); break;
   }
 
   if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
@@ -1443,9 +1363,8 @@ static void encode_sb(VP10_COMP *cpi, ThreadData *td,
 // Check whether the given partition size is allowed for a specified number
 // of 8x8 block rows and columns remaining in the image.
 // If not, return the largest allowed partition size.
-static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
-                                      int rows_left, int cols_left,
-                                      int *bh, int *bw) {
+static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
+                                      int cols_left, int *bh, int *bw) {
   if (rows_left <= 0 || cols_left <= 0) {
     return VPXMIN(bsize, BLOCK_8X8);
   } else {
@@ -1460,9 +1379,10 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
   return bsize;
 }
 
-static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
-    int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
-    BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
+static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, int bh_in,
+                                         int bw_in, int row8x8_remaining,
+                                         int col8x8_remaining, BLOCK_SIZE bsize,
+                                         MODE_INFO **mi_8x8) {
   int bh = bh_in;
   int r, c;
   for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
@@ -1470,8 +1390,8 @@ static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
     for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
       const int index = r * mis + c;
       mi_8x8[index] = mi + index;
-      mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
-          row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
+      mi_8x8[index]->mbmi.sb_type = find_partition_size(
+          bsize, row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
     }
   }
 }
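
find_partition_size() above steps the requested size down until it fits in the remaining rows/columns, and set_partial_b64x64_partition() tiles a boundary superblock with those clamped sizes. A sketch of the clamping loop, assuming the usual 8x8-unit dimension tables (values below are intended to mirror num_8x8_blocks_{wide,high}_lookup in standard BLOCK_SIZES order):

static const int bw8[13] = { 1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8 };
static const int bh8[13] = { 1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8 };

static int clamp_partition_size(int bsize, int rows_left, int cols_left) {
  /* Square sizes sit 3 apart in BLOCK_SIZES (8x8 = 3, 16x16 = 6, 32x32 = 9,
   * 64x64 = 12), so stepping by 3 stays on square sizes. */
  for (; bsize > 0; bsize -= 3)
    if (bh8[bsize] <= rows_left && bw8[bsize] <= cols_left) break;
  return bsize;
}
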
@@ -1508,17 +1428,14 @@ static void set_fixed_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
   } else {
     // Else this is a partial SB64.
     set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
-        col8x8_remaining, bsize, mi_8x8);
+                                 col8x8_remaining, bsize, mi_8x8);
   }
 }
 
-static void rd_use_partition(VP10_COMP *cpi,
-                             ThreadData *td,
-                             TileDataEnc *tile_data,
-                             MODE_INFO **mi_8x8, TOKENEXTRA **tp,
-                             int mi_row, int mi_col,
-                             BLOCK_SIZE bsize,
-                             int *rate, int64_t *dist,
+static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
+                             TileDataEnc *tile_data, MODE_INFO **mi_8x8,
+                             TOKENEXTRA **tp, int mi_row, int mi_col,
+                             BLOCK_SIZE bsize, int *rate, int64_t *dist,
                              int do_recon, PC_TREE *pc_tree) {
   VP10_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
@@ -1540,8 +1457,7 @@ static void rd_use_partition(VP10_COMP *cpi,
   int do_partition_search = 1;
   PICK_MODE_CONTEXT *ctx = &pc_tree->none;
 
-  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
-    return;
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
 
   assert(num_4x4_blocks_wide_lookup[bsize] ==
          num_4x4_blocks_high_lookup[bsize]);
@@ -1583,15 +1499,15 @@ static void rd_use_partition(VP10_COMP *cpi,
         mi_row + (mi_step >> 1) < cm->mi_rows &&
         mi_col + (mi_step >> 1) < cm->mi_cols) {
       pc_tree->partitioning = PARTITION_NONE;
-      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize,
-                       ctx, INT64_MAX);
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, ctx,
+                       INT64_MAX);
 
       pl = partition_plane_context(xd, mi_row, mi_col, bsize);
 
       if (none_rdc.rate < INT_MAX) {
         none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
-        none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
-                                 none_rdc.dist);
+        none_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, none_rdc.rate, none_rdc.dist);
       }
 
       restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
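
RDCOST is the scalar every decision in this file minimizes. A sketch of the weighting, assuming the vp9-style definition (x->rdmult is a fixed-point Lagrange multiplier on the rate, x->rddiv a shift applied to the distortion; the exact constants live in the encoder's rd header):

#include <stdint.h>

/* cost = lambda * rate + distortion, in fixed point: the rate term is
 * weighted by rdmult with >> 8 rounding, the distortion shifted by rddiv. */
static int64_t rdcost_sketch(int rdmult, int rddiv, int rate, int64_t dist) {
  return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
}
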
@@ -1602,23 +1518,21 @@ static void rd_use_partition(VP10_COMP *cpi,
 
   switch (partition) {
     case PARTITION_NONE:
-      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
-                       bsize, ctx, INT64_MAX);
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, bsize,
+                       ctx, INT64_MAX);
       break;
     case PARTITION_HORZ:
       rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
-                       subsize, &pc_tree->horizontal[0],
-                       INT64_MAX);
-      if (last_part_rdc.rate != INT_MAX &&
-          bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
+                       subsize, &pc_tree->horizontal[0], INT64_MAX);
+      if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
+          mi_row + (mi_step >> 1) < cm->mi_rows) {
         RD_COST tmp_rdc;
         PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
         vp10_rd_cost_init(&tmp_rdc);
         update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
         encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
-        rd_pick_sb_modes(cpi, tile_data, x,
-                         mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
-                         subsize, &pc_tree->horizontal[1], INT64_MAX);
+        rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col,
+                         &tmp_rdc, subsize, &pc_tree->horizontal[1], INT64_MAX);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
           vp10_rd_cost_reset(&last_part_rdc);
           break;
@@ -1631,17 +1545,16 @@ static void rd_use_partition(VP10_COMP *cpi,
     case PARTITION_VERT:
       rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
                        subsize, &pc_tree->vertical[0], INT64_MAX);
-      if (last_part_rdc.rate != INT_MAX &&
-          bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
+      if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
+          mi_col + (mi_step >> 1) < cm->mi_cols) {
         RD_COST tmp_rdc;
         PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
         vp10_rd_cost_init(&tmp_rdc);
         update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
         encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
-        rd_pick_sb_modes(cpi, tile_data, x,
-                         mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
-                         subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
-                         INT64_MAX);
+        rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1),
+                         &tmp_rdc, subsize,
+                         &pc_tree->vertical[bsize > BLOCK_8X8], INT64_MAX);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
           vp10_rd_cost_reset(&last_part_rdc);
           break;
@@ -1669,11 +1582,10 @@ static void rd_use_partition(VP10_COMP *cpi,
           continue;
 
         vp10_rd_cost_init(&tmp_rdc);
-        rd_use_partition(cpi, td, tile_data,
-                         mi_8x8 + jj * bss * mis + ii * bss, tp,
-                         mi_row + y_idx, mi_col + x_idx, subsize,
-                         &tmp_rdc.rate, &tmp_rdc.dist,
-                         i != 3, pc_tree->split[i]);
+        rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss,
+                         tp, mi_row + y_idx, mi_col + x_idx, subsize,
+                         &tmp_rdc.rate, &tmp_rdc.dist, i != 3,
+                         pc_tree->split[i]);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
           vp10_rd_cost_reset(&last_part_rdc);
           break;
@@ -1682,26 +1594,23 @@ static void rd_use_partition(VP10_COMP *cpi,
         last_part_rdc.dist += tmp_rdc.dist;
       }
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 
   pl = partition_plane_context(xd, mi_row, mi_col, bsize);
   if (last_part_rdc.rate < INT_MAX) {
     last_part_rdc.rate += cpi->partition_cost[pl][partition];
-    last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
-                                  last_part_rdc.rate, last_part_rdc.dist);
-  }
-
-  if (do_partition_search
-      && cpi->sf.adjust_partitioning_from_last_frame
-      && cpi->sf.partition_search_type == SEARCH_PARTITION
-      && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
-      && (mi_row + mi_step < cm->mi_rows ||
-          mi_row + (mi_step >> 1) == cm->mi_rows)
-      && (mi_col + mi_step < cm->mi_cols ||
-          mi_col + (mi_step >> 1) == cm->mi_cols)) {
+    last_part_rdc.rdcost =
+        RDCOST(x->rdmult, x->rddiv, last_part_rdc.rate, last_part_rdc.dist);
+  }
+
+  if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame &&
+      cpi->sf.partition_search_type == SEARCH_PARTITION &&
+      partition != PARTITION_SPLIT && bsize > BLOCK_8X8 &&
+      (mi_row + mi_step < cm->mi_rows ||
+       mi_row + (mi_step >> 1) == cm->mi_rows) &&
+      (mi_col + mi_step < cm->mi_cols ||
+       mi_col + (mi_step >> 1) == cm->mi_cols)) {
     BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
     chosen_rdc.rate = 0;
     chosen_rdc.dist = 0;
@@ -1721,9 +1630,9 @@ static void rd_use_partition(VP10_COMP *cpi,
 
       save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
       pc_tree->split[i]->partitioning = PARTITION_NONE;
-      rd_pick_sb_modes(cpi, tile_data, x,
-                       mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
-                       split_subsize, &pc_tree->split[i]->none, INT64_MAX);
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row + y_idx, mi_col + x_idx,
+                       &tmp_rdc, split_subsize, &pc_tree->split[i]->none,
+                       INT64_MAX);
 
       restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
 
@@ -1736,7 +1645,7 @@ static void rd_use_partition(VP10_COMP *cpi,
       chosen_rdc.dist += tmp_rdc.dist;
 
       if (i != 3)
-        encode_sb(cpi, td, tile_info, tp,  mi_row + y_idx, mi_col + x_idx, 0,
+        encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
                   split_subsize, pc_tree->split[i]);
 
       pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
@@ -1746,22 +1655,20 @@ static void rd_use_partition(VP10_COMP *cpi,
     pl = partition_plane_context(xd, mi_row, mi_col, bsize);
     if (chosen_rdc.rate < INT_MAX) {
       chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
-      chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
-                                 chosen_rdc.rate, chosen_rdc.dist);
+      chosen_rdc.rdcost =
+          RDCOST(x->rdmult, x->rddiv, chosen_rdc.rate, chosen_rdc.dist);
     }
   }
 
   // If last_part is better, set the partitioning to that.
   if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
     mi_8x8[0]->mbmi.sb_type = bsize;
-    if (bsize >= BLOCK_8X8)
-      pc_tree->partitioning = partition;
+    if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition;
     chosen_rdc = last_part_rdc;
   }
   // If none was better, set the partitioning to that.
   if (none_rdc.rdcost < chosen_rdc.rdcost) {
-    if (bsize >= BLOCK_8X8)
-      pc_tree->partitioning = PARTITION_NONE;
+    if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
     chosen_rdc = none_rdc;
   }
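
At this point rd_use_partition has up to three candidates: the partitioning inherited from mi_8x8 (last_part_rdc), the re-searched four-way split (chosen_rdc), and the unsplit block (none_rdc); the two comparisons above keep whichever rdcost is lowest. Condensed into a sketch (struct and names hypothetical):

#include <stdint.h>

typedef struct { int64_t rdcost; } rdc_sketch;

/* 0 = inherited partition, 1 = re-searched split, 2 = PARTITION_NONE. */
static int pick_best(rdc_sketch last_part, rdc_sketch chosen, rdc_sketch none) {
  int which = 1;
  if (last_part.rdcost < chosen.rdcost) {
    chosen = last_part;
    which = 0;
  }
  if (none.rdcost < chosen.rdcost) which = 2;
  return which;
}
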
 
@@ -1783,22 +1690,17 @@ static void rd_use_partition(VP10_COMP *cpi,
 }
 
 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
-  BLOCK_4X4,   BLOCK_4X4,   BLOCK_4X4,
-  BLOCK_4X4,   BLOCK_4X4,   BLOCK_4X4,
-  BLOCK_8X8,   BLOCK_8X8,   BLOCK_8X8,
-  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
-  BLOCK_16X16
+  BLOCK_4X4,   BLOCK_4X4,   BLOCK_4X4,  BLOCK_4X4, BLOCK_4X4,
+  BLOCK_4X4,   BLOCK_8X8,   BLOCK_8X8,  BLOCK_8X8, BLOCK_16X16,
+  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16
 };
 
 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
-  BLOCK_8X8,   BLOCK_16X16, BLOCK_16X16,
-  BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
-  BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
-  BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
-  BLOCK_64X64
+  BLOCK_8X8,   BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
+  BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
+  BLOCK_64X64, BLOCK_64X64, BLOCK_64X64
 };
 
-
 // Look at all the mode_info entries for blocks that are part of this
 // partition and find the min and max values for sb_type.
 // At the moment this is designed to work on a 64x64 SB but could be
@@ -1811,14 +1713,14 @@ static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
                                         BLOCK_SIZE *max_block_size,
                                         int bs_hist[BLOCK_SIZES]) {
   int sb_width_in_blocks = MI_BLOCK_SIZE;
-  int sb_height_in_blocks  = MI_BLOCK_SIZE;
+  int sb_height_in_blocks = MI_BLOCK_SIZE;
   int i, j;
   int index = 0;
 
   // Check the sb_type for each block that belongs to this region.
   for (i = 0; i < sb_height_in_blocks; ++i) {
     for (j = 0; j < sb_width_in_blocks; ++j) {
-      MODE_INFO *mi = mi_8x8[index+j];
+      MODE_INFO *mi = mi_8x8[index + j];
       BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
       bs_hist[sb_type]++;
       *min_block_size = VPXMIN(*min_block_size, sb_type);
@@ -1830,19 +1732,16 @@ static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
 
 // Next square block size less than or equal to the current block size.
 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
-  BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
-  BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
-  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
-  BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
-  BLOCK_64X64
+  BLOCK_4X4,   BLOCK_4X4,   BLOCK_4X4,   BLOCK_8X8,   BLOCK_8X8,
+  BLOCK_8X8,   BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
+  BLOCK_32X32, BLOCK_32X32, BLOCK_64X64
 };
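
Like the min/max tables before it, next_square_size is indexed by the BLOCK_SIZES enum, which (assuming the usual vp10 ordering) runs 4x4, 4x8, 8x4, 8x8, 8x16, 16x8, 16x16, ..., 64x64; each entry is the largest square that fits inside the indexed size, so it is governed by the smaller dimension. The equivalent rule as a sketch:

/* width_log2/height_log2 are pixel-dimension logs (4x4 -> 2,2; 64x64 -> 6,6).
 * Square sizes sit 3 apart in BLOCK_SIZES, so index = (min_log - 2) * 3. */
static int next_square_index(int width_log2, int height_log2) {
  const int m = width_log2 < height_log2 ? width_log2 : height_log2;
  return (m - 2) * 3; /* e.g. 8x16 -> min log 3 -> index 3 = BLOCK_8X8 */
}
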
 
 // Look at neighboring blocks and set a min and max partition size based on
 // what they chose.
 static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
-                                    MACROBLOCKD *const xd,
-                                    int mi_row, int mi_col,
-                                    BLOCK_SIZE *min_block_size,
+                                    MACROBLOCKD *const xd, int mi_row,
+                                    int mi_col, BLOCK_SIZE *min_block_size,
                                     BLOCK_SIZE *max_block_size) {
   VP10_COMMON *const cm = &cpi->common;
   MODE_INFO **mi = xd->mi;
@@ -1853,7 +1752,7 @@ static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
   int bh, bw;
   BLOCK_SIZE min_size = BLOCK_4X4;
   BLOCK_SIZE max_size = BLOCK_64X64;
-  int bs_hist[BLOCK_SIZES] = {0};
+  int bs_hist[BLOCK_SIZES] = { 0 };
 
   // Trap case where we do not have a prediction.
   if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
@@ -1890,8 +1789,7 @@ static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
   }
 
   // Check border cases where max and min from neighbors may not be legal.
-  max_size = find_partition_size(max_size,
-                                 row8x8_remaining, col8x8_remaining,
+  max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
                                  &bh, &bw);
   // Test for blocks at the edge of the active image.
   // This may be the actual edge of the image or where there are formatting
@@ -1908,7 +1806,7 @@ static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
   // *min_block_size.
   if (cpi->sf.use_square_partition_only &&
       next_square_size[max_size] < min_size) {
-     min_size = next_square_size[max_size];
+    min_size = next_square_size[max_size];
   }
 
   *min_block_size = min_size;
@@ -1916,10 +1814,10 @@ static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
 }
 
 // TODO(jingning) refactor functions setting partition search range
-static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd,
-                                int mi_row, int mi_col, BLOCK_SIZE bsize,
+static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+                                int mi_col, BLOCK_SIZE bsize,
                                 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
-  int mi_width  = num_8x8_blocks_wide_lookup[bsize];
+  int mi_width = num_8x8_blocks_wide_lookup[bsize];
   int mi_height = num_8x8_blocks_high_lookup[bsize];
   int idx, idy;
 
@@ -1978,16 +1876,19 @@ static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
 }
 
 #if CONFIG_FP_MB_STATS
-const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
-  {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
-const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
-  {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
-const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
-  {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
-const int qindex_split_threshold_lookup[BLOCK_SIZES] =
-  {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
-const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
-  {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
+const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
+                                                        1, 2, 2, 2, 4, 4 };
+const int num_16x16_blocks_high_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
+                                                        2, 1, 2, 4, 2, 4 };
+const int qindex_skip_threshold_lookup[BLOCK_SIZES] = {
+  0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120
+};
+const int qindex_split_threshold_lookup[BLOCK_SIZES] = {
+  0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120
+};
+const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = {
+  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6
+};
 
 typedef enum {
   MV_ZERO = 0,
@@ -2026,10 +1927,10 @@ static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
 // unlikely to be selected depending on previous rate-distortion optimization
 // results, for encoding speed-up.
 static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
-                              TileDataEnc *tile_data,
-                              TOKENEXTRA **tp, int mi_row, int mi_col,
-                              BLOCK_SIZE bsize, RD_COST *rd_cost,
-                              int64_t best_rd, PC_TREE *pc_tree) {
+                              TileDataEnc *tile_data, TOKENEXTRA **tp,
+                              int mi_row, int mi_col, BLOCK_SIZE bsize,
+                              RD_COST *rd_cost, int64_t best_rd,
+                              PC_TREE *pc_tree) {
   VP10_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCK *const x = &td->mb;
@@ -2060,14 +1961,14 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
 #endif
 
   int partition_none_allowed = !force_horz_split && !force_vert_split;
-  int partition_horz_allowed = !force_vert_split && yss <= xss &&
-                               bsize >= BLOCK_8X8;
-  int partition_vert_allowed = !force_horz_split && xss <= yss &&
-                               bsize >= BLOCK_8X8;
-  (void) *tp_orig;
+  int partition_horz_allowed =
+      !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
+  int partition_vert_allowed =
+      !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
+  (void)*tp_orig;
 
   assert(num_8x8_blocks_wide_lookup[bsize] ==
-             num_8x8_blocks_high_lookup[bsize]);
+         num_8x8_blocks_high_lookup[bsize]);
 
   vp10_rd_cost_init(&this_rdc);
   vp10_rd_cost_init(&sum_rdc);
@@ -2080,8 +1981,10 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
     x->mb_energy = vp10_block_energy(cpi, x, bsize);
 
   if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
-    int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
-        + get_chessboard_index(cm->current_video_frame)) & 0x1;
+    int cb_partition_search_ctrl =
+        ((pc_tree->index == 0 || pc_tree->index == 3) +
+         get_chessboard_index(cm->current_video_frame)) &
+        0x1;
 
     if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
       set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
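
The expression above implements the chessboard search control: whether a 16x16 block gets the neighborhood-derived min/max range depends on its position inside the 64x64 (pc_tree->index 0 or 3) combined with a per-frame parity, so consecutive frames constrain complementary blocks rather than always the same ones. Stripped to its parity test:

/* Hypothetical reduction of the cb_partition_search_ctrl computation. */
static int use_narrow_range(int pc_index, int frame_parity) {
  return ((pc_index == 0 || pc_index == 3) + frame_parity) & 1;
}
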
@@ -2091,10 +1994,10 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   // The threshold set here has to be of square block size.
   if (cpi->sf.auto_min_max_partition_size) {
     partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
-    partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
-                                force_horz_split);
-    partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
-                                force_vert_split);
+    partition_horz_allowed &=
+        ((bsize <= max_size && bsize > min_size) || force_horz_split);
+    partition_vert_allowed &=
+        ((bsize <= max_size && bsize > min_size) || force_vert_split);
     do_split &= bsize > min_size;
   }
   if (cpi->sf.use_square_partition_only) {
@@ -2107,8 +2010,8 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
 #if CONFIG_FP_MB_STATS
   if (cpi->use_fp_mb_stats) {
     set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
-    src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
-                                                  mi_row, mi_col, bsize);
+    src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row,
+                                                  mi_col, bsize);
   }
 #endif
 
@@ -2128,7 +2031,7 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
 
     // compute a complexity measure, basically measuring the inconsistency of
     // motion vectors obtained from the first pass in the current block
-    for (r = mb_row; r < mb_row_end ; r++) {
+    for (r = mb_row; r < mb_row_end; r++) {
       for (c = mb_col; c < mb_col_end; c++) {
         const int mb_index = r * cm->mb_cols + c;
 
@@ -2165,14 +2068,14 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
 
   // PARTITION_NONE
   if (partition_none_allowed) {
-    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
-                     &this_rdc, bsize, ctx, best_rdc.rdcost);
+    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, ctx,
+                     best_rdc.rdcost);
     if (this_rdc.rate != INT_MAX) {
       if (bsize >= BLOCK_8X8) {
         pl = partition_plane_context(xd, mi_row, mi_col, bsize);
         this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
-        this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
-                                 this_rdc.rate, this_rdc.dist);
+        this_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
       }
 
       if (this_rdc.rdcost < best_rdc.rdcost) {
@@ -2180,12 +2083,11 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
         int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
 
         best_rdc = this_rdc;
-        if (bsize >= BLOCK_8X8)
-          pc_tree->partitioning = PARTITION_NONE;
+        if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
 
         // Adjust dist breakout threshold according to the partition size.
-        dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
-            b_height_log2_lookup[bsize]);
+        dist_breakout_thr >>=
+            8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
 
         rate_breakout_thr *= num_pels_log2_lookup[bsize];
 
@@ -2196,7 +2098,7 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
         // early termination at that speed.
         if (!x->e_mbd.lossless[xd->mi[0]->mbmi.segment_id] &&
             (ctx->skippable && best_rdc.dist < dist_breakout_thr &&
-            best_rdc.rate < rate_breakout_thr)) {
+             best_rdc.rate < rate_breakout_thr)) {
           do_split = 0;
           do_rect = 0;
         }
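
Both breakout thresholds above are normalized by block size so one speed-feature setting scales across sizes: assuming b_width_log2/b_height_log2 of 4 for 64x64 and 1 for 8x8, the distortion threshold is shifted right by 8 - (4 + 4) = 0 for a 64x64 block but by 8 - (1 + 1) = 6 for an 8x8 one, i.e. a 64x smaller distortion budget for the small block, while the rate threshold is scaled up by the pixel-count log. The distortion half as a one-liner:

static long long scale_dist_thr(long long thr, int wlog2, int hlog2) {
  return thr >> (8 - (wlog2 + hlog2)); /* smaller block, tighter budget */
}
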
@@ -2252,8 +2154,7 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   }
 
   // store estimated motion vector
-  if (cpi->sf.adaptive_motion_search)
-    store_pred_mv(x, ctx);
+  if (cpi->sf.adaptive_motion_search) store_pred_mv(x, ctx);
 
   // PARTITION_SPLIT
   // TODO(jingning): use the motion vectors given by the above search as
@@ -2267,23 +2168,20 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
             ctx->mic.mbmi.interp_filter;
       rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                        pc_tree->leaf_split[0], best_rdc.rdcost);
-      if (sum_rdc.rate == INT_MAX)
-        sum_rdc.rdcost = INT64_MAX;
+      if (sum_rdc.rate == INT_MAX) sum_rdc.rdcost = INT64_MAX;
     } else {
       for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
-      const int x_idx = (i & 1) * mi_step;
-      const int y_idx = (i >> 1) * mi_step;
+        const int x_idx = (i & 1) * mi_step;
+        const int y_idx = (i >> 1) * mi_step;
 
         if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
           continue;
 
-        if (cpi->sf.adaptive_motion_search)
-          load_pred_mv(x, ctx);
+        if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
 
         pc_tree->split[i]->index = i;
-        rd_pick_partition(cpi, td, tile_data, tp,
-                          mi_row + y_idx, mi_col + x_idx,
-                          subsize, &this_rdc,
+        rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
+                          mi_col + x_idx, subsize, &this_rdc,
                           best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
 
         if (this_rdc.rate == INT_MAX) {
@@ -2300,8 +2198,7 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
     if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
       pl = partition_plane_context(xd, mi_row, mi_col, bsize);
       sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
-      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
-                              sum_rdc.rate, sum_rdc.dist);
+      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
 
       if (sum_rdc.rdcost < best_rdc.rdcost) {
         best_rdc = sum_rdc;
@@ -2310,8 +2207,7 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
     } else {
       // skip the rectangular partition test when a larger block size
       // gives a better rd cost
-      if (cpi->sf.less_rectangular_check)
-        do_rect &= !partition_none_allowed;
+      if (cpi->sf.less_rectangular_check) do_rect &= !partition_none_allowed;
     }
     restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
   }
@@ -2319,13 +2215,11 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   // PARTITION_HORZ
   if (partition_horz_allowed &&
       (do_rect || vp10_active_h_edge(cpi, mi_row, mi_step))) {
-      subsize = get_subsize(bsize, PARTITION_HORZ);
-    if (cpi->sf.adaptive_motion_search)
-      load_pred_mv(x, ctx);
+    subsize = get_subsize(bsize, PARTITION_HORZ);
+    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
     if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
         partition_none_allowed)
-      pc_tree->horizontal[0].pred_interp_filter =
-          ctx->mic.mbmi.interp_filter;
+      pc_tree->horizontal[0].pred_interp_filter = ctx->mic.mbmi.interp_filter;
     rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                      &pc_tree->horizontal[0], best_rdc.rdcost);
 
@@ -2335,14 +2229,12 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
       update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
       encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
 
-      if (cpi->sf.adaptive_motion_search)
-        load_pred_mv(x, ctx);
+      if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
       if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
           partition_none_allowed)
-        pc_tree->horizontal[1].pred_interp_filter =
-            ctx->mic.mbmi.interp_filter;
-      rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col,
-                       &this_rdc, subsize, &pc_tree->horizontal[1],
+        pc_tree->horizontal[1].pred_interp_filter = ctx->mic.mbmi.interp_filter;
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc,
+                       subsize, &pc_tree->horizontal[1],
                        best_rdc.rdcost - sum_rdc.rdcost);
       if (this_rdc.rate == INT_MAX) {
         sum_rdc.rdcost = INT64_MAX;
@@ -2367,14 +2259,12 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   // PARTITION_VERT
   if (partition_vert_allowed &&
       (do_rect || vp10_active_v_edge(cpi, mi_col, mi_step))) {
-      subsize = get_subsize(bsize, PARTITION_VERT);
+    subsize = get_subsize(bsize, PARTITION_VERT);
 
-    if (cpi->sf.adaptive_motion_search)
-      load_pred_mv(x, ctx);
+    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
     if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
         partition_none_allowed)
-      pc_tree->vertical[0].pred_interp_filter =
-          ctx->mic.mbmi.interp_filter;
+      pc_tree->vertical[0].pred_interp_filter = ctx->mic.mbmi.interp_filter;
     rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                      &pc_tree->vertical[0], best_rdc.rdcost);
     if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
@@ -2383,15 +2273,13 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
       encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
                         &pc_tree->vertical[0]);
 
-      if (cpi->sf.adaptive_motion_search)
-        load_pred_mv(x, ctx);
+      if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
       if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
           partition_none_allowed)
-        pc_tree->vertical[1].pred_interp_filter =
-            ctx->mic.mbmi.interp_filter;
-      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step,
-                       &this_rdc, subsize,
-                       &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
+        pc_tree->vertical[1].pred_interp_filter = ctx->mic.mbmi.interp_filter;
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc,
+                       subsize, &pc_tree->vertical[1],
+                       best_rdc.rdcost - sum_rdc.rdcost);
       if (this_rdc.rate == INT_MAX) {
         sum_rdc.rdcost = INT64_MAX;
       } else {
@@ -2404,8 +2292,7 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
     if (sum_rdc.rdcost < best_rdc.rdcost) {
       pl = partition_plane_context(xd, mi_row, mi_col, bsize);
       sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
-      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
-                              sum_rdc.rate, sum_rdc.dist);
+      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
       if (sum_rdc.rdcost < best_rdc.rdcost) {
         best_rdc = sum_rdc;
         pc_tree->partitioning = PARTITION_VERT;
@@ -2418,15 +2305,14 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   // warning related to the fact that best_rd isn't used after this
   // point.  This code should be refactored so that the duplicate
   // checks occur in some sub function and thus are used...
-  (void) best_rd;
+  (void)best_rd;
   *rd_cost = best_rdc;
 
-
   if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
       pc_tree->index != 3) {
     int output_enabled = (bsize == BLOCK_64X64);
-    encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
-              bsize, pc_tree);
+    encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
+              pc_tree);
   }
 
   if (bsize == BLOCK_64X64) {
@@ -2438,10 +2324,8 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   }
 }
 
-static void encode_rd_sb_row(VP10_COMP *cpi,
-                             ThreadData *td,
-                             TileDataEnc *tile_data,
-                             int mi_row,
+static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
+                             TileDataEnc *tile_data, int mi_row,
                              TOKENEXTRA **tp) {
   VP10_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
@@ -2468,8 +2352,7 @@ static void encode_rd_sb_row(VP10_COMP *cpi,
     MODE_INFO **mi = cm->mi_grid_visible + idx_str;
 
     if (sf->adaptive_pred_interp_filter) {
-      for (i = 0; i < 64; ++i)
-        td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
+      for (i = 0; i < 64; ++i) td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
 
       for (i = 0; i < 64; ++i) {
         td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
@@ -2483,8 +2366,8 @@ static void encode_rd_sb_row(VP10_COMP *cpi,
     td->pc_root->index = 0;
 
     if (seg->enabled) {
-      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
-                                                 : cm->last_frame_seg_map;
+      const uint8_t *const map =
+          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
       int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
       seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
     }
@@ -2495,27 +2378,26 @@ static void encode_rd_sb_row(VP10_COMP *cpi,
           seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
       set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
       set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
-      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
-                       BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+                       &dummy_rate, &dummy_dist, 1, td->pc_root);
     } else if (cpi->partition_search_skippable_frame) {
       BLOCK_SIZE bsize;
       set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
       bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
       set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
-      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
-                       BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+                       &dummy_rate, &dummy_dist, 1, td->pc_root);
     } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
                cm->frame_type != KEY_FRAME) {
       choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
-      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
-                       BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+                       &dummy_rate, &dummy_dist, 1, td->pc_root);
     } else {
       // If required, set upper and lower partition size limits
       if (sf->auto_min_max_partition_size) {
         set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
         rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
-                                &x->min_partition_size,
-                                &x->max_partition_size);
+                                &x->min_partition_size, &x->max_partition_size);
       }
       rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rdc, INT64_MAX, td->pc_root);
@@ -2537,8 +2419,7 @@ static void init_encode_frame_mb_context(VP10_COMP *cpi) {
   // Note: this memset assumes above_context[0], [1] and [2]
   // are allocated as part of the same buffer.
   memset(xd->above_context[0], 0,
-         sizeof(*xd->above_context[0]) *
-         2 * aligned_mi_cols * MAX_MB_PLANE);
+         sizeof(*xd->above_context[0]) * 2 * aligned_mi_cols * MAX_MB_PLANE);
   memset(xd->above_seg_context, 0,
          sizeof(*xd->above_seg_context) * aligned_mi_cols);
 }
@@ -2549,8 +2430,8 @@ static int check_dual_ref_flags(VP10_COMP *cpi) {
   if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
     return 0;
   } else {
-    return (!!(ref_flags & VPX_GOLD_FLAG) + !!(ref_flags & VPX_LAST_FLAG)
-        + !!(ref_flags & VPX_ALT_FLAG)) >= 2;
+    return (!!(ref_flags & VPX_GOLD_FLAG) + !!(ref_flags & VPX_LAST_FLAG) +
+            !!(ref_flags & VPX_ALT_FLAG)) >= 2;
   }
 }
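
The return expression above counts how many of the three reference-frame flags are set, using !! to fold each masked bit to 0 or 1; dual-reference logic is only worthwhile with at least two active references. The idiom in isolation (bit values illustrative, not the real VPX_*_FLAG constants):

enum { GOLD = 1, LAST = 2, ALT = 4 };

static int at_least_two_refs(int ref_flags) {
  return (!!(ref_flags & GOLD) + !!(ref_flags & LAST) +
          !!(ref_flags & ALT)) >= 2;
}
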
 
@@ -2579,11 +2460,10 @@ static MV_REFERENCE_FRAME get_frame_type(const VP10_COMP *cpi) {
 }
 
 static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
-  if (xd->lossless[0])
-    return ONLY_4X4;
+  if (xd->lossless[0]) return ONLY_4X4;
   if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
     return ALLOW_32X32;
-  else if (cpi->sf.tx_size_search_method == USE_FULL_RD||
+  else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
            cpi->sf.tx_size_search_method == USE_TX_8X8)
     return TX_MODE_SELECT;
   else
@@ -2599,10 +2479,9 @@ void vp10_init_tile_data(VP10_COMP *cpi) {
   int tile_tok = 0;
 
   if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
-    if (cpi->tile_data != NULL)
-      vpx_free(cpi->tile_data);
-    CHECK_MEM_ERROR(cm, cpi->tile_data,
-        vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
+    if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
+    CHECK_MEM_ERROR(cm, cpi->tile_data, vpx_malloc(tile_cols * tile_rows *
+                                                   sizeof(*cpi->tile_data)));
     cpi->allocated_tiles = tile_cols * tile_rows;
 
     for (tile_row = 0; tile_row < tile_rows; ++tile_row)
@@ -2632,13 +2511,12 @@ void vp10_init_tile_data(VP10_COMP *cpi) {
   }
 }
 
-void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td,
-                     int tile_row, int tile_col) {
+void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, int tile_row,
+                      int tile_col) {
   VP10_COMMON *const cm = &cpi->common;
   const int tile_cols = 1 << cm->log2_tile_cols;
-  TileDataEnc *this_tile =
-      &cpi->tile_data[tile_row * tile_cols + tile_col];
-  const TileInfo * const tile_info = &this_tile->tile_info;
+  TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
+  const TileInfo *const tile_info = &this_tile->tile_info;
   TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
   int mi_row;
 
@@ -2653,7 +2531,7 @@ void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td,
   cpi->tok_count[tile_row][tile_col] =
       (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
   assert(tok - cpi->tile_tok[tile_row][tile_col] <=
-      allocated_tokens(*tile_info));
+         allocated_tokens(*tile_info));
 }
 
 static void encode_tiles(VP10_COMP *cpi) {
@@ -2673,10 +2551,9 @@ static void encode_tiles(VP10_COMP *cpi) {
 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
                             VP10_COMMON *cm, uint8_t **this_frame_mb_stats) {
   uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
-      cm->current_video_frame * cm->MBs * sizeof(uint8_t);
+                         cm->current_video_frame * cm->MBs * sizeof(uint8_t);
 
-  if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
-    return EOF;
+  if (mb_stats_in > firstpass_mb_stats->mb_stats_end) return EOF;
 
   *this_frame_mb_stats = mb_stats_in;
 
@@ -2703,17 +2580,14 @@ static void encode_frame_internal(VP10_COMP *cpi) {
   rdc->ex_search_count = 0;  // Exhaustive mesh search hits.
 
   for (i = 0; i < MAX_SEGMENTS; ++i) {
-    const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled ?
-                       vp10_get_qindex(&cm->seg, i, cm->base_qindex) :
-                       cm->base_qindex;
-    xd->lossless[i] = qindex == 0 &&
-                      cm->y_dc_delta_q == 0 &&
-                      cm->uv_dc_delta_q == 0 &&
-                      cm->uv_ac_delta_q == 0;
+    const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled
+                           ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+                           : cm->base_qindex;
+    xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
+                      cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
   }
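
A segment only qualifies as lossless above when its effective qindex is 0 and every DC/AC quantizer delta is 0, since any nonzero step would discard information; with CONFIG_MISC_FIXES the per-segment qindex is consulted rather than just cm->base_qindex. The predicate condensed:

/* Hypothetical condensed form of the per-segment test above. */
static int segment_is_lossless(int qindex, int y_dc_delta_q, int uv_dc_delta_q,
                               int uv_ac_delta_q) {
  return qindex == 0 && y_dc_delta_q == 0 && uv_dc_delta_q == 0 &&
         uv_ac_delta_q == 0;
}
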
 
-  if (!cm->seg.enabled && xd->lossless[0])
-    x->optimize = 0;
+  if (!cm->seg.enabled && xd->lossless[0]) x->optimize = 0;
 
   cm->tx_mode = select_tx_mode(cpi, xd);
 
@@ -2722,15 +2596,13 @@ static void encode_frame_internal(VP10_COMP *cpi) {
   vp10_initialize_rd_consts(cpi);
   vp10_initialize_me_consts(cpi, x, cm->base_qindex);
   init_encode_frame_mb_context(cpi);
-  cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
-                           cm->width == cm->last_width &&
-                           cm->height == cm->last_height &&
-                           !cm->intra_only &&
-                           cm->last_show_frame;
+  cm->use_prev_frame_mvs =
+      !cm->error_resilient_mode && cm->width == cm->last_width &&
+      cm->height == cm->last_height && !cm->intra_only && cm->last_show_frame;
   // Special case: set prev_mi to NULL when the previous mode info
   // context cannot be used.
-  cm->prev_mi = cm->use_prev_frame_mvs ?
-                cm->prev_mip + cm->mi_stride + 1 : NULL;
+  cm->prev_mi =
+      cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;
 
   x->quant_fp = cpi->sf.use_quant_fp;
   vp10_zero(x->skip_txfm);
@@ -2740,10 +2612,10 @@ static void encode_frame_internal(VP10_COMP *cpi) {
     vpx_usec_timer_start(&emr_timer);
 
 #if CONFIG_FP_MB_STATS
-  if (cpi->use_fp_mb_stats) {
-    input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
-                     &cpi->twopass.this_frame_mb_stats);
-  }
+    if (cpi->use_fp_mb_stats) {
+      input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
+                       &cpi->twopass.this_frame_mb_stats);
+    }
 #endif
 
     // If allowed, encode tiles in parallel, one thread per tile.
@@ -2764,8 +2636,7 @@ static void encode_frame_internal(VP10_COMP *cpi) {
 
 static INTERP_FILTER get_interp_filter(
     const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
-  if (!is_alt_ref &&
-      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
+  if (!is_alt_ref && threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
       threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
       threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
     return EIGHTTAP_SMOOTH;
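get_interp_filter picks EIGHTTAP_SMOOTH only when its accumulated RD threshold strictly beats the regular, sharp, and switchable entries; the branches that follow (outside this hunk) apply the same dominance test to the other filters. A generalized sketch of that comparison pattern, assuming thresholds are comparable int64 scores:

    #include <stdint.h>

    /* A candidate wins only if it strictly dominates every other entry. */
    static int dominates(const int64_t t[], int n, int cand) {
      int i;
      for (i = 0; i < n; ++i)
        if (i != cand && t[cand] <= t[i]) return 0;
      return 1;
    }
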
@@ -2790,9 +2661,9 @@ void vp10_encode_frame(VP10_COMP *cpi) {
   // the other two.
   if (!frame_is_intra_only(cm)) {
     if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
-             cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
+         cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
         (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
-             cm->ref_frame_sign_bias[LAST_FRAME])) {
+         cm->ref_frame_sign_bias[LAST_FRAME])) {
       cpi->allow_comp_inter_inter = 0;
     } else {
       cpi->allow_comp_inter_inter = 1;
@@ -2826,10 +2697,8 @@ void vp10_encode_frame(VP10_COMP *cpi) {
     if (is_alt_ref || !cpi->allow_comp_inter_inter)
       cm->reference_mode = SINGLE_REFERENCE;
     else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
-             mode_thrs[COMPOUND_REFERENCE] >
-                 mode_thrs[REFERENCE_MODE_SELECT] &&
-             check_dual_ref_flags(cpi) &&
-             cpi->static_mb_pct == 100)
+             mode_thrs[COMPOUND_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT] &&
+             check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
       cm->reference_mode = COMPOUND_REFERENCE;
     else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
       cm->reference_mode = SINGLE_REFERENCE;
@@ -2941,18 +2810,17 @@ static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi,
   ++counts->uv_mode[y_mode][uv_mode];
 }
 
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td,
-                              TOKENEXTRA **t, int output_enabled,
-                              int mi_row, int mi_col, BLOCK_SIZE bsize,
-                              PICK_MODE_CONTEXT *ctx) {
+static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                              int output_enabled, int mi_row, int mi_col,
+                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
   VP10_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MODE_INFO **mi_8x8 = xd->mi;
   MODE_INFO *mi = mi_8x8[0];
   MB_MODE_INFO *mbmi = &mi->mbmi;
-  const int seg_skip = segfeature_active(&cm->seg, mbmi->segment_id,
-                                         SEG_LVL_SKIP);
+  const int seg_skip =
+      segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
   const int mis = cm->mi_stride;
   const int mi_width = num_8x8_blocks_wide_lookup[bsize];
   const int mi_height = num_8x8_blocks_high_lookup[bsize];
@@ -2962,8 +2830,7 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td,
                    cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
                    cpi->sf.allow_skip_recode;
 
-  if (!x->skip_recode)
-    memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+  if (!x->skip_recode) memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
 
   x->skip_optimize = ctx->is_coded;
   ctx->is_coded = 1;
@@ -2983,11 +2850,10 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td,
     const int is_compound = has_second_ref(mbmi);
     set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
     for (ref = 0; ref < 1 + is_compound; ++ref) {
-      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
-                                                     mbmi->ref_frame[ref]);
+      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
       assert(cfg != NULL);
       vp10_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
-                           &xd->block_refs[ref]->sf);
+                            &xd->block_refs[ref]->sf);
     }
     if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
       vp10_build_inter_predictors_sby(xd, mi_row, mi_col,
@@ -3001,8 +2867,7 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td,
   }
 
   if (output_enabled) {
-    if (cm->tx_mode == TX_MODE_SELECT &&
-        mbmi->sb_type >= BLOCK_8X8  &&
+    if (cm->tx_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_8X8 &&
         !(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
       ++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd),
                       &td->counts->tx)[mbmi->tx_size];
@@ -3024,15 +2889,14 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td,
     }
     ++td->counts->tx.tx_totals[mbmi->tx_size];
     ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])];
-    if (mbmi->tx_size < TX_32X32 &&
-        cm->base_qindex > 0 && !mbmi->skip &&
+    if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
         !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
       if (is_inter_block(mbmi)) {
         ++td->counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
       } else {
-        ++td->counts->intra_ext_tx[mbmi->tx_size]
-                                  [intra_mode_to_tx_type_context[mbmi->mode]]
-                                  [mbmi->tx_type];
+        ++td->counts
+              ->intra_ext_tx[mbmi->tx_size][intra_mode_to_tx_type_context
+                                                [mbmi->mode]][mbmi->tx_type];
       }
     }
   }
diff --git a/vp10/encoder/encodeframe.h b/vp10/encoder/encodeframe.h
index fbb81f8b171322abc688e5982a5f9195e0239b09..f24a572a5872e698540b138a95168410b0dee3c5 100644
--- a/vp10/encoder/encodeframe.h
+++ b/vp10/encoder/encodeframe.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_ENCODEFRAME_H_
 #define VP10_ENCODER_ENCODEFRAME_H_
 
@@ -31,14 +30,14 @@ struct ThreadData;
 #define VAR_HIST_SMALL_CUT_OFF 45
 
 void vp10_setup_src_planes(struct macroblock *x,
-                          const struct yv12_buffer_config *src,
-                          int mi_row, int mi_col);
+                           const struct yv12_buffer_config *src, int mi_row,
+                           int mi_col);
 
 void vp10_encode_frame(struct VP10_COMP *cpi);
 
 void vp10_init_tile_data(struct VP10_COMP *cpi);
 void vp10_encode_tile(struct VP10_COMP *cpi, struct ThreadData *td,
-                     int tile_row, int tile_col);
+                      int tile_row, int tile_col);
 
 void vp10_set_variance_partition_thresholds(struct VP10_COMP *cpi, int q);
 
diff --git a/vp10/encoder/encodemb.c b/vp10/encoder/encodemb.c
index 0a23d35293248b49804a33d85c8df685a115f86a..b2fbf13a9d824ee5ea3510e8754055142aabe4b4 100644
--- a/vp10/encoder/encodemb.c
+++ b/vp10/encoder/encodemb.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #include "./vp10_rtcd.h"
 #include "./vpx_config.h"
 #include "./vpx_dsp_rtcd.h"
@@ -53,32 +52,30 @@ void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
 #define RDTRUNC(RM, DM, R, D) ((128 + (R) * (RM)) & 0xFF)
 
 typedef struct vp10_token_state {
-  int           rate;
-  int           error;
-  int           next;
-  int16_t       token;
-  short         qc;
+  int rate;
+  int error;
+  int next;
+  int16_t token;
+  short qc;
 } vp10_token_state;
 
 // TODO(jimbankoski): experiment to find optimal RD numbers.
 static const int plane_rd_mult[PLANE_TYPES] = { 4, 2 };
 
-#define UPDATE_RD_COST()\
-{\
-  rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\
-  rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\
-  if (rd_cost0 == rd_cost1) {\
-    rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);\
-    rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);\
-  }\
-}
+#define UPDATE_RD_COST()                                \
+  {                                                     \
+    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);    \
+    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);    \
+    if (rd_cost0 == rd_cost1) {                         \
+      rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0); \
+      rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1); \
+    }                                                   \
+  }
 
 // This function is a placeholder for now but may ultimately need
 // to scan previous tokens to work out the correct context.
-static int trellis_get_coeff_context(const int16_t *scan,
-                                     const int16_t *nb,
-                                     int idx, int token,
-                                     uint8_t *token_cache) {
+static int trellis_get_coeff_context(const int16_t *scan, const int16_t *nb,
+                                     int idx, int token, uint8_t *token_cache) {
   int bak = token_cache[scan[idx]], pt;
   token_cache[scan[idx]] = vp10_pt_energy_class[token];
   pt = get_coef_context(nb, token_cache, idx + 1);
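UPDATE_RD_COST above combines rate and distortion for the two trellis branches and, on an exact tie, re-compares with RDTRUNC, which recovers the low-order bits that RDCOST's rounding shift discards, so ties break deterministically. A sketch of the assumed cost shape (the exact RDCOST definition lives outside this hunk):

    #include <stdint.h>

    /* Assumed shape: rate scaled by rdmult in rounded fixed point, plus
     * distortion scaled by rddiv. RDTRUNC keeps the bits the >> 8 drops. */
    static int64_t rd_cost(int rdmult, int rddiv, int rate, int64_t dist) {
      return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
    }
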
@@ -86,8 +83,8 @@ static int trellis_get_coeff_context(const int16_t *scan,
   return pt;
 }
 
-static int optimize_b(MACROBLOCK *mb, int plane, int block,
-                      TX_SIZE tx_size, int ctx) {
+static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+                      int ctx) {
   MACROBLOCKD *const xd = &mb->e_mbd;
   struct macroblock_plane *const p = &mb->plane[plane];
   struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -125,8 +122,7 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block,
   assert(eob <= default_eob);
 
   /* Now set up a Viterbi trellis to evaluate alternative roundings. */
-  if (!ref)
-    rdmult = (rdmult * 9) >> 4;
+  if (!ref) rdmult = (rdmult * 9) >> 4;
 
   /* Initialize the sentinel node of the trellis. */
   tokens[eob][0].rate = 0;
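optimize_b seeds the sentinel node at the EOB and then runs a two-state dynamic program back toward position 0, where each state roughly means "keep the quantized level" versus "round it toward zero", and each step keeps the cheaper predecessor. A stripped-down sketch of that Viterbi pass, with the per-branch cost left abstract; the caller is assumed to have seeded tok[eob][0] and tok[eob][1]:

    #include <stdint.h>

    typedef struct { int64_t cost; int next; } node;

    /* Extend both surviving paths at each position and keep the cheaper
     * predecessor per state -- the shape of the trellis above. */
    static void viterbi_pass(node tok[][2], int eob,
                             int64_t (*branch_cost)(int pos, int from, int to)) {
      int i, to;
      for (i = eob - 1; i >= 0; --i) {
        for (to = 0; to < 2; ++to) {
          const int64_t c0 = tok[i + 1][0].cost + branch_cost(i, 0, to);
          const int64_t c1 = tok[i + 1][1].cost + branch_cost(i, 1, to);
          tok[i][to].cost = (c0 <= c1) ? c0 : c1;
          tok[i][to].next = (c0 <= c1) ? 0 : 1;
        }
      }
    }
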
@@ -157,10 +153,12 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block,
       if (next < default_eob) {
         band = band_translate[i + 1];
         pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
-        rate0 += mb->token_costs[tx_size][type][ref][band][0][pt]
-                                [tokens[next][0].token];
-        rate1 += mb->token_costs[tx_size][type][ref][band][0][pt]
-                                [tokens[next][1].token];
+        rate0 +=
+            mb->token_costs[tx_size][type][ref][band][0][pt][tokens[next][0]
+                                                                 .token];
+        rate1 +=
+            mb->token_costs[tx_size][type][ref][band][0][pt][tokens[next][1]
+                                                                 .token];
       }
       UPDATE_RD_COST();
       /* And pick the best. */
@@ -185,8 +183,8 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block,
       rate1 = tokens[next][1].rate;
 
       if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) &&
-          (abs(x) * dequant_ptr[rc != 0] < abs(coeff[rc]) * mul +
-                                               dequant_ptr[rc != 0]))
+          (abs(x) * dequant_ptr[rc != 0] <
+           abs(coeff[rc]) * mul + dequant_ptr[rc != 0]))
         shortcut = 1;
       else
         shortcut = 0;
@@ -212,13 +210,15 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block,
         band = band_translate[i + 1];
         if (t0 != EOB_TOKEN) {
           pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
-          rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
-                                  [tokens[next][0].token];
+          rate0 +=
+              mb->token_costs[tx_size][type][ref][band][!x][pt][tokens[next][0]
+                                                                    .token];
         }
         if (t1 != EOB_TOKEN) {
           pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
-          rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
-                                  [tokens[next][1].token];
+          rate1 +=
+              mb->token_costs[tx_size][type][ref][band][!x][pt][tokens[next][1]
+                                                                    .token];
         }
       }
 
@@ -304,9 +304,8 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block,
   return final_eob;
 }
 
-static INLINE void fdct32x32(int rd_transform,
-                             const int16_t *src, tran_low_t *dst,
-                             int src_stride) {
+static INLINE void fdct32x32(int rd_transform, const int16_t *src,
+                             tran_low_t *dst, int src_stride) {
   if (rd_transform)
     vpx_fdct32x32_rd(src, dst, src_stride);
   else
@@ -329,17 +328,11 @@ void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
     vp10_fwht4x4(src_diff, coeff, diff_stride);
   } else {
     switch (tx_type) {
-      case DCT_DCT:
-        vpx_fdct4x4(src_diff, coeff, diff_stride);
-        break;
+      case DCT_DCT: vpx_fdct4x4(src_diff, coeff, diff_stride); break;
       case ADST_DCT:
       case DCT_ADST:
-      case ADST_ADST:
-        vp10_fht4x4(src_diff, coeff, diff_stride, tx_type);
-        break;
-      default:
-        assert(0);
-        break;
+      case ADST_ADST: vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
+      default: assert(0); break;
     }
   }
 }
@@ -350,12 +343,8 @@ static void fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
     case DCT_DCT:
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST:
-      vp10_fht8x8(src_diff, coeff, diff_stride, tx_type);
-      break;
-    default:
-      assert(0);
-      break;
+    case ADST_ADST: vp10_fht8x8(src_diff, coeff, diff_stride, tx_type); break;
+    default: assert(0); break;
   }
 }
 
@@ -365,12 +354,8 @@ static void fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff,
     case DCT_DCT:
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST:
-      vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
-      break;
-    default:
-      assert(0);
-      break;
+    case ADST_ADST: vp10_fht16x16(src_diff, coeff, diff_stride, tx_type); break;
+    default: assert(0); break;
   }
 }
 
@@ -378,17 +363,11 @@ static void fwd_txfm_32x32(int rd_transform, const int16_t *src_diff,
                            tran_low_t *coeff, int diff_stride,
                            TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT:
-      fdct32x32(rd_transform, src_diff, coeff, diff_stride);
-      break;
+    case DCT_DCT: fdct32x32(rd_transform, src_diff, coeff, diff_stride); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST:
-      assert(0);
-      break;
-    default:
-      assert(0);
-      break;
+    case ADST_ADST: assert(0); break;
+    default: assert(0); break;
   }
 }
 
@@ -400,52 +379,40 @@ void vp10_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
     vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
   } else {
     switch (tx_type) {
-      case DCT_DCT:
-        vpx_highbd_fdct4x4(src_diff, coeff, diff_stride);
-        break;
+      case DCT_DCT: vpx_highbd_fdct4x4(src_diff, coeff, diff_stride); break;
       case ADST_DCT:
       case DCT_ADST:
       case ADST_ADST:
         vp10_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
         break;
-      default:
-        assert(0);
-        break;
+      default: assert(0); break;
     }
   }
 }
 
 static void highbd_fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
-                         int diff_stride, TX_TYPE tx_type) {
+                                int diff_stride, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT:
-      vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
-      break;
+    case DCT_DCT: vpx_highbd_fdct8x8(src_diff, coeff, diff_stride); break;
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
       vp10_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
 
 static void highbd_fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff,
-                           int diff_stride, TX_TYPE tx_type) {
+                                  int diff_stride, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT:
-      vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
-      break;
+    case DCT_DCT: vpx_highbd_fdct16x16(src_diff, coeff, diff_stride); break;
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
       vp10_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
 
@@ -458,19 +425,14 @@ static void highbd_fwd_txfm_32x32(int rd_transform, const int16_t *src_diff,
       break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST:
-      assert(0);
-      break;
-    default:
-      assert(0);
-      break;
+    case ADST_ADST: assert(0); break;
+    default: assert(0); break;
   }
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
-                         int blk_row, int blk_col,
-                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
+                         int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -491,24 +453,23 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
       case TX_32X32:
         highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
         vp10_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
-                                     p->round_fp, p->quant_fp, p->quant_shift,
-                                     qcoeff, dqcoeff, pd->dequant,
-                                     eob, scan_order->scan,
-                                     scan_order->iscan);
+                                      p->round_fp, p->quant_fp, p->quant_shift,
+                                      qcoeff, dqcoeff, pd->dequant, eob,
+                                      scan_order->scan, scan_order->iscan);
         break;
       case TX_16X16:
         vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
         vp10_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
-                               p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
-                               pd->dequant, eob,
-                               scan_order->scan, scan_order->iscan);
+                                p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+                                pd->dequant, eob, scan_order->scan,
+                                scan_order->iscan);
         break;
       case TX_8X8:
         vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
         vp10_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
-                               p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
-                               pd->dequant, eob,
-                               scan_order->scan, scan_order->iscan);
+                                p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+                                pd->dequant, eob, scan_order->scan,
+                                scan_order->iscan);
         break;
       case TX_4X4:
         if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -517,12 +478,11 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
           vpx_highbd_fdct4x4(src_diff, coeff, diff_stride);
         }
         vp10_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
-                               p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
-                               pd->dequant, eob,
-                               scan_order->scan, scan_order->iscan);
+                                p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+                                pd->dequant, eob, scan_order->scan,
+                                scan_order->iscan);
         break;
-      default:
-        assert(0);
+      default: assert(0);
     }
     return;
   }
@@ -532,23 +492,21 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
     case TX_32X32:
       fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
       vp10_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
-                            p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
-                            pd->dequant, eob, scan_order->scan,
-                            scan_order->iscan);
+                             p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+                             pd->dequant, eob, scan_order->scan,
+                             scan_order->iscan);
       break;
     case TX_16X16:
       vpx_fdct16x16(src_diff, coeff, diff_stride);
       vp10_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
-                      p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
-                      pd->dequant, eob,
-                      scan_order->scan, scan_order->iscan);
+                       p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+                       pd->dequant, eob, scan_order->scan, scan_order->iscan);
       break;
     case TX_8X8:
-      vp10_fdct8x8_quant(src_diff, diff_stride, coeff, 64,
-                        x->skip_block, p->zbin, p->round_fp,
-                        p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
-                        pd->dequant, eob,
-                        scan_order->scan, scan_order->iscan);
+      vp10_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block,
+                         p->zbin, p->round_fp, p->quant_fp, p->quant_shift,
+                         qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
+                         scan_order->iscan);
       break;
     case TX_4X4:
       if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -557,19 +515,15 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
         vpx_fdct4x4(src_diff, coeff, diff_stride);
       }
       vp10_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
-                      p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
-                      pd->dequant, eob,
-                      scan_order->scan, scan_order->iscan);
-      break;
-    default:
-      assert(0);
+                       p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+                       pd->dequant, eob, scan_order->scan, scan_order->iscan);
       break;
+    default: assert(0); break;
   }
 }
 
-void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
-                         int blk_row, int blk_col,
-                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
+                         int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -593,14 +547,14 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
       case TX_16X16:
         vpx_highbd_fdct16x16_1(src_diff, coeff, diff_stride);
         vpx_highbd_quantize_dc(coeff, 256, x->skip_block, p->round,
-                               p->quant_fp[0], qcoeff, dqcoeff,
-                               pd->dequant[0], eob);
+                               p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+                               eob);
         break;
       case TX_8X8:
         vpx_highbd_fdct8x8_1(src_diff, coeff, diff_stride);
         vpx_highbd_quantize_dc(coeff, 64, x->skip_block, p->round,
-                               p->quant_fp[0], qcoeff, dqcoeff,
-                               pd->dequant[0], eob);
+                               p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+                               eob);
         break;
       case TX_4X4:
         if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -609,11 +563,10 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
           vpx_highbd_fdct4x4(src_diff, coeff, diff_stride);
         }
         vpx_highbd_quantize_dc(coeff, 16, x->skip_block, p->round,
-                               p->quant_fp[0], qcoeff, dqcoeff,
-                               pd->dequant[0], eob);
+                               p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+                               eob);
         break;
-      default:
-        assert(0);
+      default: assert(0);
     }
     return;
   }
@@ -622,21 +575,18 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
   switch (tx_size) {
     case TX_32X32:
       vpx_fdct32x32_1(src_diff, coeff, diff_stride);
-      vpx_quantize_dc_32x32(coeff, x->skip_block, p->round,
-                            p->quant_fp[0], qcoeff, dqcoeff,
-                            pd->dequant[0], eob);
+      vpx_quantize_dc_32x32(coeff, x->skip_block, p->round, p->quant_fp[0],
+                            qcoeff, dqcoeff, pd->dequant[0], eob);
       break;
     case TX_16X16:
       vpx_fdct16x16_1(src_diff, coeff, diff_stride);
-      vpx_quantize_dc(coeff, 256, x->skip_block, p->round,
-                     p->quant_fp[0], qcoeff, dqcoeff,
-                     pd->dequant[0], eob);
+      vpx_quantize_dc(coeff, 256, x->skip_block, p->round, p->quant_fp[0],
+                      qcoeff, dqcoeff, pd->dequant[0], eob);
       break;
     case TX_8X8:
       vpx_fdct8x8_1(src_diff, coeff, diff_stride);
-      vpx_quantize_dc(coeff, 64, x->skip_block, p->round,
-                      p->quant_fp[0], qcoeff, dqcoeff,
-                      pd->dequant[0], eob);
+      vpx_quantize_dc(coeff, 64, x->skip_block, p->round, p->quant_fp[0],
+                      qcoeff, dqcoeff, pd->dequant[0], eob);
       break;
     case TX_4X4:
       if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -644,21 +594,15 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
       } else {
         vpx_fdct4x4(src_diff, coeff, diff_stride);
       }
-      vpx_quantize_dc(coeff, 16, x->skip_block, p->round,
-                      p->quant_fp[0], qcoeff, dqcoeff,
-                      pd->dequant[0], eob);
-      break;
-    default:
-      assert(0);
+      vpx_quantize_dc(coeff, 16, x->skip_block, p->round, p->quant_fp[0],
+                      qcoeff, dqcoeff, pd->dequant[0], eob);
       break;
+    default: assert(0); break;
   }
 }
 
-
-
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block,
-                      int blk_row, int blk_col,
-                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+                      int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
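vp10_xform_quant_dc above is the fast path: it computes only the DC term of the forward transform (the *_1 kernels) and quantizes that single coefficient, instead of running the full transform and full quantizer. A sketch of a DC-only quantize step, assuming the usual round-then-multiply fixed-point convention with the quantizer in Q16:

    #include <stdint.h>

    /* One coefficient in, one out; returns the new end-of-block (0 or 1).
     * Hypothetical shape mirroring vpx_quantize_dc's convention. */
    static int quantize_dc_sketch(int32_t dc, int16_t round, int16_t quant,
                                  int16_t dequant, int32_t *qcoeff,
                                  int32_t *dqcoeff) {
      const int sign = dc < 0;
      const int abs_dc = sign ? -dc : dc;
      const int q = (int)(((int64_t)(abs_dc + round) * quant) >> 16);
      *qcoeff = sign ? -q : q;
      *dqcoeff = *qcoeff * dequant;
      return q != 0;
    }
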
@@ -675,39 +619,38 @@ void vp10_xform_quant(MACROBLOCK *x, int plane, int block,
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-     switch (tx_size) {
+    switch (tx_size) {
       case TX_32X32:
         highbd_fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride,
-                         tx_type);
+                              tx_type);
         vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
                                     p->round, p->quant, p->quant_shift, qcoeff,
-                                    dqcoeff, pd->dequant, eob,
-                                    scan_order->scan, scan_order->iscan);
+                                    dqcoeff, pd->dequant, eob, scan_order->scan,
+                                    scan_order->iscan);
         break;
       case TX_16X16:
         highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
         vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
                               p->quant, p->quant_shift, qcoeff, dqcoeff,
-                              pd->dequant, eob,
-                              scan_order->scan, scan_order->iscan);
+                              pd->dequant, eob, scan_order->scan,
+                              scan_order->iscan);
         break;
       case TX_8X8:
         highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
         vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
                               p->quant, p->quant_shift, qcoeff, dqcoeff,
-                              pd->dequant, eob,
-                              scan_order->scan, scan_order->iscan);
+                              pd->dequant, eob, scan_order->scan,
+                              scan_order->iscan);
         break;
       case TX_4X4:
         vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
                                  xd->lossless[xd->mi[0]->mbmi.segment_id]);
         vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                               p->quant, p->quant_shift, qcoeff, dqcoeff,
-                              pd->dequant, eob,
-                              scan_order->scan, scan_order->iscan);
+                              pd->dequant, eob, scan_order->scan,
+                              scan_order->iscan);
         break;
-      default:
-        assert(0);
+      default: assert(0);
     }
     return;
   }
@@ -723,35 +666,29 @@ void vp10_xform_quant(MACROBLOCK *x, int plane, int block,
       break;
     case TX_16X16:
       fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
-      vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
-                     p->quant, p->quant_shift, qcoeff, dqcoeff,
-                     pd->dequant, eob,
+      vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,
+                     p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                      scan_order->scan, scan_order->iscan);
       break;
     case TX_8X8:
       fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
-      vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
-                     p->quant, p->quant_shift, qcoeff, dqcoeff,
-                     pd->dequant, eob,
+      vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
+                     p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                      scan_order->scan, scan_order->iscan);
       break;
     case TX_4X4:
       vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
                         xd->lossless[xd->mi[0]->mbmi.segment_id]);
-      vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
-                     p->quant, p->quant_shift, qcoeff, dqcoeff,
-                     pd->dequant, eob,
+      vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
+                     p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                      scan_order->scan, scan_order->iscan);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
 
 static void encode_block(int plane, int block, int blk_row, int blk_col,
-                         BLOCK_SIZE plane_bsize,
-                         TX_SIZE tx_size, void *arg) {
+                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
   struct encode_b_args *const args = arg;
   MACROBLOCK *const x = args->x;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -783,20 +720,20 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
         *a = *l = 0;
         return;
       } else {
-        vp10_xform_quant_fp(x, plane, block, blk_row, blk_col,
-                            plane_bsize, tx_size);
+        vp10_xform_quant_fp(x, plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size);
       }
     } else {
       if (max_txsize_lookup[plane_bsize] == tx_size) {
         int txfm_blk_index = (plane << 2) + (block >> (tx_size << 1));
         if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_NONE) {
           // full forward transform and quantization
-          vp10_xform_quant(x, plane, block, blk_row, blk_col,
-                           plane_bsize, tx_size);
+          vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
+                           tx_size);
         } else if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_AC_ONLY) {
           // fast path forward transform and quantization
-          vp10_xform_quant_dc(x, plane, block, blk_row, blk_col,
-                              plane_bsize, tx_size);
+          vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
+                              tx_size);
         } else {
           // skip forward transform
           p->eobs[block] = 0;
@@ -804,8 +741,8 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
           return;
         }
       } else {
-        vp10_xform_quant(x, plane, block, blk_row, blk_col,
-                         plane_bsize, tx_size);
+        vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
+                         tx_size);
       }
     }
   }
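When the block uses the maximum transform size, encode_block above consults a precomputed skip hint and takes one of three paths: full transform plus quantization, DC-only, or skip entirely (eob forced to 0). The hint lives in a small per-plane table indexed as in the code:

    /* Index into x->skip_txfm for the whole-block transform case: four slots
     * per plane, with the block index scaled down by the transform area. */
    static int skip_txfm_index(int plane, int block, int tx_size) {
      return (plane << 2) + (block >> (tx_size << 1));
    }
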
@@ -817,11 +754,9 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
     *a = *l = p->eobs[block] > 0;
   }
 
-  if (p->eobs[block])
-    *(args->skip) = 0;
+  if (p->eobs[block]) *(args->skip) = 0;
 
-  if (p->eobs[block] == 0)
-    return;
+  if (p->eobs[block] == 0) return;
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     switch (tx_size) {
@@ -845,9 +780,7 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
                                      p->eobs[block], xd->bd, tx_type,
                                      xd->lossless[xd->mi[0]->mbmi.segment_id]);
         break;
-      default:
-        assert(0 && "Invalid transform size");
-        break;
+      default: assert(0 && "Invalid transform size"); break;
     }
 
     return;
@@ -874,15 +807,13 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
       vp10_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                             tx_type, xd->lossless[xd->mi[0]->mbmi.segment_id]);
       break;
-    default:
-      assert(0 && "Invalid transform size");
-      break;
+    default: assert(0 && "Invalid transform size"); break;
   }
 }
 
 static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
-                               BLOCK_SIZE plane_bsize,
-                               TX_SIZE tx_size, void *arg) {
+                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                               void *arg) {
   MACROBLOCK *const x = (MACROBLOCK *)arg;
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblock_plane *const p = &x->plane[plane];
@@ -897,11 +828,11 @@ static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
 #if CONFIG_VPX_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       if (xd->lossless[0]) {
-        vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride,
-                                p->eobs[block], xd->bd);
+        vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+                                xd->bd);
       } else {
-        vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride,
-                                p->eobs[block], xd->bd);
+        vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+                                xd->bd);
       }
       return;
     }
@@ -917,41 +848,39 @@ static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
 void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
   vp10_subtract_plane(x, bsize, 0);
   vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
-                                         encode_block_pass1, x);
+                                          encode_block_pass1, x);
 }
 
 void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
   MACROBLOCKD *const xd = &x->e_mbd;
   struct optimize_ctx ctx;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-  struct encode_b_args arg = {x, &ctx, &mbmi->skip};
+  struct encode_b_args arg = { x, &ctx, &mbmi->skip };
   int plane;
 
   mbmi->skip = 1;
 
-  if (x->skip)
-    return;
+  if (x->skip) return;
 
   for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-    if (!x->skip_recode)
-      vp10_subtract_plane(x, bsize, plane);
+    if (!x->skip_recode) vp10_subtract_plane(x, bsize, plane);
 
     if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
-      const struct macroblockd_plane* const pd = &xd->plane[plane];
+      const struct macroblockd_plane *const pd = &xd->plane[plane];
       const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
-      vp10_get_entropy_contexts(bsize, tx_size, pd,
-                               ctx.ta[plane], ctx.tl[plane]);
+      vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
+                                ctx.tl[plane]);
     }
 
     vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
-                                           &arg);
+                                            &arg);
   }
 }
 
 void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
-                             BLOCK_SIZE plane_bsize,
-                             TX_SIZE tx_size, void *arg) {
-  struct encode_b_args* const args = arg;
+                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                             void *arg) {
+  struct encode_b_args *const args = arg;
   MACROBLOCK *const x = args->x;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -977,16 +906,16 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
   src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
 
   mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
-  vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride,
-                          dst, dst_stride, blk_col, blk_row, plane);
+  vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
+                           dst_stride, blk_col, blk_row, plane);
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     switch (tx_size) {
       case TX_32X32:
         if (!x->skip_recode) {
-          vpx_highbd_subtract_block(32, 32, src_diff, diff_stride,
-                                    src, src_stride, dst, dst_stride, xd->bd);
+          vpx_highbd_subtract_block(32, 32, src_diff, diff_stride, src,
+                                    src_stride, dst, dst_stride, xd->bd);
           highbd_fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff,
                                 diff_stride, tx_type);
           vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
@@ -1000,13 +929,13 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
         break;
       case TX_16X16:
         if (!x->skip_recode) {
-          vpx_highbd_subtract_block(16, 16, src_diff, diff_stride,
-                                    src, src_stride, dst, dst_stride, xd->bd);
+          vpx_highbd_subtract_block(16, 16, src_diff, diff_stride, src,
+                                    src_stride, dst, dst_stride, xd->bd);
           highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
           vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
                                 p->quant, p->quant_shift, qcoeff, dqcoeff,
-                                pd->dequant, eob,
-                                scan_order->scan, scan_order->iscan);
+                                pd->dequant, eob, scan_order->scan,
+                                scan_order->iscan);
         }
         if (*eob)
           vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, xd->bd,
@@ -1014,13 +943,13 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
         break;
       case TX_8X8:
         if (!x->skip_recode) {
-          vpx_highbd_subtract_block(8, 8, src_diff, diff_stride,
-                                    src, src_stride, dst, dst_stride, xd->bd);
+          vpx_highbd_subtract_block(8, 8, src_diff, diff_stride, src,
+                                    src_stride, dst, dst_stride, xd->bd);
           highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
           vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
                                 p->quant, p->quant_shift, qcoeff, dqcoeff,
-                                pd->dequant, eob,
-                                scan_order->scan, scan_order->iscan);
+                                pd->dequant, eob, scan_order->scan,
+                                scan_order->iscan);
         }
         if (*eob)
           vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, xd->bd,
@@ -1028,14 +957,14 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
         break;
       case TX_4X4:
         if (!x->skip_recode) {
-          vpx_highbd_subtract_block(4, 4, src_diff, diff_stride,
-                                    src, src_stride, dst, dst_stride, xd->bd);
+          vpx_highbd_subtract_block(4, 4, src_diff, diff_stride, src,
+                                    src_stride, dst, dst_stride, xd->bd);
           vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
                                    xd->lossless[mbmi->segment_id]);
           vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                                 p->quant, p->quant_shift, qcoeff, dqcoeff,
-                                pd->dequant, eob,
-                                scan_order->scan, scan_order->iscan);
+                                pd->dequant, eob, scan_order->scan,
+                                scan_order->iscan);
         }
 
         if (*eob)
@@ -1045,12 +974,9 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
           vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, xd->bd,
                                        tx_type, xd->lossless[mbmi->segment_id]);
         break;
-      default:
-        assert(0);
-        return;
+      default: assert(0); return;
     }
-    if (*eob)
-      *(args->skip) = 0;
+    if (*eob) *(args->skip) = 0;
     return;
   }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
@@ -1058,8 +984,8 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
   switch (tx_size) {
     case TX_32X32:
       if (!x->skip_recode) {
-        vpx_subtract_block(32, 32, src_diff, diff_stride,
-                           src, src_stride, dst, dst_stride);
+        vpx_subtract_block(32, 32, src_diff, diff_stride, src, src_stride, dst,
+                           dst_stride);
         fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride,
                        tx_type);
         vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
@@ -1072,40 +998,36 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
       break;
     case TX_16X16:
       if (!x->skip_recode) {
-        vpx_subtract_block(16, 16, src_diff, diff_stride,
-                           src, src_stride, dst, dst_stride);
+        vpx_subtract_block(16, 16, src_diff, diff_stride, src, src_stride, dst,
+                           dst_stride);
         fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
-        vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
-                       p->quant, p->quant_shift, qcoeff, dqcoeff,
-                       pd->dequant, eob, scan_order->scan,
-                       scan_order->iscan);
+        vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,
+                       p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+                       scan_order->scan, scan_order->iscan);
       }
       if (*eob)
         vp10_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type);
       break;
     case TX_8X8:
       if (!x->skip_recode) {
-        vpx_subtract_block(8, 8, src_diff, diff_stride,
-                           src, src_stride, dst, dst_stride);
+        vpx_subtract_block(8, 8, src_diff, diff_stride, src, src_stride, dst,
+                           dst_stride);
         fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
         vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
-                       p->quant_shift, qcoeff, dqcoeff,
-                       pd->dequant, eob, scan_order->scan,
-                       scan_order->iscan);
+                       p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+                       scan_order->scan, scan_order->iscan);
       }
-      if (*eob)
-        vp10_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
+      if (*eob) vp10_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
       break;
     case TX_4X4:
       if (!x->skip_recode) {
-        vpx_subtract_block(4, 4, src_diff, diff_stride,
-                           src, src_stride, dst, dst_stride);
+        vpx_subtract_block(4, 4, src_diff, diff_stride, src, src_stride, dst,
+                           dst_stride);
         vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
                           xd->lossless[mbmi->segment_id]);
         vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
-                       p->quant_shift, qcoeff, dqcoeff,
-                       pd->dequant, eob, scan_order->scan,
-                       scan_order->iscan);
+                       p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+                       scan_order->scan, scan_order->iscan);
       }
 
       if (*eob) {
@@ -1116,17 +1038,14 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
                               xd->lossless[mbmi->segment_id]);
       }
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
-  if (*eob)
-    *(args->skip) = 0;
+  if (*eob) *(args->skip) = 0;
 }
 
 void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   const MACROBLOCKD *const xd = &x->e_mbd;
-  struct encode_b_args arg = {x, NULL, &xd->mi[0]->mbmi.skip};
+  struct encode_b_args arg = { x, NULL, &xd->mi[0]->mbmi.skip };
 
   vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
                                           vp10_encode_block_intra, &arg);
diff --git a/vp10/encoder/encodemb.h b/vp10/encoder/encodemb.h
index b3e6f632e5b0ddcd0b0caadda9d2eb6743113251..4d1c826ce8279c159e7e6f2e7b98d200a2d3f39f 100644
--- a/vp10/encoder/encodemb.h
+++ b/vp10/encoder/encodemb.h
@@ -25,21 +25,18 @@ struct encode_b_args {
 };
 void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
 void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
-void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
-                         int blk_row, int blk_col,
-                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
-                         int blk_row, int blk_col,
-                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block,
-                      int blk_row, int blk_col,
-                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
+void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
+                         int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
+void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
+                         int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
+void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+                      int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
 
 void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
 
 void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
-                             BLOCK_SIZE plane_bsize,
-                             TX_SIZE tx_size, void *arg);
+                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                             void *arg);
 
 void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
 
diff --git a/vp10/encoder/encodemv.c b/vp10/encoder/encodemv.c
index 0736c65b3fad8a7ab21b2bf2de45386a6763f7b5..ef670f5249423d94eb447df92f3e848643344064 100644
--- a/vp10/encoder/encodemv.c
+++ b/vp10/encoder/encodemv.c
@@ -31,15 +31,15 @@ void vp10_entropy_mv_init(void) {
   vp10_tokens_from_tree(mv_fp_encodings, vp10_mv_fp_tree);
 }
 
-static void encode_mv_component(vpx_writer* w, int comp,
-                                const nmv_component* mvcomp, int usehp) {
+static void encode_mv_component(vpx_writer *w, int comp,
+                                const nmv_component *mvcomp, int usehp) {
   int offset;
   const int sign = comp < 0;
   const int mag = sign ? -comp : comp;
   const int mv_class = vp10_get_mv_class(mag - 1, &offset);
-  const int d = offset >> 3;                // int mv data
-  const int fr = (offset >> 1) & 3;         // fractional mv data
-  const int hp = offset & 1;                // high precision mv data
+  const int d = offset >> 3;         // int mv data
+  const int fr = (offset >> 1) & 3;  // fractional mv data
+  const int hp = offset & 1;         // high precision mv data
 
   assert(comp != 0);
 
@@ -48,33 +48,30 @@ static void encode_mv_component(vpx_writer* w, int comp,
 
   // Class
   vp10_write_token(w, vp10_mv_class_tree, mvcomp->classes,
-                  &mv_class_encodings[mv_class]);
+                   &mv_class_encodings[mv_class]);
 
   // Integer bits
   if (mv_class == MV_CLASS_0) {
     vp10_write_token(w, vp10_mv_class0_tree, mvcomp->class0,
-                    &mv_class0_encodings[d]);
+                     &mv_class0_encodings[d]);
   } else {
     int i;
     const int n = mv_class + CLASS0_BITS - 1;  // number of bits
-    for (i = 0; i < n; ++i)
-      vpx_write(w, (d >> i) & 1, mvcomp->bits[i]);
+    for (i = 0; i < n; ++i) vpx_write(w, (d >> i) & 1, mvcomp->bits[i]);
   }
 
   // Fractional bits
   vp10_write_token(w, vp10_mv_fp_tree,
-                  mv_class == MV_CLASS_0 ?  mvcomp->class0_fp[d] : mvcomp->fp,
-                  &mv_fp_encodings[fr]);
+                   mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+                   &mv_fp_encodings[fr]);
 
   // High precision bit
   if (usehp)
-    vpx_write(w, hp,
-              mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+    vpx_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
 }
 
-
 static void build_nmv_component_cost_table(int *mvcost,
-                                           const nmv_component* const mvcomp,
+                                           const nmv_component *const mvcomp,
                                            int usehp) {
   int i, v;
   int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
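encode_mv_component above splits a component's magnitude into sign, class, and an offset that packs the integer part d, the 2-bit fractional part fr, and the high-precision bit hp into one value, per the shifts in the code. A round-trip sketch of that bit layout:

    /* offset layout: [ d : integer | fr : 2 bits | hp : 1 bit ]. */
    static void unpack_offset(int offset, int *d, int *fr, int *hp) {
      *d = offset >> 3;        /* int mv data           */
      *fr = (offset >> 1) & 3; /* fractional mv data    */
      *hp = offset & 1;        /* high-precision mv bit */
    }

    static int pack_offset(int d, int fr, int hp) {
      return (d << 3) | (fr << 1) | hp;
    }

build_nmv_component_cost_table, whose definition begins just above, walks this same decomposition when it precomputes per-magnitude bit costs.
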
@@ -107,16 +104,15 @@ static void build_nmv_component_cost_table(int *mvcost,
     z = v - 1;
     c = vp10_get_mv_class(z, &o);
     cost += class_cost[c];
-    d = (o >> 3);               /* int mv data */
-    f = (o >> 1) & 3;           /* fractional pel mv data */
-    e = (o & 1);                /* high precision mv data */
+    d = (o >> 3);     /* int mv data */
+    f = (o >> 1) & 3; /* fractional pel mv data */
+    e = (o & 1);      /* high precision mv data */
     if (c == MV_CLASS_0) {
       cost += class0_cost[d];
     } else {
       int i, b;
-      b = c + CLASS0_BITS - 1;  /* number of bits */
-      for (i = 0; i < b; ++i)
-        cost += bits_cost[i][((d >> i) & 1)];
+      b = c + CLASS0_BITS - 1; /* number of bits */
+      for (i = 0; i < b; ++i) cost += bits_cost[i][((d >> i) & 1)];
     }
     if (c == MV_CLASS_0) {
       cost += class0_fp_cost[d][f];
@@ -138,7 +134,7 @@ static void build_nmv_component_cost_table(int *mvcost,
 static void update_mv(vpx_writer *w, const unsigned int ct[2], vpx_prob *cur_p,
                       vpx_prob upd_p) {
 #if CONFIG_MISC_FIXES
-  (void) upd_p;
+  (void)upd_p;
   vp10_cond_prob_diff_update(w, cur_p, ct);
 #else
   const vpx_prob new_p = get_binary_prob(ct[0], ct[1]) | 1;
@@ -154,8 +150,8 @@ static void update_mv(vpx_writer *w, const unsigned int ct[2], vpx_prob *cur_p,
 
 static void write_mv_update(const vpx_tree_index *tree,
                             vpx_prob probs[/*n - 1*/],
-                            const unsigned int counts[/*n - 1*/],
-                            int n, vpx_writer *w) {
+                            const unsigned int counts[/*n - 1*/], int n,
+                            vpx_writer *w) {
   int i;
   unsigned int branch_ct[32][2];
 
@@ -168,11 +164,12 @@ static void write_mv_update(const vpx_tree_index *tree,
 }
 
 void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
-                         nmv_context_counts *const counts) {
+                          nmv_context_counts *const counts) {
   int i, j;
   nmv_context *const mvc = &cm->fc->nmvc;
 
-  write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w);
+  write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
+                  w);
 
   for (i = 0; i < 2; ++i) {
     nmv_component *comp = &mvc->comps[i];
@@ -205,15 +202,14 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
   }
 }
 
-void vp10_encode_mv(VP10_COMP* cpi, vpx_writer* w,
-                   const MV* mv, const MV* ref,
-                   const nmv_context* mvctx, int usehp) {
-  const MV diff = {mv->row - ref->row,
-                   mv->col - ref->col};
+void vp10_encode_mv(VP10_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref,
+                    const nmv_context *mvctx, int usehp) {
+  const MV diff = { mv->row - ref->row, mv->col - ref->col };
   const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff);
   usehp = usehp && vp10_use_mv_hp(ref);
 
-  vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
+  vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints,
+                   &mv_joint_encodings[j]);
   if (mv_joint_vertical(j))
     encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
 
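vp10_encode_mv codes each motion vector as a difference from its reference; the joint type then records which components of that difference are nonzero, so only those components pass through encode_mv_component. A small sketch of the joint classification, with a hypothetical enum mirroring MV_JOINT_TYPE:

    #include <stdint.h>

    typedef struct { int16_t row, col; } mv_t;
    enum { JOINT_ZERO, JOINT_HNZVZ, JOINT_HZVNZ, JOINT_HNZVNZ };

    /* Which components of the mv difference need coding. */
    static int mv_joint(const mv_t *d) {
      if (d->row == 0) return d->col == 0 ? JOINT_ZERO : JOINT_HNZVZ;
      return d->col == 0 ? JOINT_HZVNZ : JOINT_HNZVNZ;
    }
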
@@ -229,21 +225,20 @@ void vp10_encode_mv(VP10_COMP* cpi, vpx_writer* w,
 }
 
 void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
-                              const nmv_context* ctx, int usehp) {
+                               const nmv_context *ctx, int usehp) {
   vp10_cost_tokens(mvjoint, ctx->joints, vp10_mv_joint_tree);
   build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
   build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
 }
 
 static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
-                    const int_mv mvs[2],
-                    nmv_context_counts *counts) {
+                    const int_mv mvs[2], nmv_context_counts *counts) {
   int i;
 
   for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
     const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
-    const MV diff = {mvs[i].as_mv.row - ref->row,
-                     mvs[i].as_mv.col - ref->col};
+    const MV diff = { mvs[i].as_mv.row - ref->row,
+                      mvs[i].as_mv.col - ref->col };
     vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
   }
 }
@@ -267,8 +262,6 @@ void vp10_update_mv_count(ThreadData *td) {
       }
     }
   } else {
-    if (mbmi->mode == NEWMV)
-      inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv);
+    if (mbmi->mode == NEWMV) inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv);
   }
 }
-
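
Note on the encodemv.c hunk above: the d/f/e unpacking decodes a motion-vector
component offset into its integer, fractional-pel, and high-precision fields.
A minimal standalone sketch of that bit layout and its inverse; the
pack_mv_offset helper is illustrative, not from the tree:

#include <assert.h>

/* Inverse of the unpacking above: integer magnitude in the high bits,
   a 2-bit fractional-pel field, and a 1-bit high-precision flag. */
static int pack_mv_offset(int d, int f, int e) {
  return (d << 3) | (f << 1) | (e & 1);
}

int main(void) {
  const int o = pack_mv_offset(5, 2, 1);
  assert((o >> 3) == 5);       /* int mv data */
  assert(((o >> 1) & 3) == 2); /* fractional pel mv data */
  assert((o & 1) == 1);        /* high precision mv data */
  return 0;
}
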
diff --git a/vp10/encoder/encodemv.h b/vp10/encoder/encodemv.h
index 006f6d7c71ccd1c65368fd9418a539bf75d59e5d..6187488388e71e6ebbf1aa32937ea21cdf3ebe1b 100644
--- a/vp10/encoder/encodemv.h
+++ b/vp10/encoder/encodemv.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_ENCODEMV_H_
 #define VP10_ENCODER_ENCODEMV_H_
 
@@ -21,13 +20,13 @@ extern "C" {
 void vp10_entropy_mv_init(void);
 
 void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
-                         nmv_context_counts *const counts);
+                          nmv_context_counts *const counts);
 
-void vp10_encode_mv(VP10_COMP *cpi, vpx_writer* w, const MV* mv, const MV* ref,
-                   const nmv_context* mvctx, int usehp);
+void vp10_encode_mv(VP10_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref,
+                    const nmv_context *mvctx, int usehp);
 
 void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
-                              const nmv_context* mvctx, int usehp);
+                               const nmv_context *mvctx, int usehp);
 
 void vp10_update_mv_count(ThreadData *td);
 
diff --git a/vp10/encoder/encoder.c b/vp10/encoder/encoder.c
index 91c7e43efd1d1bd991e4234fbfb962e9f77e6f31..af68409b9d37b98085a34cdd4af1d148601a21a5 100644
--- a/vp10/encoder/encoder.c
+++ b/vp10/encoder/encoder.c
@@ -58,14 +58,14 @@
 #define AM_SEGMENT_ID_INACTIVE 7
 #define AM_SEGMENT_ID_ACTIVE 0
 
-#define SHARP_FILTER_QTHRESH 0          /* Q threshold for 8-tap sharp filter */
-
-#define ALTREF_HIGH_PRECISION_MV 1      // Whether to use high precision mv
-                                         //  for altref computation.
-#define HIGH_PRECISION_MV_QTHRESH 200   // Q threshold for high precision
-                                         // mv. Choose a very high value for
-                                         // now so that HIGH_PRECISION is always
-                                         // chosen.
+#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */
+
+#define ALTREF_HIGH_PRECISION_MV 1     // Whether to use high precision mv
+                                       //  for altref computation.
+#define HIGH_PRECISION_MV_QTHRESH 200  // Q threshold for high precision
+                                       // mv. Choose a very high value for
+                                       // now so that HIGH_PRECISION is always
+                                       // chosen.
 // #define OUTPUT_YUV_REC
 
 #ifdef OUTPUT_YUV_DENOISED
@@ -97,15 +97,15 @@ static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
     case THREEFIVE:
       *hr = 3;
       *hs = 5;
-    break;
+      break;
     case ONETWO:
       *hr = 1;
       *hs = 2;
-    break;
+      break;
     default:
       *hr = 1;
       *hs = 1;
-       assert(0);
+      assert(0);
       break;
   }
 }
@@ -143,8 +143,8 @@ static void apply_active_map(VP10_COMP *cpi) {
       vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
       // Setting the data to -MAX_LOOP_FILTER will result in the computed loop
       // filter level being zero regardless of the value of seg->abs_delta.
-      vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE,
-                      SEG_LVL_ALT_LF, -MAX_LOOP_FILTER);
+      vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
+                       -MAX_LOOP_FILTER);
     } else {
       vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
       vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
@@ -157,10 +157,8 @@ static void apply_active_map(VP10_COMP *cpi) {
   }
 }
 
-int vp10_set_active_map(VP10_COMP* cpi,
-                       unsigned char* new_map_16x16,
-                       int rows,
-                       int cols) {
+int vp10_set_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+                        int cols) {
   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
     unsigned char *const active_map_8x8 = cpi->active_map.map;
     const int mi_rows = cpi->common.mi_rows;
@@ -186,13 +184,11 @@ int vp10_set_active_map(VP10_COMP* cpi,
   }
 }
 
-int vp10_get_active_map(VP10_COMP* cpi,
-                       unsigned char* new_map_16x16,
-                       int rows,
-                       int cols) {
+int vp10_get_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+                        int cols) {
   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
       new_map_16x16) {
-    unsigned char* const seg_map_8x8 = cpi->segmentation_map;
+    unsigned char *const seg_map_8x8 = cpi->segmentation_map;
     const int mi_rows = cpi->common.mi_rows;
     const int mi_cols = cpi->common.mi_cols;
     memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
@@ -268,19 +264,16 @@ static void vp10_enc_setup_mi(VP10_COMMON *cm) {
 
 static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) {
   cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
-  if (!cm->mip)
-    return 1;
+  if (!cm->mip) return 1;
   cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
-  if (!cm->prev_mip)
-    return 1;
+  if (!cm->prev_mip) return 1;
   cm->mi_alloc_size = mi_size;
 
-  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
-  if (!cm->mi_grid_base)
-    return 1;
-  cm->prev_mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
-  if (!cm->prev_mi_grid_base)
-    return 1;
+  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+  if (!cm->mi_grid_base) return 1;
+  cm->prev_mi_grid_base =
+      (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+  if (!cm->prev_mi_grid_base) return 1;
 
   return 0;
 }
@@ -399,7 +392,7 @@ static void save_coding_context(VP10_COMP *cpi) {
   // restored with a call to vp10_restore_coding_context. These functions are
   // intended for use in a re-code loop in vp10_compress_frame where the
   // quantizer value is adjusted between loop iterations.
-  vp10_copy(cc->nmvjointcost,  cpi->td.mb.nmvjointcost);
+  vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
 
   memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
          MV_VALS * sizeof(*cpi->nmvcosts[0]));
@@ -414,8 +407,8 @@ static void save_coding_context(VP10_COMP *cpi) {
   vp10_copy(cc->segment_pred_probs, cm->segp.pred_probs);
 #endif
 
-  memcpy(cpi->coding_context.last_frame_seg_map_copy,
-         cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
+  memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
+         (cm->mi_rows * cm->mi_cols));
 
   vp10_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
   vp10_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
@@ -442,8 +435,7 @@ static void restore_coding_context(VP10_COMP *cpi) {
   vp10_copy(cm->segp.pred_probs, cc->segment_pred_probs);
 #endif
 
-  memcpy(cm->last_frame_seg_map,
-         cpi->coding_context.last_frame_seg_map_copy,
+  memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
          (cm->mi_rows * cm->mi_cols));
 
   vp10_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
@@ -495,8 +487,8 @@ static void configure_static_seg_features(VP10_COMP *cpi) {
       seg->update_map = 1;
       seg->update_data = 1;
 
-      qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875,
-                                    cm->bit_depth);
+      qi_delta =
+          vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
       vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
       vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
 
@@ -518,7 +510,7 @@ static void configure_static_seg_features(VP10_COMP *cpi) {
         seg->abs_delta = SEGMENT_DELTADATA;
 
         qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
-                                      cm->bit_depth);
+                                       cm->bit_depth);
         vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
         vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
 
@@ -598,18 +590,17 @@ static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
 
   if (!cpi->lookahead)
     cpi->lookahead = vp10_lookahead_init(oxcf->width, oxcf->height,
-                                        cm->subsampling_x, cm->subsampling_y,
+                                         cm->subsampling_x, cm->subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
-                                      cm->use_highbitdepth,
+                                         cm->use_highbitdepth,
 #endif
-                                      oxcf->lag_in_frames);
+                                         oxcf->lag_in_frames);
   if (!cpi->lookahead)
     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                        "Failed to allocate lag buffers");
 
   // TODO(agrange) Check if ARF is enabled and skip allocation if not.
-  if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer,
-                               oxcf->width, oxcf->height,
+  if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
                                cm->subsampling_x, cm->subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                                cm->use_highbitdepth,
@@ -622,8 +613,7 @@ static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
 
 static void alloc_util_frame_buffers(VP10_COMP *cpi) {
   VP10_COMMON *const cm = &cpi->common;
-  if (vpx_realloc_frame_buffer(&cpi->last_frame_uf,
-                               cm->width, cm->height,
+  if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                                cm->use_highbitdepth,
@@ -633,8 +623,7 @@ static void alloc_util_frame_buffers(VP10_COMP *cpi) {
     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                        "Failed to allocate last frame buffer");
 
-  if (vpx_realloc_frame_buffer(&cpi->scaled_source,
-                               cm->width, cm->height,
+  if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                                cm->use_highbitdepth,
@@ -644,8 +633,7 @@ static void alloc_util_frame_buffers(VP10_COMP *cpi) {
     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                        "Failed to allocate scaled source buffer");
 
-  if (vpx_realloc_frame_buffer(&cpi->scaled_last_source,
-                               cm->width, cm->height,
+  if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                                cm->use_highbitdepth,
@@ -656,14 +644,12 @@ static void alloc_util_frame_buffers(VP10_COMP *cpi) {
                        "Failed to allocate scaled last source buffer");
 }
 
-
 static int alloc_context_buffers_ext(VP10_COMP *cpi) {
   VP10_COMMON *cm = &cpi->common;
   int mi_size = cm->mi_cols * cm->mi_rows;
 
   cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
-  if (!cpi->mbmi_ext_base)
-    return 1;
+  if (!cpi->mbmi_ext_base) return 1;
 
   return 0;
 }
@@ -680,7 +666,7 @@ void vp10_alloc_compressor_data(VP10_COMP *cpi) {
   {
     unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
     CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
-        vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
+                    vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
   }
 
   vp10_setup_pc_tree(&cpi->common, &cpi->td);
@@ -697,8 +683,8 @@ static void set_tile_limits(VP10_COMP *cpi) {
   int min_log2_tile_cols, max_log2_tile_cols;
   vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
 
-  cm->log2_tile_cols = clamp(cpi->oxcf.tile_columns,
-                             min_log2_tile_cols, max_log2_tile_cols);
+  cm->log2_tile_cols =
+      clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
   cm->log2_tile_rows = cpi->oxcf.tile_rows;
 }
 
@@ -759,154 +745,122 @@ static void set_rc_buffer_sizes(RATE_CONTROL *rc,
   const int64_t maximum = oxcf->maximum_buffer_size_ms;
 
   rc->starting_buffer_level = starting * bandwidth / 1000;
-  rc->optimal_buffer_level = (optimal == 0) ? bandwidth / 8
-                                            : optimal * bandwidth / 1000;
-  rc->maximum_buffer_size = (maximum == 0) ? bandwidth / 8
-                                           : maximum * bandwidth / 1000;
+  rc->optimal_buffer_level =
+      (optimal == 0) ? bandwidth / 8 : optimal * bandwidth / 1000;
+  rc->maximum_buffer_size =
+      (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
 #define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
-    cpi->fn_ptr[BT].sdf = SDF; \
-    cpi->fn_ptr[BT].sdaf = SDAF; \
-    cpi->fn_ptr[BT].vf = VF; \
-    cpi->fn_ptr[BT].svf = SVF; \
-    cpi->fn_ptr[BT].svaf = SVAF; \
-    cpi->fn_ptr[BT].sdx3f = SDX3F; \
-    cpi->fn_ptr[BT].sdx8f = SDX8F; \
-    cpi->fn_ptr[BT].sdx4df = SDX4DF;
-
-#define MAKE_BFP_SAD_WRAPPER(fnname) \
-static unsigned int fnname##_bits8(const uint8_t *src_ptr, \
-                                   int source_stride, \
-                                   const uint8_t *ref_ptr, \
-                                   int ref_stride) {  \
-  return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \
-} \
-static unsigned int fnname##_bits10(const uint8_t *src_ptr, \
-                                    int source_stride, \
-                                    const uint8_t *ref_ptr, \
-                                    int ref_stride) {  \
-  return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \
-} \
-static unsigned int fnname##_bits12(const uint8_t *src_ptr, \
-                                    int source_stride, \
-                                    const uint8_t *ref_ptr, \
-                                    int ref_stride) {  \
-  return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \
-}
-
-#define MAKE_BFP_SADAVG_WRAPPER(fnname) static unsigned int \
-fnname##_bits8(const uint8_t *src_ptr, \
-               int source_stride, \
-               const uint8_t *ref_ptr, \
-               int ref_stride, \
-               const uint8_t *second_pred) {  \
-  return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \
-} \
-static unsigned int fnname##_bits10(const uint8_t *src_ptr, \
-                                    int source_stride, \
-                                    const uint8_t *ref_ptr, \
-                                    int ref_stride, \
-                                    const uint8_t *second_pred) {  \
-  return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \
-                second_pred) >> 2; \
-} \
-static unsigned int fnname##_bits12(const uint8_t *src_ptr, \
-                                    int source_stride, \
-                                    const uint8_t *ref_ptr, \
-                                    int ref_stride, \
-                                    const uint8_t *second_pred) {  \
-  return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \
-                second_pred) >> 4; \
-}
-
-#define MAKE_BFP_SAD3_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
-                           int source_stride, \
-                           const uint8_t *ref_ptr, \
-                           int  ref_stride, \
-                           unsigned int *sad_array) {  \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
-                            int source_stride, \
-                            const uint8_t *ref_ptr, \
-                            int  ref_stride, \
-                            unsigned int *sad_array) {  \
-  int i; \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-  for (i = 0; i < 3; i++) \
-    sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
-                            int source_stride, \
-                            const uint8_t *ref_ptr, \
-                            int  ref_stride, \
-                            unsigned int *sad_array) {  \
-  int i; \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-  for (i = 0; i < 3; i++) \
-    sad_array[i] >>= 4; \
-}
-
-#define MAKE_BFP_SAD8_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
-                           int source_stride, \
-                           const uint8_t *ref_ptr, \
-                           int  ref_stride, \
-                           unsigned int *sad_array) {  \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
-                            int source_stride, \
-                            const uint8_t *ref_ptr, \
-                            int  ref_stride, \
-                            unsigned int *sad_array) {  \
-  int i; \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-  for (i = 0; i < 8; i++) \
-    sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
-                            int source_stride, \
-                            const uint8_t *ref_ptr, \
-                            int  ref_stride, \
-                            unsigned int *sad_array) {  \
-  int i; \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-  for (i = 0; i < 8; i++) \
-    sad_array[i] >>= 4; \
-}
-#define MAKE_BFP_SAD4D_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
-                           int source_stride, \
-                           const uint8_t* const ref_ptr[], \
-                           int  ref_stride, \
-                           unsigned int *sad_array) {  \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
-                            int source_stride, \
-                            const uint8_t* const ref_ptr[], \
-                            int  ref_stride, \
-                            unsigned int *sad_array) {  \
-  int i; \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-  for (i = 0; i < 4; i++) \
-  sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
-                            int source_stride, \
-                            const uint8_t* const ref_ptr[], \
-                            int  ref_stride, \
-                            unsigned int *sad_array) {  \
-  int i; \
-  fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-  for (i = 0; i < 4; i++) \
-  sad_array[i] >>= 4; \
-}
-
+  cpi->fn_ptr[BT].sdf = SDF;                                           \
+  cpi->fn_ptr[BT].sdaf = SDAF;                                         \
+  cpi->fn_ptr[BT].vf = VF;                                             \
+  cpi->fn_ptr[BT].svf = SVF;                                           \
+  cpi->fn_ptr[BT].svaf = SVAF;                                         \
+  cpi->fn_ptr[BT].sdx3f = SDX3F;                                       \
+  cpi->fn_ptr[BT].sdx8f = SDX8F;                                       \
+  cpi->fn_ptr[BT].sdx4df = SDX4DF;
+
+#define MAKE_BFP_SAD_WRAPPER(fnname)                                           \
+  static unsigned int fnname##_bits8(const uint8_t *src_ptr,                   \
+                                     int source_stride,                        \
+                                     const uint8_t *ref_ptr, int ref_stride) { \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride);                \
+  }                                                                            \
+  static unsigned int fnname##_bits10(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride) {                                                        \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2;           \
+  }                                                                            \
+  static unsigned int fnname##_bits12(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride) {                                                        \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4;           \
+  }
+
+#define MAKE_BFP_SADAVG_WRAPPER(fnname)                                        \
+  static unsigned int fnname##_bits8(                                          \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *second_pred) {                            \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred);   \
+  }                                                                            \
+  static unsigned int fnname##_bits10(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *second_pred) {                            \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
+           2;                                                                  \
+  }                                                                            \
+  static unsigned int fnname##_bits12(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *second_pred) {                            \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
+           4;                                                                  \
+  }
+
+#define MAKE_BFP_SAD3_WRAPPER(fnname)                                    \
+  static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,  \
+                             const uint8_t *ref_ptr, int ref_stride,     \
+                             unsigned int *sad_array) {                  \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+  }                                                                      \
+  static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+                              const uint8_t *ref_ptr, int ref_stride,    \
+                              unsigned int *sad_array) {                 \
+    int i;                                                               \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+    for (i = 0; i < 3; i++) sad_array[i] >>= 2;                          \
+  }                                                                      \
+  static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+                              const uint8_t *ref_ptr, int ref_stride,    \
+                              unsigned int *sad_array) {                 \
+    int i;                                                               \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+    for (i = 0; i < 3; i++) sad_array[i] >>= 4;                          \
+  }
+
+#define MAKE_BFP_SAD8_WRAPPER(fnname)                                    \
+  static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,  \
+                             const uint8_t *ref_ptr, int ref_stride,     \
+                             unsigned int *sad_array) {                  \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+  }                                                                      \
+  static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+                              const uint8_t *ref_ptr, int ref_stride,    \
+                              unsigned int *sad_array) {                 \
+    int i;                                                               \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+    for (i = 0; i < 8; i++) sad_array[i] >>= 2;                          \
+  }                                                                      \
+  static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+                              const uint8_t *ref_ptr, int ref_stride,    \
+                              unsigned int *sad_array) {                 \
+    int i;                                                               \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+    for (i = 0; i < 8; i++) sad_array[i] >>= 4;                          \
+  }
+#define MAKE_BFP_SAD4D_WRAPPER(fnname)                                        \
+  static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,       \
+                             const uint8_t *const ref_ptr[], int ref_stride,  \
+                             unsigned int *sad_array) {                       \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
+  }                                                                           \
+  static void fnname##_bits10(const uint8_t *src_ptr, int source_stride,      \
+                              const uint8_t *const ref_ptr[], int ref_stride, \
+                              unsigned int *sad_array) {                      \
+    int i;                                                                    \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
+    for (i = 0; i < 4; i++) sad_array[i] >>= 2;                               \
+  }                                                                           \
+  static void fnname##_bits12(const uint8_t *src_ptr, int source_stride,      \
+                              const uint8_t *const ref_ptr[], int ref_stride, \
+                              unsigned int *sad_array) {                      \
+    int i;                                                                    \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
+    for (i = 0; i < 4; i++) sad_array[i] >>= 4;                               \
+  }
+
+/* clang-format off */
 MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
 MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d)
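
Note on the wrapper macros above: each MAKE_BFP_* macro token-pastes
_bits8/_bits10/_bits12 variants of a SAD function, and the 10- and 12-bit
variants shift the result right by 2 and 4. Absolute differences of b-bit
samples are larger than their 8-bit counterparts by a factor of 2^(b-8), so
the shift renormalizes every bit depth to the same 8-bit scale and keeps the
shared thresholds and mode costs comparable. A standalone sketch of that
normalization; the function is illustrative, not from the tree:

#include <stdint.h>
#include <stdlib.h>

/* SAD over n samples held in 16-bit containers, renormalized to the
   8-bit scale: shift by 0 for 8-bit input, 2 for 10-bit, 4 for 12-bit. */
static unsigned int sad_normalized(const uint16_t *src, const uint16_t *ref,
                                   int n, int bit_depth) {
  unsigned int sad = 0;
  int i;
  for (i = 0; i < n; i++) sad += (unsigned int)abs((int)src[i] - (int)ref[i]);
  return sad >> (bit_depth - 8);
}
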
@@ -962,410 +916,269 @@ MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x4_avg)
 MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad4x4x3)
 MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8)
 MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
+/* clang-format on */
 
-static void  highbd_set_var_fns(VP10_COMP *const cpi) {
+static void highbd_set_var_fns(VP10_COMP *const cpi) {
   VP10_COMMON *const cm = &cpi->common;
   if (cm->use_highbitdepth) {
     switch (cm->bit_depth) {
       case VPX_BITS_8:
-        HIGHBD_BFP(BLOCK_32X16,
-                   vpx_highbd_sad32x16_bits8,
-                   vpx_highbd_sad32x16_avg_bits8,
-                   vpx_highbd_8_variance32x16,
+        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
+                   vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
                    vpx_highbd_8_sub_pixel_variance32x16,
-                   vpx_highbd_8_sub_pixel_avg_variance32x16,
-                   NULL,
-                   NULL,
+                   vpx_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
                    vpx_highbd_sad32x16x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_16X32,
-                   vpx_highbd_sad16x32_bits8,
-                   vpx_highbd_sad16x32_avg_bits8,
-                   vpx_highbd_8_variance16x32,
+        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
+                   vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
                    vpx_highbd_8_sub_pixel_variance16x32,
-                   vpx_highbd_8_sub_pixel_avg_variance16x32,
-                   NULL,
-                   NULL,
+                   vpx_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
                    vpx_highbd_sad16x32x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_64X32,
-                   vpx_highbd_sad64x32_bits8,
-                   vpx_highbd_sad64x32_avg_bits8,
-                   vpx_highbd_8_variance64x32,
+        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
+                   vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
                    vpx_highbd_8_sub_pixel_variance64x32,
-                   vpx_highbd_8_sub_pixel_avg_variance64x32,
-                   NULL,
-                   NULL,
+                   vpx_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
                    vpx_highbd_sad64x32x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_32X64,
-                   vpx_highbd_sad32x64_bits8,
-                   vpx_highbd_sad32x64_avg_bits8,
-                   vpx_highbd_8_variance32x64,
+        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
+                   vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
                    vpx_highbd_8_sub_pixel_variance32x64,
-                   vpx_highbd_8_sub_pixel_avg_variance32x64,
-                   NULL,
-                   NULL,
+                   vpx_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
                    vpx_highbd_sad32x64x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_32X32,
-                   vpx_highbd_sad32x32_bits8,
-                   vpx_highbd_sad32x32_avg_bits8,
-                   vpx_highbd_8_variance32x32,
+        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
+                   vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
                    vpx_highbd_8_sub_pixel_variance32x32,
                    vpx_highbd_8_sub_pixel_avg_variance32x32,
-                   vpx_highbd_sad32x32x3_bits8,
-                   vpx_highbd_sad32x32x8_bits8,
+                   vpx_highbd_sad32x32x3_bits8, vpx_highbd_sad32x32x8_bits8,
                    vpx_highbd_sad32x32x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_64X64,
-                   vpx_highbd_sad64x64_bits8,
-                   vpx_highbd_sad64x64_avg_bits8,
-                   vpx_highbd_8_variance64x64,
+        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
+                   vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
                    vpx_highbd_8_sub_pixel_variance64x64,
                    vpx_highbd_8_sub_pixel_avg_variance64x64,
-                   vpx_highbd_sad64x64x3_bits8,
-                   vpx_highbd_sad64x64x8_bits8,
+                   vpx_highbd_sad64x64x3_bits8, vpx_highbd_sad64x64x8_bits8,
                    vpx_highbd_sad64x64x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_16X16,
-                   vpx_highbd_sad16x16_bits8,
-                   vpx_highbd_sad16x16_avg_bits8,
-                   vpx_highbd_8_variance16x16,
+        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
+                   vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
                    vpx_highbd_8_sub_pixel_variance16x16,
                    vpx_highbd_8_sub_pixel_avg_variance16x16,
-                   vpx_highbd_sad16x16x3_bits8,
-                   vpx_highbd_sad16x16x8_bits8,
+                   vpx_highbd_sad16x16x3_bits8, vpx_highbd_sad16x16x8_bits8,
                    vpx_highbd_sad16x16x4d_bits8)
 
-        HIGHBD_BFP(BLOCK_16X8,
-                   vpx_highbd_sad16x8_bits8,
-                   vpx_highbd_sad16x8_avg_bits8,
-                   vpx_highbd_8_variance16x8,
-                   vpx_highbd_8_sub_pixel_variance16x8,
-                   vpx_highbd_8_sub_pixel_avg_variance16x8,
-                   vpx_highbd_sad16x8x3_bits8,
-                   vpx_highbd_sad16x8x8_bits8,
-                   vpx_highbd_sad16x8x4d_bits8)
-
-        HIGHBD_BFP(BLOCK_8X16,
-                   vpx_highbd_sad8x16_bits8,
-                   vpx_highbd_sad8x16_avg_bits8,
-                   vpx_highbd_8_variance8x16,
-                   vpx_highbd_8_sub_pixel_variance8x16,
-                   vpx_highbd_8_sub_pixel_avg_variance8x16,
-                   vpx_highbd_sad8x16x3_bits8,
-                   vpx_highbd_sad8x16x8_bits8,
-                   vpx_highbd_sad8x16x4d_bits8)
-
-        HIGHBD_BFP(BLOCK_8X8,
-                   vpx_highbd_sad8x8_bits8,
-                   vpx_highbd_sad8x8_avg_bits8,
-                   vpx_highbd_8_variance8x8,
-                   vpx_highbd_8_sub_pixel_variance8x8,
-                   vpx_highbd_8_sub_pixel_avg_variance8x8,
-                   vpx_highbd_sad8x8x3_bits8,
-                   vpx_highbd_sad8x8x8_bits8,
-                   vpx_highbd_sad8x8x4d_bits8)
-
-        HIGHBD_BFP(BLOCK_8X4,
-                   vpx_highbd_sad8x4_bits8,
-                   vpx_highbd_sad8x4_avg_bits8,
-                   vpx_highbd_8_variance8x4,
+        HIGHBD_BFP(
+            BLOCK_16X8, vpx_highbd_sad16x8_bits8, vpx_highbd_sad16x8_avg_bits8,
+            vpx_highbd_8_variance16x8, vpx_highbd_8_sub_pixel_variance16x8,
+            vpx_highbd_8_sub_pixel_avg_variance16x8, vpx_highbd_sad16x8x3_bits8,
+            vpx_highbd_sad16x8x8_bits8, vpx_highbd_sad16x8x4d_bits8)
+
+        HIGHBD_BFP(
+            BLOCK_8X16, vpx_highbd_sad8x16_bits8, vpx_highbd_sad8x16_avg_bits8,
+            vpx_highbd_8_variance8x16, vpx_highbd_8_sub_pixel_variance8x16,
+            vpx_highbd_8_sub_pixel_avg_variance8x16, vpx_highbd_sad8x16x3_bits8,
+            vpx_highbd_sad8x16x8_bits8, vpx_highbd_sad8x16x4d_bits8)
+
+        HIGHBD_BFP(
+            BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
+            vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
+            vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits8,
+            vpx_highbd_sad8x8x8_bits8, vpx_highbd_sad8x8x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits8,
+                   vpx_highbd_sad8x4_avg_bits8, vpx_highbd_8_variance8x4,
                    vpx_highbd_8_sub_pixel_variance8x4,
-                   vpx_highbd_8_sub_pixel_avg_variance8x4,
-                   NULL,
-                   vpx_highbd_sad8x4x8_bits8,
-                   vpx_highbd_sad8x4x4d_bits8)
-
-        HIGHBD_BFP(BLOCK_4X8,
-                   vpx_highbd_sad4x8_bits8,
-                   vpx_highbd_sad4x8_avg_bits8,
-                   vpx_highbd_8_variance4x8,
+                   vpx_highbd_8_sub_pixel_avg_variance8x4, NULL,
+                   vpx_highbd_sad8x4x8_bits8, vpx_highbd_sad8x4x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits8,
+                   vpx_highbd_sad4x8_avg_bits8, vpx_highbd_8_variance4x8,
                    vpx_highbd_8_sub_pixel_variance4x8,
-                   vpx_highbd_8_sub_pixel_avg_variance4x8,
-                   NULL,
-                   vpx_highbd_sad4x8x8_bits8,
-                   vpx_highbd_sad4x8x4d_bits8)
-
-        HIGHBD_BFP(BLOCK_4X4,
-                   vpx_highbd_sad4x4_bits8,
-                   vpx_highbd_sad4x4_avg_bits8,
-                   vpx_highbd_8_variance4x4,
-                   vpx_highbd_8_sub_pixel_variance4x4,
-                   vpx_highbd_8_sub_pixel_avg_variance4x4,
-                   vpx_highbd_sad4x4x3_bits8,
-                   vpx_highbd_sad4x4x8_bits8,
-                   vpx_highbd_sad4x4x4d_bits8)
+                   vpx_highbd_8_sub_pixel_avg_variance4x8, NULL,
+                   vpx_highbd_sad4x8x8_bits8, vpx_highbd_sad4x8x4d_bits8)
+
+        HIGHBD_BFP(
+            BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
+            vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
+            vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits8,
+            vpx_highbd_sad4x4x8_bits8, vpx_highbd_sad4x4x4d_bits8)
         break;
 
       case VPX_BITS_10:
-        HIGHBD_BFP(BLOCK_32X16,
-                   vpx_highbd_sad32x16_bits10,
-                   vpx_highbd_sad32x16_avg_bits10,
-                   vpx_highbd_10_variance32x16,
+        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
+                   vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
                    vpx_highbd_10_sub_pixel_variance32x16,
-                   vpx_highbd_10_sub_pixel_avg_variance32x16,
-                   NULL,
-                   NULL,
+                   vpx_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
                    vpx_highbd_sad32x16x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_16X32,
-                   vpx_highbd_sad16x32_bits10,
-                   vpx_highbd_sad16x32_avg_bits10,
-                   vpx_highbd_10_variance16x32,
+        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
+                   vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
                    vpx_highbd_10_sub_pixel_variance16x32,
-                   vpx_highbd_10_sub_pixel_avg_variance16x32,
-                   NULL,
-                   NULL,
+                   vpx_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
                    vpx_highbd_sad16x32x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_64X32,
-                   vpx_highbd_sad64x32_bits10,
-                   vpx_highbd_sad64x32_avg_bits10,
-                   vpx_highbd_10_variance64x32,
+        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
+                   vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
                    vpx_highbd_10_sub_pixel_variance64x32,
-                   vpx_highbd_10_sub_pixel_avg_variance64x32,
-                   NULL,
-                   NULL,
+                   vpx_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
                    vpx_highbd_sad64x32x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_32X64,
-                   vpx_highbd_sad32x64_bits10,
-                   vpx_highbd_sad32x64_avg_bits10,
-                   vpx_highbd_10_variance32x64,
+        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
+                   vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
                    vpx_highbd_10_sub_pixel_variance32x64,
-                   vpx_highbd_10_sub_pixel_avg_variance32x64,
-                   NULL,
-                   NULL,
+                   vpx_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
                    vpx_highbd_sad32x64x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_32X32,
-                   vpx_highbd_sad32x32_bits10,
-                   vpx_highbd_sad32x32_avg_bits10,
-                   vpx_highbd_10_variance32x32,
+        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
+                   vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
                    vpx_highbd_10_sub_pixel_variance32x32,
                    vpx_highbd_10_sub_pixel_avg_variance32x32,
-                   vpx_highbd_sad32x32x3_bits10,
-                   vpx_highbd_sad32x32x8_bits10,
+                   vpx_highbd_sad32x32x3_bits10, vpx_highbd_sad32x32x8_bits10,
                    vpx_highbd_sad32x32x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_64X64,
-                   vpx_highbd_sad64x64_bits10,
-                   vpx_highbd_sad64x64_avg_bits10,
-                   vpx_highbd_10_variance64x64,
+        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
+                   vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
                    vpx_highbd_10_sub_pixel_variance64x64,
                    vpx_highbd_10_sub_pixel_avg_variance64x64,
-                   vpx_highbd_sad64x64x3_bits10,
-                   vpx_highbd_sad64x64x8_bits10,
+                   vpx_highbd_sad64x64x3_bits10, vpx_highbd_sad64x64x8_bits10,
                    vpx_highbd_sad64x64x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_16X16,
-                   vpx_highbd_sad16x16_bits10,
-                   vpx_highbd_sad16x16_avg_bits10,
-                   vpx_highbd_10_variance16x16,
+        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
+                   vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
                    vpx_highbd_10_sub_pixel_variance16x16,
                    vpx_highbd_10_sub_pixel_avg_variance16x16,
-                   vpx_highbd_sad16x16x3_bits10,
-                   vpx_highbd_sad16x16x8_bits10,
+                   vpx_highbd_sad16x16x3_bits10, vpx_highbd_sad16x16x8_bits10,
                    vpx_highbd_sad16x16x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_16X8,
-                   vpx_highbd_sad16x8_bits10,
-                   vpx_highbd_sad16x8_avg_bits10,
-                   vpx_highbd_10_variance16x8,
+        HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
+                   vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
                    vpx_highbd_10_sub_pixel_variance16x8,
                    vpx_highbd_10_sub_pixel_avg_variance16x8,
-                   vpx_highbd_sad16x8x3_bits10,
-                   vpx_highbd_sad16x8x8_bits10,
+                   vpx_highbd_sad16x8x3_bits10, vpx_highbd_sad16x8x8_bits10,
                    vpx_highbd_sad16x8x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_8X16,
-                   vpx_highbd_sad8x16_bits10,
-                   vpx_highbd_sad8x16_avg_bits10,
-                   vpx_highbd_10_variance8x16,
+        HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
+                   vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
                    vpx_highbd_10_sub_pixel_variance8x16,
                    vpx_highbd_10_sub_pixel_avg_variance8x16,
-                   vpx_highbd_sad8x16x3_bits10,
-                   vpx_highbd_sad8x16x8_bits10,
+                   vpx_highbd_sad8x16x3_bits10, vpx_highbd_sad8x16x8_bits10,
                    vpx_highbd_sad8x16x4d_bits10)
 
-        HIGHBD_BFP(BLOCK_8X8,
-                   vpx_highbd_sad8x8_bits10,
-                   vpx_highbd_sad8x8_avg_bits10,
-                   vpx_highbd_10_variance8x8,
-                   vpx_highbd_10_sub_pixel_variance8x8,
-                   vpx_highbd_10_sub_pixel_avg_variance8x8,
-                   vpx_highbd_sad8x8x3_bits10,
-                   vpx_highbd_sad8x8x8_bits10,
-                   vpx_highbd_sad8x8x4d_bits10)
-
-        HIGHBD_BFP(BLOCK_8X4,
-                   vpx_highbd_sad8x4_bits10,
-                   vpx_highbd_sad8x4_avg_bits10,
-                   vpx_highbd_10_variance8x4,
+        HIGHBD_BFP(
+            BLOCK_8X8, vpx_highbd_sad8x8_bits10, vpx_highbd_sad8x8_avg_bits10,
+            vpx_highbd_10_variance8x8, vpx_highbd_10_sub_pixel_variance8x8,
+            vpx_highbd_10_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits10,
+            vpx_highbd_sad8x8x8_bits10, vpx_highbd_sad8x8x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
+                   vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
                    vpx_highbd_10_sub_pixel_variance8x4,
-                   vpx_highbd_10_sub_pixel_avg_variance8x4,
-                   NULL,
-                   vpx_highbd_sad8x4x8_bits10,
-                   vpx_highbd_sad8x4x4d_bits10)
-
-        HIGHBD_BFP(BLOCK_4X8,
-                   vpx_highbd_sad4x8_bits10,
-                   vpx_highbd_sad4x8_avg_bits10,
-                   vpx_highbd_10_variance4x8,
+                   vpx_highbd_10_sub_pixel_avg_variance8x4, NULL,
+                   vpx_highbd_sad8x4x8_bits10, vpx_highbd_sad8x4x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
+                   vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
                    vpx_highbd_10_sub_pixel_variance4x8,
-                   vpx_highbd_10_sub_pixel_avg_variance4x8,
-                   NULL,
-                   vpx_highbd_sad4x8x8_bits10,
-                   vpx_highbd_sad4x8x4d_bits10)
-
-        HIGHBD_BFP(BLOCK_4X4,
-                   vpx_highbd_sad4x4_bits10,
-                   vpx_highbd_sad4x4_avg_bits10,
-                   vpx_highbd_10_variance4x4,
-                   vpx_highbd_10_sub_pixel_variance4x4,
-                   vpx_highbd_10_sub_pixel_avg_variance4x4,
-                   vpx_highbd_sad4x4x3_bits10,
-                   vpx_highbd_sad4x4x8_bits10,
-                   vpx_highbd_sad4x4x4d_bits10)
+                   vpx_highbd_10_sub_pixel_avg_variance4x8, NULL,
+                   vpx_highbd_sad4x8x8_bits10, vpx_highbd_sad4x8x4d_bits10)
+
+        HIGHBD_BFP(
+            BLOCK_4X4, vpx_highbd_sad4x4_bits10, vpx_highbd_sad4x4_avg_bits10,
+            vpx_highbd_10_variance4x4, vpx_highbd_10_sub_pixel_variance4x4,
+            vpx_highbd_10_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits10,
+            vpx_highbd_sad4x4x8_bits10, vpx_highbd_sad4x4x4d_bits10)
         break;
 
       case VPX_BITS_12:
-        HIGHBD_BFP(BLOCK_32X16,
-                   vpx_highbd_sad32x16_bits12,
-                   vpx_highbd_sad32x16_avg_bits12,
-                   vpx_highbd_12_variance32x16,
+        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
+                   vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
                    vpx_highbd_12_sub_pixel_variance32x16,
-                   vpx_highbd_12_sub_pixel_avg_variance32x16,
-                   NULL,
-                   NULL,
+                   vpx_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
                    vpx_highbd_sad32x16x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_16X32,
-                   vpx_highbd_sad16x32_bits12,
-                   vpx_highbd_sad16x32_avg_bits12,
-                   vpx_highbd_12_variance16x32,
+        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
+                   vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
                    vpx_highbd_12_sub_pixel_variance16x32,
-                   vpx_highbd_12_sub_pixel_avg_variance16x32,
-                   NULL,
-                   NULL,
+                   vpx_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
                    vpx_highbd_sad16x32x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_64X32,
-                   vpx_highbd_sad64x32_bits12,
-                   vpx_highbd_sad64x32_avg_bits12,
-                   vpx_highbd_12_variance64x32,
+        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
+                   vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
                    vpx_highbd_12_sub_pixel_variance64x32,
-                   vpx_highbd_12_sub_pixel_avg_variance64x32,
-                   NULL,
-                   NULL,
+                   vpx_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
                    vpx_highbd_sad64x32x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_32X64,
-                   vpx_highbd_sad32x64_bits12,
-                   vpx_highbd_sad32x64_avg_bits12,
-                   vpx_highbd_12_variance32x64,
+        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
+                   vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
                    vpx_highbd_12_sub_pixel_variance32x64,
-                   vpx_highbd_12_sub_pixel_avg_variance32x64,
-                   NULL,
-                   NULL,
+                   vpx_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
                    vpx_highbd_sad32x64x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_32X32,
-                   vpx_highbd_sad32x32_bits12,
-                   vpx_highbd_sad32x32_avg_bits12,
-                   vpx_highbd_12_variance32x32,
+        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
+                   vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
                    vpx_highbd_12_sub_pixel_variance32x32,
                    vpx_highbd_12_sub_pixel_avg_variance32x32,
-                   vpx_highbd_sad32x32x3_bits12,
-                   vpx_highbd_sad32x32x8_bits12,
+                   vpx_highbd_sad32x32x3_bits12, vpx_highbd_sad32x32x8_bits12,
                    vpx_highbd_sad32x32x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_64X64,
-                   vpx_highbd_sad64x64_bits12,
-                   vpx_highbd_sad64x64_avg_bits12,
-                   vpx_highbd_12_variance64x64,
+        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
+                   vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
                    vpx_highbd_12_sub_pixel_variance64x64,
                    vpx_highbd_12_sub_pixel_avg_variance64x64,
-                   vpx_highbd_sad64x64x3_bits12,
-                   vpx_highbd_sad64x64x8_bits12,
+                   vpx_highbd_sad64x64x3_bits12, vpx_highbd_sad64x64x8_bits12,
                    vpx_highbd_sad64x64x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_16X16,
-                   vpx_highbd_sad16x16_bits12,
-                   vpx_highbd_sad16x16_avg_bits12,
-                   vpx_highbd_12_variance16x16,
+        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
+                   vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
                    vpx_highbd_12_sub_pixel_variance16x16,
                    vpx_highbd_12_sub_pixel_avg_variance16x16,
-                   vpx_highbd_sad16x16x3_bits12,
-                   vpx_highbd_sad16x16x8_bits12,
+                   vpx_highbd_sad16x16x3_bits12, vpx_highbd_sad16x16x8_bits12,
                    vpx_highbd_sad16x16x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_16X8,
-                   vpx_highbd_sad16x8_bits12,
-                   vpx_highbd_sad16x8_avg_bits12,
-                   vpx_highbd_12_variance16x8,
+        HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
+                   vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
                    vpx_highbd_12_sub_pixel_variance16x8,
                    vpx_highbd_12_sub_pixel_avg_variance16x8,
-                   vpx_highbd_sad16x8x3_bits12,
-                   vpx_highbd_sad16x8x8_bits12,
+                   vpx_highbd_sad16x8x3_bits12, vpx_highbd_sad16x8x8_bits12,
                    vpx_highbd_sad16x8x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_8X16,
-                   vpx_highbd_sad8x16_bits12,
-                   vpx_highbd_sad8x16_avg_bits12,
-                   vpx_highbd_12_variance8x16,
+        HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
+                   vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
                    vpx_highbd_12_sub_pixel_variance8x16,
                    vpx_highbd_12_sub_pixel_avg_variance8x16,
-                   vpx_highbd_sad8x16x3_bits12,
-                   vpx_highbd_sad8x16x8_bits12,
+                   vpx_highbd_sad8x16x3_bits12, vpx_highbd_sad8x16x8_bits12,
                    vpx_highbd_sad8x16x4d_bits12)
 
-        HIGHBD_BFP(BLOCK_8X8,
-                   vpx_highbd_sad8x8_bits12,
-                   vpx_highbd_sad8x8_avg_bits12,
-                   vpx_highbd_12_variance8x8,
-                   vpx_highbd_12_sub_pixel_variance8x8,
-                   vpx_highbd_12_sub_pixel_avg_variance8x8,
-                   vpx_highbd_sad8x8x3_bits12,
-                   vpx_highbd_sad8x8x8_bits12,
-                   vpx_highbd_sad8x8x4d_bits12)
-
-        HIGHBD_BFP(BLOCK_8X4,
-                   vpx_highbd_sad8x4_bits12,
-                   vpx_highbd_sad8x4_avg_bits12,
-                   vpx_highbd_12_variance8x4,
+        HIGHBD_BFP(
+            BLOCK_8X8, vpx_highbd_sad8x8_bits12, vpx_highbd_sad8x8_avg_bits12,
+            vpx_highbd_12_variance8x8, vpx_highbd_12_sub_pixel_variance8x8,
+            vpx_highbd_12_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits12,
+            vpx_highbd_sad8x8x8_bits12, vpx_highbd_sad8x8x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
+                   vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
                    vpx_highbd_12_sub_pixel_variance8x4,
-                   vpx_highbd_12_sub_pixel_avg_variance8x4,
-                   NULL,
-                   vpx_highbd_sad8x4x8_bits12,
-                   vpx_highbd_sad8x4x4d_bits12)
-
-        HIGHBD_BFP(BLOCK_4X8,
-                   vpx_highbd_sad4x8_bits12,
-                   vpx_highbd_sad4x8_avg_bits12,
-                   vpx_highbd_12_variance4x8,
+                   vpx_highbd_12_sub_pixel_avg_variance8x4, NULL,
+                   vpx_highbd_sad8x4x8_bits12, vpx_highbd_sad8x4x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
+                   vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
                    vpx_highbd_12_sub_pixel_variance4x8,
-                   vpx_highbd_12_sub_pixel_avg_variance4x8,
-                   NULL,
-                   vpx_highbd_sad4x8x8_bits12,
-                   vpx_highbd_sad4x8x4d_bits12)
-
-        HIGHBD_BFP(BLOCK_4X4,
-                   vpx_highbd_sad4x4_bits12,
-                   vpx_highbd_sad4x4_avg_bits12,
-                   vpx_highbd_12_variance4x4,
-                   vpx_highbd_12_sub_pixel_variance4x4,
-                   vpx_highbd_12_sub_pixel_avg_variance4x4,
-                   vpx_highbd_sad4x4x3_bits12,
-                   vpx_highbd_sad4x4x8_bits12,
-                   vpx_highbd_sad4x4x4d_bits12)
+                   vpx_highbd_12_sub_pixel_avg_variance4x8, NULL,
+                   vpx_highbd_sad4x8x8_bits12, vpx_highbd_sad4x8x4d_bits12)
+
+        HIGHBD_BFP(
+            BLOCK_4X4, vpx_highbd_sad4x4_bits12, vpx_highbd_sad4x4_avg_bits12,
+            vpx_highbd_12_variance4x4, vpx_highbd_12_sub_pixel_variance4x4,
+            vpx_highbd_12_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits12,
+            vpx_highbd_sad4x4x8_bits12, vpx_highbd_sad4x4x4d_bits12)
         break;
 
       default:
-        assert(0 && "cm->bit_depth should be VPX_BITS_8, "
-                    "VPX_BITS_10 or VPX_BITS_12");
+        assert(0 &&
+               "cm->bit_depth should be VPX_BITS_8, "
+               "VPX_BITS_10 or VPX_BITS_12");
     }
   }
 }
@@ -1380,8 +1193,7 @@ static void realloc_segmentation_maps(VP10_COMP *cpi) {
                   vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
 
   // Create a map used for cyclic background refresh.
-  if (cpi->cyclic_refresh)
-    vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+  if (cpi->cyclic_refresh) vp10_cyclic_refresh_free(cpi->cyclic_refresh);
   CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
                   vp10_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
 
@@ -1401,8 +1213,7 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
   VP10_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
 
-  if (cm->profile != oxcf->profile)
-    cm->profile = oxcf->profile;
+  if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
   cm->bit_depth = oxcf->bit_depth;
   cm->color_space = oxcf->color_space;
   cm->color_range = oxcf->color_range;
@@ -1425,10 +1236,11 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
 
   cpi->refresh_golden_frame = 0;
   cpi->refresh_last_frame = 1;
-  cm->refresh_frame_context =
-      oxcf->error_resilient_mode ? REFRESH_FRAME_CONTEXT_OFF :
-          oxcf->frame_parallel_decoding_mode ? REFRESH_FRAME_CONTEXT_FORWARD
-                                             : REFRESH_FRAME_CONTEXT_BACKWARD;
+  cm->refresh_frame_context = oxcf->error_resilient_mode
+                                  ? REFRESH_FRAME_CONTEXT_OFF
+                                  : oxcf->frame_parallel_decoding_mode
+                                        ? REFRESH_FRAME_CONTEXT_FORWARD
+                                        : REFRESH_FRAME_CONTEXT_BACKWARD;
   cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
 
   vp10_reset_segment_features(cm);
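
Note on the refresh_frame_context ternary above: the reflowed expression
encodes a three-way policy whose precedence is easier to audit as a cascade.
An equivalent standalone sketch; the enum is redeclared locally purely for
illustration:

typedef enum {
  REFRESH_FRAME_CONTEXT_OFF,
  REFRESH_FRAME_CONTEXT_FORWARD,
  REFRESH_FRAME_CONTEXT_BACKWARD
} REFRESH_FRAME_CONTEXT_MODE;

/* Error-resilient streams never refresh frame contexts; frame-parallel
   decoding forces forward updates; the default is backward updates. */
static REFRESH_FRAME_CONTEXT_MODE pick_refresh_mode(int error_resilient,
                                                    int frame_parallel) {
  if (error_resilient) return REFRESH_FRAME_CONTEXT_OFF;
  if (frame_parallel) return REFRESH_FRAME_CONTEXT_FORWARD;
  return REFRESH_FRAME_CONTEXT_BACKWARD;
}
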
@@ -1500,7 +1312,7 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
 #ifndef M_LOG2_E
 #define M_LOG2_E 0.693147180559945309417
 #endif
-#define log2f(x) (log (x) / (float) M_LOG2_E)
+#define log2f(x) (log(x) / (float)M_LOG2_E)
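+// Note: despite its name, M_LOG2_E above holds ln(2) (~0.6931), so this
+// macro computes log2(x) via the natural logarithm.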
 
 static void cal_nmvjointsadcost(int *mvjointsadcost) {
   mvjointsadcost[0] = 600;
@@ -1539,15 +1351,13 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
   } while (++i <= MV_MAX);
 }
 
-
 VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
-                                BufferPool *const pool) {
+                                  BufferPool *const pool) {
   unsigned int i;
   VP10_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP10_COMP));
   VP10_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
 
-  if (!cm)
-    return NULL;
+  if (!cm) return NULL;
 
   vp10_zero(*cpi);
 
@@ -1562,11 +1372,10 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
   cm->free_mi = vp10_enc_free_mi;
   cm->setup_mi = vp10_enc_setup_mi;
 
-  CHECK_MEM_ERROR(cm, cm->fc,
-                  (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
-  CHECK_MEM_ERROR(cm, cm->frame_contexts,
-                  (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS,
-                  sizeof(*cm->frame_contexts)));
+  CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+  CHECK_MEM_ERROR(
+      cm, cm->frame_contexts,
+      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
 
   cpi->resize_state = 0;
   cpi->resize_avg_qp = 0;
@@ -1599,11 +1408,11 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
   CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
                   vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
 
-  for (i = 0; i < (sizeof(cpi->mbgraph_stats) /
-                   sizeof(cpi->mbgraph_stats[0])); i++) {
-    CHECK_MEM_ERROR(cm, cpi->mbgraph_stats[i].mb_stats,
-                    vpx_calloc(cm->MBs *
-                               sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
+  for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
+       i++) {
+    CHECK_MEM_ERROR(
+        cm, cpi->mbgraph_stats[i].mb_stats,
+        vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
   }
 
 #if CONFIG_FP_MB_STATS
@@ -1647,7 +1456,7 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
   }
 
   if (cpi->b_calculate_ssimg) {
-    cpi->ssimg.worst= 100.0;
+    cpi->ssimg.worst = 100.0;
   }
   cpi->fastssim.worst = 100.0;
 
@@ -1659,8 +1468,8 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
   }
 
   if (cpi->b_calculate_consistency) {
-    cpi->ssim_vars = vpx_malloc(sizeof(*cpi->ssim_vars) *
-                                4 * cpi->common.mi_rows * cpi->common.mi_cols);
+    cpi->ssim_vars = vpx_malloc(sizeof(*cpi->ssim_vars) * 4 *
+                                cpi->common.mi_rows * cpi->common.mi_cols);
     cpi->worst_consistency = 100.0;
   }
 
@@ -1725,79 +1534,71 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
   vp10_set_speed_features_framesize_dependent(cpi);
 
   // Allocate memory to store variances for a frame.
-  CHECK_MEM_ERROR(cm, cpi->source_diff_var,
-                  vpx_calloc(cm->MBs, sizeof(diff)));
+  CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
   cpi->source_var_thresh = 0;
   cpi->frames_till_next_var_check = 0;
 
-#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF)\
-    cpi->fn_ptr[BT].sdf            = SDF; \
-    cpi->fn_ptr[BT].sdaf           = SDAF; \
-    cpi->fn_ptr[BT].vf             = VF; \
-    cpi->fn_ptr[BT].svf            = SVF; \
-    cpi->fn_ptr[BT].svaf           = SVAF; \
-    cpi->fn_ptr[BT].sdx3f          = SDX3F; \
-    cpi->fn_ptr[BT].sdx8f          = SDX8F; \
-    cpi->fn_ptr[BT].sdx4df         = SDX4DF;
-
-  BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg,
-      vpx_variance32x16, vpx_sub_pixel_variance32x16,
-      vpx_sub_pixel_avg_variance32x16, NULL, NULL, vpx_sad32x16x4d)
-
-  BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg,
-      vpx_variance16x32, vpx_sub_pixel_variance16x32,
-      vpx_sub_pixel_avg_variance16x32, NULL, NULL, vpx_sad16x32x4d)
-
-  BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg,
-      vpx_variance64x32, vpx_sub_pixel_variance64x32,
-      vpx_sub_pixel_avg_variance64x32, NULL, NULL, vpx_sad64x32x4d)
-
-  BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg,
-      vpx_variance32x64, vpx_sub_pixel_variance32x64,
-      vpx_sub_pixel_avg_variance32x64, NULL, NULL, vpx_sad32x64x4d)
-
-  BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg,
-      vpx_variance32x32, vpx_sub_pixel_variance32x32,
-      vpx_sub_pixel_avg_variance32x32, vpx_sad32x32x3, vpx_sad32x32x8,
-      vpx_sad32x32x4d)
-
-  BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg,
-      vpx_variance64x64, vpx_sub_pixel_variance64x64,
-      vpx_sub_pixel_avg_variance64x64, vpx_sad64x64x3, vpx_sad64x64x8,
-      vpx_sad64x64x4d)
-
-  BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg,
-      vpx_variance16x16, vpx_sub_pixel_variance16x16,
-      vpx_sub_pixel_avg_variance16x16, vpx_sad16x16x3, vpx_sad16x16x8,
-      vpx_sad16x16x4d)
-
-  BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg,
-      vpx_variance16x8, vpx_sub_pixel_variance16x8,
-      vpx_sub_pixel_avg_variance16x8,
-      vpx_sad16x8x3, vpx_sad16x8x8, vpx_sad16x8x4d)
-
-  BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg,
-      vpx_variance8x16, vpx_sub_pixel_variance8x16,
-      vpx_sub_pixel_avg_variance8x16,
-      vpx_sad8x16x3, vpx_sad8x16x8, vpx_sad8x16x4d)
-
-  BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg,
-      vpx_variance8x8, vpx_sub_pixel_variance8x8,
-      vpx_sub_pixel_avg_variance8x8,
-      vpx_sad8x8x3, vpx_sad8x8x8, vpx_sad8x8x4d)
-
-  BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg,
-      vpx_variance8x4, vpx_sub_pixel_variance8x4,
-      vpx_sub_pixel_avg_variance8x4, NULL, vpx_sad8x4x8, vpx_sad8x4x4d)
-
-  BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg,
-      vpx_variance4x8, vpx_sub_pixel_variance4x8,
-      vpx_sub_pixel_avg_variance4x8, NULL, vpx_sad4x8x8, vpx_sad4x8x4d)
-
-  BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg,
-      vpx_variance4x4, vpx_sub_pixel_variance4x4,
-      vpx_sub_pixel_avg_variance4x4,
-      vpx_sad4x4x3, vpx_sad4x4x8, vpx_sad4x4x4d)
+#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
+  cpi->fn_ptr[BT].sdf = SDF;                                    \
+  cpi->fn_ptr[BT].sdaf = SDAF;                                  \
+  cpi->fn_ptr[BT].vf = VF;                                      \
+  cpi->fn_ptr[BT].svf = SVF;                                    \
+  cpi->fn_ptr[BT].svaf = SVAF;                                  \
+  cpi->fn_ptr[BT].sdx3f = SDX3F;                                \
+  cpi->fn_ptr[BT].sdx8f = SDX8F;                                \
+  cpi->fn_ptr[BT].sdx4df = SDX4DF;
+
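+  // Each BFP() call registers, for one block size, the SAD, averaging SAD,
+  // variance, sub-pixel variance, sub-pixel average variance, and the
+  // x3/x8/x4d multi-candidate SAD kernels; NULL leaves a slot unset.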
+  BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
+      vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16, NULL, NULL,
+      vpx_sad32x16x4d)
+
+  BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
+      vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32, NULL, NULL,
+      vpx_sad16x32x4d)
+
+  BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
+      vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32, NULL, NULL,
+      vpx_sad64x32x4d)
+
+  BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
+      vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64, NULL, NULL,
+      vpx_sad32x64x4d)
+
+  BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
+      vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
+      vpx_sad32x32x3, vpx_sad32x32x8, vpx_sad32x32x4d)
+
+  BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
+      vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
+      vpx_sad64x64x3, vpx_sad64x64x8, vpx_sad64x64x4d)
+
+  BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
+      vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
+      vpx_sad16x16x3, vpx_sad16x16x8, vpx_sad16x16x4d)
+
+  BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
+      vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8, vpx_sad16x8x3,
+      vpx_sad16x8x8, vpx_sad16x8x4d)
+
+  BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
+      vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16, vpx_sad8x16x3,
+      vpx_sad8x16x8, vpx_sad8x16x4d)
+
+  BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
+      vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x3,
+      vpx_sad8x8x8, vpx_sad8x8x4d)
+
+  BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
+      vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, NULL,
+      vpx_sad8x4x8, vpx_sad8x4x4d)
+
+  BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
+      vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, NULL,
+      vpx_sad4x8x8, vpx_sad4x8x4d)
+
+  BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
+      vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x3,
+      vpx_sad4x4x8, vpx_sad4x4x4d)
 
 #if CONFIG_VPX_HIGHBITDEPTH
   highbd_set_var_fns(cpi);
@@ -1816,8 +1617,7 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
 
   return cpi;
 }
-#define SNPRINT(H, T) \
-  snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
+#define SNPRINT(H, T) snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
 
 #define SNPRINT2(H, T, V) \
   snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
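+
+// Both macros append formatted text at the current end of a fixed-size
+// char array, letting snprintf bound the write to the space remaining.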
@@ -1827,8 +1627,7 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
   unsigned int i;
   int t;
 
-  if (!cpi)
-    return;
+  if (!cpi) return;
 
   cm = &cpi->common;
   if (cm->current_video_frame > 0) {
@@ -1836,28 +1635,27 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
     vpx_clear_system_state();
 
     if (cpi->oxcf.pass != 1) {
-      char headings[512] = {0};
-      char results[512] = {0};
+      char headings[512] = { 0 };
+      char results[512] = { 0 };
       FILE *f = fopen("opsnr.stt", "a");
-      double time_encoded = (cpi->last_end_time_stamp_seen
-                             - cpi->first_time_stamp_ever) / 10000000.000;
-      double total_encode_time = (cpi->time_receive_data +
-                                  cpi->time_compress_data)   / 1000.000;
+      double time_encoded =
+          (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
+          10000000.000;
+      double total_encode_time =
+          (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
       const double dr =
-          (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded;
+          (double)cpi->bytes * (double)8 / (double)1000 / time_encoded;
       const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
 
       if (cpi->b_calculate_psnr) {
-        const double total_psnr =
-            vpx_sse_to_psnr((double)cpi->total_samples, peak,
-                            (double)cpi->total_sq_error);
-        const double totalp_psnr =
-            vpx_sse_to_psnr((double)cpi->totalp_samples, peak,
-                            (double)cpi->totalp_sq_error);
-        const double total_ssim = 100 * pow(cpi->summed_quality /
-                                            cpi->summed_weights, 8.0);
-        const double totalp_ssim = 100 * pow(cpi->summedp_quality /
-                                             cpi->summedp_weights, 8.0);
+        const double total_psnr = vpx_sse_to_psnr(
+            (double)cpi->total_samples, peak, (double)cpi->total_sq_error);
+        const double totalp_psnr = vpx_sse_to_psnr(
+            (double)cpi->totalp_samples, peak, (double)cpi->totalp_sq_error);
+        const double total_ssim =
+            100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
+        const double totalp_ssim =
+            100 * pow(cpi->summedp_quality / cpi->summedp_weights, 8.0);
 
         snprintf(headings, sizeof(headings),
                  "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
@@ -1868,12 +1666,10 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
                  "%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
                  "%7.3f\t%7.3f\t%7.3f\t%7.3f",
                  dr, cpi->psnr.stat[ALL] / cpi->count, total_psnr,
-                 cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr,
-                 total_ssim, totalp_ssim,
-                 cpi->fastssim.stat[ALL] / cpi->count,
-                 cpi->psnrhvs.stat[ALL] / cpi->count,
-                 cpi->psnr.worst, cpi->worst_ssim, cpi->fastssim.worst,
-                 cpi->psnrhvs.worst);
+                 cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr, total_ssim,
+                 totalp_ssim, cpi->fastssim.stat[ALL] / cpi->count,
+                 cpi->psnrhvs.stat[ALL] / cpi->count, cpi->psnr.worst,
+                 cpi->worst_ssim, cpi->fastssim.worst, cpi->psnrhvs.worst);
 
         if (cpi->b_calculate_blockiness) {
           SNPRINT(headings, "\t  Block\tWstBlck");
@@ -1935,13 +1731,12 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
   vpx_free(cpi->tile_thr_data);
   vpx_free(cpi->workers);
 
-  if (cpi->num_workers > 1)
-    vp10_loop_filter_dealloc(&cpi->lf_row_sync);
+  if (cpi->num_workers > 1) vp10_loop_filter_dealloc(&cpi->lf_row_sync);
 
   dealloc_compressor_data(cpi);
 
-  for (i = 0; i < sizeof(cpi->mbgraph_stats) /
-                  sizeof(cpi->mbgraph_stats[0]); ++i) {
+  for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
+       ++i) {
     vpx_free(cpi->mbgraph_stats[i].mb_stats);
   }
 
@@ -1980,9 +1775,9 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
 /* TODO(yaowu): The block_variance calls the unoptimized versions of variance()
  * and highbd_8_variance(). It should not.
  */
-static void encoder_variance(const uint8_t *a, int  a_stride,
-                             const uint8_t *b, int  b_stride,
-                             int  w, int  h, unsigned int *sse, int *sum) {
+static void encoder_variance(const uint8_t *a, int a_stride, const uint8_t *b,
+                             int b_stride, int w, int h, unsigned int *sse,
+                             int *sum) {
   int i, j;
 
   *sum = 0;
@@ -2001,10 +1796,9 @@ static void encoder_variance(const uint8_t *a, int  a_stride,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static void encoder_highbd_variance64(const uint8_t *a8, int  a_stride,
-                                      const uint8_t *b8, int  b_stride,
-                                      int w, int h, uint64_t *sse,
-                                      uint64_t *sum) {
+static void encoder_highbd_variance64(const uint8_t *a8, int a_stride,
+                                      const uint8_t *b8, int b_stride, int w,
+                                      int h, uint64_t *sse, uint64_t *sum) {
   int i, j;
 
   uint16_t *a = CONVERT_TO_SHORTPTR(a8);
@@ -2023,22 +1817,20 @@ static void encoder_highbd_variance64(const uint8_t *a8, int  a_stride,
   }
 }
 
-static void encoder_highbd_8_variance(const uint8_t *a8, int  a_stride,
-                                      const uint8_t *b8, int  b_stride,
-                                      int w, int h,
-                                      unsigned int *sse, int *sum) {
+static void encoder_highbd_8_variance(const uint8_t *a8, int a_stride,
+                                      const uint8_t *b8, int b_stride, int w,
+                                      int h, unsigned int *sse, int *sum) {
   uint64_t sse_long = 0;
   uint64_t sum_long = 0;
-  encoder_highbd_variance64(a8, a_stride, b8, b_stride, w, h,
-                            &sse_long, &sum_long);
+  encoder_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long,
+                            &sum_long);
   *sse = (unsigned int)sse_long;
   *sum = (int)sum_long;
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-static int64_t get_sse(const uint8_t *a, int a_stride,
-                       const uint8_t *b, int b_stride,
-                       int width, int height) {
+static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
+                       int b_stride, int width, int height) {
   const int dw = width % 16;
   const int dh = height % 16;
   int64_t total_sse = 0;
@@ -2047,15 +1839,15 @@ static int64_t get_sse(const uint8_t *a, int a_stride,
   int x, y;
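+  // The right-edge (dw) and bottom-edge (dh) remainder strips fall outside
+  // the 16x16-aligned interior, so their SSE is accumulated separately.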
 
   if (dw > 0) {
-    encoder_variance(&a[width - dw], a_stride, &b[width - dw], b_stride,
-                     dw, height, &sse, &sum);
+    encoder_variance(&a[width - dw], a_stride, &b[width - dw], b_stride, dw,
+                     height, &sse, &sum);
     total_sse += sse;
   }
 
   if (dh > 0) {
     encoder_variance(&a[(height - dh) * a_stride], a_stride,
-                     &b[(height - dh) * b_stride], b_stride,
-                     width - dw, dh, &sse, &sum);
+                     &b[(height - dh) * b_stride], b_stride, width - dw, dh,
+                     &sse, &sum);
     total_sse += sse;
   }
 
@@ -2079,9 +1871,8 @@ static int64_t get_sse(const uint8_t *a, int a_stride,
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride,
-                                    const uint8_t *b8, int b_stride,
-                                    int width, int height,
-                                    unsigned int input_shift) {
+                                    const uint8_t *b8, int b_stride, int width,
+                                    int height, unsigned int input_shift) {
   const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
   const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
   int64_t total_sse = 0;
@@ -2098,9 +1889,8 @@ static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride,
   return total_sse;
 }
 
-static int64_t highbd_get_sse(const uint8_t *a, int a_stride,
-                              const uint8_t *b, int b_stride,
-                              int width, int height) {
+static int64_t highbd_get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
+                              int b_stride, int width, int height) {
   int64_t total_sse = 0;
   int x, y;
   const int dw = width % 16;
@@ -2108,9 +1898,8 @@ static int64_t highbd_get_sse(const uint8_t *a, int a_stride,
   unsigned int sse = 0;
   int sum = 0;
   if (dw > 0) {
-    encoder_highbd_8_variance(&a[width - dw], a_stride,
-                              &b[width - dw], b_stride,
-                              dw, height, &sse, &sum);
+    encoder_highbd_8_variance(&a[width - dw], a_stride, &b[width - dw],
+                              b_stride, dw, height, &sse, &sum);
     total_sse += sse;
   }
   if (dh > 0) {
@@ -2143,18 +1932,16 @@ typedef struct {
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
-                             const YV12_BUFFER_CONFIG *b,
-                             PSNR_STATS *psnr,
+                             const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
                              unsigned int bit_depth,
                              unsigned int in_bit_depth) {
-  const int widths[3] =
-      {a->y_crop_width,  a->uv_crop_width,  a->uv_crop_width };
-  const int heights[3] =
-      {a->y_crop_height, a->uv_crop_height, a->uv_crop_height};
-  const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer,  a->v_buffer };
-  const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride};
-  const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer,  b->v_buffer };
-  const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride};
+  const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
+  const int heights[3] = { a->y_crop_height, a->uv_crop_height,
+                           a->uv_crop_height };
+  const uint8_t *a_planes[3] = { a->y_buffer, a->u_buffer, a->v_buffer };
+  const int a_strides[3] = { a->y_stride, a->uv_stride, a->uv_stride };
+  const uint8_t *b_planes[3] = { b->y_buffer, b->u_buffer, b->v_buffer };
+  const int b_strides[3] = { b->y_stride, b->uv_stride, b->uv_stride };
   int i;
   uint64_t total_sse = 0;
   uint32_t total_samples = 0;
@@ -2168,17 +1955,14 @@ static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
     uint64_t sse;
     if (a->flags & YV12_FLAG_HIGHBITDEPTH) {
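+      // The encoder may run at a higher internal bit depth than the input;
+      // a nonzero input_shift maps samples back to input precision first.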
       if (input_shift) {
-        sse = highbd_get_sse_shift(a_planes[i], a_strides[i],
-                                   b_planes[i], b_strides[i], w, h,
-                                   input_shift);
+        sse = highbd_get_sse_shift(a_planes[i], a_strides[i], b_planes[i],
+                                   b_strides[i], w, h, input_shift);
       } else {
-        sse = highbd_get_sse(a_planes[i], a_strides[i],
-                             b_planes[i], b_strides[i], w, h);
+        sse = highbd_get_sse(a_planes[i], a_strides[i], b_planes[i],
+                             b_strides[i], w, h);
       }
     } else {
-      sse = get_sse(a_planes[i], a_strides[i],
-                    b_planes[i], b_strides[i],
-                    w, h);
+      sse = get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h);
     }
     psnr->sse[1 + i] = sse;
     psnr->samples[1 + i] = samples;
@@ -2190,23 +1974,22 @@ static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
 
   psnr->sse[0] = total_sse;
   psnr->samples[0] = total_samples;
-  psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak,
-                                  (double)total_sse);
+  psnr->psnr[0] =
+      vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
 }
 
-#else  // !CONFIG_VPX_HIGHBITDEPTH
+#else   // !CONFIG_VPX_HIGHBITDEPTH
 
 static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
                       PSNR_STATS *psnr) {
   static const double peak = 255.0;
-  const int widths[3]        = {
-      a->y_crop_width, a->uv_crop_width, a->uv_crop_width};
-  const int heights[3]       = {
-      a->y_crop_height, a->uv_crop_height, a->uv_crop_height};
-  const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer};
-  const int a_strides[3]     = {a->y_stride, a->uv_stride, a->uv_stride};
-  const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer};
-  const int b_strides[3]     = {b->y_stride, b->uv_stride, b->uv_stride};
+  const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
+  const int heights[3] = { a->y_crop_height, a->uv_crop_height,
+                           a->uv_crop_height };
+  const uint8_t *a_planes[3] = { a->y_buffer, a->u_buffer, a->v_buffer };
+  const int a_strides[3] = { a->y_stride, a->uv_stride, a->uv_stride };
+  const uint8_t *b_planes[3] = { b->y_buffer, b->u_buffer, b->v_buffer };
+  const int b_strides[3] = { b->y_stride, b->uv_stride, b->uv_stride };
   int i;
   uint64_t total_sse = 0;
   uint32_t total_samples = 0;
@@ -2215,9 +1998,8 @@ static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
     const int w = widths[i];
     const int h = heights[i];
     const uint32_t samples = w * h;
-    const uint64_t sse = get_sse(a_planes[i], a_strides[i],
-                                 b_planes[i], b_strides[i],
-                                 w, h);
+    const uint64_t sse =
+        get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h);
     psnr->sse[1 + i] = sse;
     psnr->samples[1 + i] = samples;
     psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
@@ -2228,8 +2010,8 @@ static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
 
   psnr->sse[0] = total_sse;
   psnr->samples[0] = total_samples;
-  psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak,
-                                  (double)total_sse);
+  psnr->psnr[0] =
+      vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
@@ -2254,8 +2036,7 @@ static void generate_psnr_packet(VP10_COMP *cpi) {
 }
 
 int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags) {
-  if (ref_frame_flags > 7)
-    return -1;
+  if (ref_frame_flags > 7) return -1;
 
   cpi->ref_frame_flags = ref_frame_flags;
   return 0;
@@ -2268,8 +2049,8 @@ void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags) {
   cpi->ext_refresh_frame_flags_pending = 1;
 }
 
-static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(VP10_COMP *cpi,
-                                VPX_REFFRAME ref_frame_flag) {
+static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(
+    VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
   MV_REFERENCE_FRAME ref_frame = NONE;
   if (ref_frame_flag == VPX_LAST_FLAG)
     ref_frame = LAST_FRAME;
@@ -2282,7 +2063,7 @@ static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(VP10_COMP *cpi,
 }
 
 int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
-                           YV12_BUFFER_CONFIG *sd) {
+                            YV12_BUFFER_CONFIG *sd) {
   YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
   if (cfg) {
     vpx_yv12_copy_frame(cfg, sd);
@@ -2293,7 +2074,7 @@ int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
 }
 
 int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
-                          YV12_BUFFER_CONFIG *sd) {
+                           YV12_BUFFER_CONFIG *sd) {
   YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
   if (cfg) {
     vpx_yv12_copy_frame(sd, cfg);
@@ -2303,7 +2084,7 @@ int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
   }
 }
 
-int vp10_update_entropy(VP10_COMP * cpi, int update) {
+int vp10_update_entropy(VP10_COMP *cpi, int update) {
   cpi->ext_refresh_frame_context = update;
   cpi->ext_refresh_frame_context_pending = 1;
   return 0;
@@ -2352,7 +2133,7 @@ void vp10_write_yuv_rec_frame(VP10_COMMON *cm) {
     uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
 
     do {
-      fwrite(src16, s->y_width, 2,  yuv_rec_file);
+      fwrite(src16, s->y_width, 2, yuv_rec_file);
       src16 += s->y_stride;
     } while (--h);
 
@@ -2360,7 +2141,7 @@ void vp10_write_yuv_rec_frame(VP10_COMMON *cm) {
     h = s->uv_height;
 
     do {
-      fwrite(src16, s->uv_width, 2,  yuv_rec_file);
+      fwrite(src16, s->uv_width, 2, yuv_rec_file);
       src16 += s->uv_stride;
     } while (--h);
 
@@ -2378,7 +2159,7 @@ void vp10_write_yuv_rec_frame(VP10_COMMON *cm) {
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
   do {
-    fwrite(src, s->y_width, 1,  yuv_rec_file);
+    fwrite(src, s->y_width, 1, yuv_rec_file);
     src += s->y_stride;
   } while (--h);
 
@@ -2386,7 +2167,7 @@ void vp10_write_yuv_rec_frame(VP10_COMMON *cm) {
   h = s->uv_height;
 
   do {
-    fwrite(src, s->uv_width, 1,  yuv_rec_file);
+    fwrite(src, s->uv_width, 1, yuv_rec_file);
     src += s->uv_stride;
   } while (--h);
 
@@ -2412,32 +2193,33 @@ static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   // TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t
   int i;
-  const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
-  const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
-  const int src_widths[3] = {src->y_crop_width, src->uv_crop_width,
-                             src->uv_crop_width };
-  const int src_heights[3] = {src->y_crop_height, src->uv_crop_height,
-                              src->uv_crop_height};
-  uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
-  const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
-  const int dst_widths[3] = {dst->y_crop_width, dst->uv_crop_width,
-                             dst->uv_crop_width};
-  const int dst_heights[3] = {dst->y_crop_height, dst->uv_crop_height,
-                              dst->uv_crop_height};
+  const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+                                   src->v_buffer };
+  const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+  const int src_widths[3] = { src->y_crop_width, src->uv_crop_width,
+                              src->uv_crop_width };
+  const int src_heights[3] = { src->y_crop_height, src->uv_crop_height,
+                               src->uv_crop_height };
+  uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+  const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
+  const int dst_widths[3] = { dst->y_crop_width, dst->uv_crop_width,
+                              dst->uv_crop_width };
+  const int dst_heights[3] = { dst->y_crop_height, dst->uv_crop_height,
+                               dst->uv_crop_height };
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
 #if CONFIG_VPX_HIGHBITDEPTH
     if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
       vp10_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
-                              src_strides[i], dsts[i], dst_heights[i],
-                              dst_widths[i], dst_strides[i], bd);
+                               src_strides[i], dsts[i], dst_heights[i],
+                               dst_widths[i], dst_strides[i], bd);
     } else {
       vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
-                       dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+                        dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
     }
 #else
     vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
-                     dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+                      dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   }
   vpx_extend_frame_borders(dst);
@@ -2454,10 +2236,11 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
   const int src_h = src->y_crop_height;
   const int dst_w = dst->y_crop_width;
   const int dst_h = dst->y_crop_height;
-  const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
-  const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
-  uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
-  const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
+  const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+                                   src->v_buffer };
+  const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+  uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+  const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
   const InterpKernel *const kernel = vp10_filter_kernels[EIGHTTAP];
   int x, y, i;
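+  // Output is produced in 16x16 tiles; x_q4/y_q4 give the 1/16-pel source
+  // phase used to select the 8-tap interpolation kernel for each tile.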
 
@@ -2469,8 +2252,9 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
         const int y_q4 = y * (16 / factor) * src_h / dst_h;
         const int src_stride = src_strides[i];
         const int dst_stride = dst_strides[i];
-        const uint8_t *src_ptr = srcs[i] + (y / factor) * src_h / dst_h *
-                                     src_stride + (x / factor) * src_w / dst_w;
+        const uint8_t *src_ptr = srcs[i] +
+                                 (y / factor) * src_h / dst_h * src_stride +
+                                 (x / factor) * src_w / dst_w;
         uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
 
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -2482,14 +2266,14 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
         } else {
           vpx_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
                         kernel[x_q4 & 0xf], 16 * src_w / dst_w,
-                        kernel[y_q4 & 0xf], 16 * src_h / dst_h,
-                        16 / factor, 16 / factor);
+                        kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
+                        16 / factor);
         }
 #else
         vpx_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
                       kernel[x_q4 & 0xf], 16 * src_w / dst_w,
-                      kernel[y_q4 & 0xf], 16 * src_h / dst_h,
-                      16 / factor, 16 / factor);
+                      kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
+                      16 / factor);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
       }
     }
@@ -2506,8 +2290,9 @@ static int scale_down(VP10_COMP *cpi, int q) {
 
   if (rc->frame_size_selector == UNSCALED &&
       q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) {
-    const int max_size_thresh = (int)(rate_thresh_mult[SCALE_STEP1]
-        * VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
+    const int max_size_thresh =
+        (int)(rate_thresh_mult[SCALE_STEP1] *
+              VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
     scale = rc->projected_frame_size > max_size_thresh ? 1 : 0;
   }
   return scale;
@@ -2515,8 +2300,7 @@ static int scale_down(VP10_COMP *cpi, int q) {
 
 // Function to test for conditions that indicate we should loop
 // back and recode a frame.
-static int recode_loop_test(VP10_COMP *cpi,
-                            int high_limit, int low_limit,
+static int recode_loop_test(VP10_COMP *cpi, int high_limit, int low_limit,
                             int q, int maxq, int minq) {
   const RATE_CONTROL *const rc = &cpi->rc;
   const VP10EncoderConfig *const oxcf = &cpi->oxcf;
@@ -2525,14 +2309,12 @@ static int recode_loop_test(VP10_COMP *cpi,
 
   if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
       (cpi->sf.recode_loop == ALLOW_RECODE) ||
-      (frame_is_kfgfarf &&
-       (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) {
-    if (frame_is_kfgfarf &&
-        (oxcf->resize_mode == RESIZE_DYNAMIC) &&
+      (frame_is_kfgfarf && (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) {
+    if (frame_is_kfgfarf && (oxcf->resize_mode == RESIZE_DYNAMIC) &&
         scale_down(cpi, q)) {
-        // Code this group at a lower resolution.
-        cpi->resize_pending = 1;
-        return 1;
+      // Code this group at a lower resolution.
+      cpi->resize_pending = 1;
+      return 1;
     }
 
     // TODO(agrange) high_limit could be greater than the scale-down threshold.
@@ -2552,16 +2334,16 @@ static int recode_loop_test(VP10_COMP *cpi,
 }
 
 void vp10_update_reference_frames(VP10_COMP *cpi) {
-  VP10_COMMON * const cm = &cpi->common;
+  VP10_COMMON *const cm = &cpi->common;
   BufferPool *const pool = cm->buffer_pool;
 
   // At this point the new frame has been encoded.
   // If any buffer copy / swapping is signaled it should be done here.
   if (cm->frame_type == KEY_FRAME) {
-    ref_cnt_fb(pool->frame_bufs,
-               &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
-    ref_cnt_fb(pool->frame_bufs,
-               &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
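+    // A key frame refreshes every reference: both the golden and alt-ref
+    // slots take the newly coded buffer.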
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+               cm->new_fb_idx);
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
+               cm->new_fb_idx);
   } else if (vp10_preserve_existing_gf(cpi)) {
     // We have decided to preserve the previously existing golden frame as our
     // new ARF frame. However, in the short term in function
@@ -2573,8 +2355,8 @@ void vp10_update_reference_frames(VP10_COMP *cpi) {
     // slot and, if we're updating the GF, the current frame becomes the new GF.
     int tmp;
 
-    ref_cnt_fb(pool->frame_bufs,
-               &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
+               cm->new_fb_idx);
 
     tmp = cpi->alt_fb_idx;
     cpi->alt_fb_idx = cpi->gld_fb_idx;
@@ -2587,16 +2369,15 @@ void vp10_update_reference_frames(VP10_COMP *cpi) {
         arf_idx = gf_group->arf_update_idx[gf_group->index];
       }
 
-      ref_cnt_fb(pool->frame_bufs,
-                 &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
+      ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
       memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
              cpi->interp_filter_selected[0],
              sizeof(cpi->interp_filter_selected[0]));
     }
 
     if (cpi->refresh_golden_frame) {
-      ref_cnt_fb(pool->frame_bufs,
-                 &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
+      ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+                 cm->new_fb_idx);
       if (!cpi->rc.is_src_frame_alt_ref)
         memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
                cpi->interp_filter_selected[0],
@@ -2609,8 +2390,8 @@ void vp10_update_reference_frames(VP10_COMP *cpi) {
   }
 
   if (cpi->refresh_last_frame) {
-    ref_cnt_fb(pool->frame_bufs,
-               &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx],
+               cm->new_fb_idx);
     if (!cpi->rc.is_src_frame_alt_ref)
       memcpy(cpi->interp_filter_selected[LAST_FRAME],
              cpi->interp_filter_selected[0],
@@ -2639,9 +2420,8 @@ static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
   if (lf->filter_level > 0) {
     if (cpi->num_workers > 1)
       vp10_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
-                               lf->filter_level, 0, 0,
-                               cpi->workers, cpi->num_workers,
-                               &cpi->lf_row_sync);
+                                lf->filter_level, 0, 0, cpi->workers,
+                                cpi->num_workers, &cpi->lf_row_sync);
     else
       vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
   }
@@ -2649,16 +2429,13 @@ static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
   vpx_extend_frame_inner_borders(cm->frame_to_show);
 }
 
-static INLINE void alloc_frame_mvs(const VP10_COMMON *cm,
-                                   int buffer_idx) {
+static INLINE void alloc_frame_mvs(const VP10_COMMON *cm, int buffer_idx) {
   RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
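+  // Lazily (re)allocate the frame's MV buffer whenever the mi grid grows.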
-  if (new_fb_ptr->mvs == NULL ||
-      new_fb_ptr->mi_rows < cm->mi_rows ||
+  if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
       new_fb_ptr->mi_cols < cm->mi_cols) {
     vpx_free(new_fb_ptr->mvs);
-    new_fb_ptr->mvs =
-      (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
-                           sizeof(*new_fb_ptr->mvs));
+    new_fb_ptr->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+                                           sizeof(*new_fb_ptr->mvs));
     new_fb_ptr->mi_rows = cm->mi_rows;
     new_fb_ptr->mi_cols = cm->mi_cols;
   }
@@ -2667,14 +2444,15 @@ static INLINE void alloc_frame_mvs(const VP10_COMMON *cm,
 void vp10_scale_references(VP10_COMP *cpi) {
   VP10_COMMON *cm = &cpi->common;
   MV_REFERENCE_FRAME ref_frame;
-  const VPX_REFFRAME ref_mask[3] = {VPX_LAST_FLAG, VPX_GOLD_FLAG, VPX_ALT_FLAG};
+  const VPX_REFFRAME ref_mask[3] = { VPX_LAST_FLAG, VPX_GOLD_FLAG,
+                                     VPX_ALT_FLAG };
 
   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
     // Need to convert from VPX_REFFRAME to index into ref_mask (subtract 1).
     if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
       BufferPool *const pool = cm->buffer_pool;
-      const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi,
-                                                                 ref_frame);
+      const YV12_BUFFER_CONFIG *const ref =
+          get_ref_frame_buffer(cpi, ref_frame);
 
       if (ref == NULL) {
         cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
@@ -2690,18 +2468,14 @@ void vp10_scale_references(VP10_COMP *cpi) {
           new_fb = get_free_fb(cm);
           force_scaling = 1;
         }
-        if (new_fb == INVALID_IDX)
-          return;
+        if (new_fb == INVALID_IDX) return;
         new_fb_ptr = &pool->frame_bufs[new_fb];
-        if (force_scaling ||
-            new_fb_ptr->buf.y_crop_width != cm->width ||
+        if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
             new_fb_ptr->buf.y_crop_height != cm->height) {
-          vpx_realloc_frame_buffer(&new_fb_ptr->buf,
-                                   cm->width, cm->height,
-                                   cm->subsampling_x, cm->subsampling_y,
-                                   cm->use_highbitdepth,
-                                   VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
-                                   NULL, NULL, NULL);
+          vpx_realloc_frame_buffer(
+              &new_fb_ptr->buf, cm->width, cm->height, cm->subsampling_x,
+              cm->subsampling_y, cm->use_highbitdepth, VPX_ENC_BORDER_IN_PIXELS,
+              cm->byte_alignment, NULL, NULL, NULL);
           scale_and_extend_frame(ref, &new_fb_ptr->buf, (int)cm->bit_depth);
           cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
           alloc_frame_mvs(cm, new_fb);
@@ -2715,14 +2489,11 @@ void vp10_scale_references(VP10_COMP *cpi) {
           new_fb = get_free_fb(cm);
           force_scaling = 1;
         }
-        if (new_fb == INVALID_IDX)
-          return;
+        if (new_fb == INVALID_IDX) return;
         new_fb_ptr = &pool->frame_bufs[new_fb];
-        if (force_scaling ||
-            new_fb_ptr->buf.y_crop_width != cm->width ||
+        if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
             new_fb_ptr->buf.y_crop_height != cm->height) {
-          vpx_realloc_frame_buffer(&new_fb_ptr->buf,
-                                   cm->width, cm->height,
+          vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
                                    cm->subsampling_x, cm->subsampling_y,
                                    VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
                                    NULL, NULL, NULL);
@@ -2740,8 +2511,7 @@ void vp10_scale_references(VP10_COMP *cpi) {
         ++buf->ref_count;
       }
     } else {
-      if (cpi->oxcf.pass != 0)
-        cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
+      if (cpi->oxcf.pass != 0) cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
     }
   }
 }
@@ -2758,22 +2528,21 @@ static void release_scaled_references(VP10_COMP *cpi) {
     refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0;
     for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
       const int idx = cpi->scaled_ref_idx[i - 1];
-      RefCntBuffer *const buf = idx != INVALID_IDX ?
-          &cm->buffer_pool->frame_bufs[idx] : NULL;
+      RefCntBuffer *const buf =
+          idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
       const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i);
       if (buf != NULL &&
-          (refresh[i - 1] ||
-          (buf->buf.y_crop_width == ref->y_crop_width &&
-           buf->buf.y_crop_height == ref->y_crop_height))) {
+          (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width &&
+                              buf->buf.y_crop_height == ref->y_crop_height))) {
         --buf->ref_count;
-        cpi->scaled_ref_idx[i -1] = INVALID_IDX;
+        cpi->scaled_ref_idx[i - 1] = INVALID_IDX;
       }
     }
   } else {
     for (i = 0; i < MAX_REF_FRAMES; ++i) {
       const int idx = cpi->scaled_ref_idx[i];
-      RefCntBuffer *const buf = idx != INVALID_IDX ?
-          &cm->buffer_pool->frame_bufs[idx] : NULL;
+      RefCntBuffer *const buf =
+          idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
       if (buf != NULL) {
         --buf->ref_count;
         cpi->scaled_ref_idx[i] = INVALID_IDX;
@@ -2907,8 +2676,8 @@ static void set_size_independent_vars(VP10_COMP *cpi) {
   cpi->common.interp_filter = cpi->sf.default_interp_filter;
 }
 
-static void set_size_dependent_vars(VP10_COMP *cpi, int *q,
-                                    int *bottom_index, int *top_index) {
+static void set_size_dependent_vars(VP10_COMP *cpi, int *q, int *bottom_index,
+                                    int *top_index) {
   VP10_COMMON *const cm = &cpi->common;
   const VP10EncoderConfig *const oxcf = &cpi->oxcf;
 
@@ -2946,40 +2715,37 @@ static void set_frame_size(VP10_COMP *cpi) {
   VP10EncoderConfig *const oxcf = &cpi->oxcf;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
-  if (oxcf->pass == 2 &&
-      oxcf->rc_mode == VPX_VBR &&
+  if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
       ((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
-        (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
-    vp10_calculate_coded_size(
-        cpi, &oxcf->scaled_frame_width, &oxcf->scaled_frame_height);
+       (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
+    vp10_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
+                              &oxcf->scaled_frame_height);
 
     // There has been a change in frame size.
     vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
-                         oxcf->scaled_frame_height);
+                          oxcf->scaled_frame_height);
   }
 
-  if (oxcf->pass == 0 &&
-      oxcf->rc_mode == VPX_CBR &&
+  if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
       oxcf->resize_mode == RESIZE_DYNAMIC) {
-      if (cpi->resize_pending == 1) {
-        oxcf->scaled_frame_width =
-            (cm->width * cpi->resize_scale_num) / cpi->resize_scale_den;
-        oxcf->scaled_frame_height =
-            (cm->height * cpi->resize_scale_num) /cpi->resize_scale_den;
-      } else if (cpi->resize_pending == -1) {
-        // Go back up to original size.
-        oxcf->scaled_frame_width = oxcf->width;
-        oxcf->scaled_frame_height = oxcf->height;
-      }
-      if (cpi->resize_pending != 0) {
-        // There has been a change in frame size.
-        vp10_set_size_literal(cpi,
-                             oxcf->scaled_frame_width,
-                             oxcf->scaled_frame_height);
-
-        // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
-        set_mv_search_params(cpi);
-      }
+    if (cpi->resize_pending == 1) {
+      oxcf->scaled_frame_width =
+          (cm->width * cpi->resize_scale_num) / cpi->resize_scale_den;
+      oxcf->scaled_frame_height =
+          (cm->height * cpi->resize_scale_num) / cpi->resize_scale_den;
+    } else if (cpi->resize_pending == -1) {
+      // Go back up to original size.
+      oxcf->scaled_frame_width = oxcf->width;
+      oxcf->scaled_frame_height = oxcf->height;
+    }
+    if (cpi->resize_pending != 0) {
+      // There has been a change in frame size.
+      vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
+                            oxcf->scaled_frame_height);
+
+      // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
+      set_mv_search_params(cpi);
+    }
   }
 
   if (oxcf->pass == 2) {
@@ -2989,14 +2755,13 @@ static void set_frame_size(VP10_COMP *cpi) {
   alloc_frame_mvs(cm, cm->new_fb_idx);
 
   // Reset the frame pointers to the current frame size.
-  vpx_realloc_frame_buffer(get_frame_new_buffer(cm),
-                           cm->width, cm->height,
+  vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
                            cm->subsampling_x, cm->subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                            cm->use_highbitdepth,
 #endif
-                           VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
-                           NULL, NULL, NULL);
+                           VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL,
+                           NULL, NULL);
 
   alloc_util_frame_buffers(cpi);
   init_motion_estimation(cpi);
@@ -3011,18 +2776,15 @@ static void set_frame_size(VP10_COMP *cpi) {
       YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
       ref_buf->buf = buf;
 #if CONFIG_VPX_HIGHBITDEPTH
-      vp10_setup_scale_factors_for_frame(&ref_buf->sf,
-                                        buf->y_crop_width, buf->y_crop_height,
-                                        cm->width, cm->height,
-                                        (buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
-                                            1 : 0);
+      vp10_setup_scale_factors_for_frame(
+          &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
+          cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
 #else
-      vp10_setup_scale_factors_for_frame(&ref_buf->sf,
-                                        buf->y_crop_width, buf->y_crop_height,
-                                        cm->width, cm->height);
+      vp10_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
+                                         buf->y_crop_height, cm->width,
+                                         cm->height);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
-      if (vp10_is_scaled(&ref_buf->sf))
-        vpx_extend_frame_borders(buf);
+      if (vp10_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
     } else {
       ref_buf->buf = NULL;
     }
@@ -3041,24 +2803,21 @@ static void encode_without_recode_loop(VP10_COMP *cpi) {
 
   // For 1 pass CBR under dynamic resize mode: use faster scaling for source.
   // Only for 2x2 scaling for now.
-  if (cpi->oxcf.pass == 0 &&
-      cpi->oxcf.rc_mode == VPX_CBR &&
+  if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
       cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
       cpi->un_scaled_source->y_width == (cm->width << 1) &&
       cpi->un_scaled_source->y_height == (cm->height << 1)) {
-    cpi->Source = vp10_scale_if_required_fast(cm,
-                                             cpi->un_scaled_source,
-                                             &cpi->scaled_source);
+    cpi->Source = vp10_scale_if_required_fast(cm, cpi->un_scaled_source,
+                                              &cpi->scaled_source);
     if (cpi->unscaled_last_source != NULL)
-       cpi->Last_Source = vp10_scale_if_required_fast(cm,
-                                                     cpi->unscaled_last_source,
-                                                     &cpi->scaled_last_source);
+      cpi->Last_Source = vp10_scale_if_required_fast(
+          cm, cpi->unscaled_last_source, &cpi->scaled_last_source);
   } else {
-    cpi->Source = vp10_scale_if_required(cm, cpi->un_scaled_source,
-                                        &cpi->scaled_source);
+    cpi->Source =
+        vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
     if (cpi->unscaled_last_source != NULL)
       cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
-                                               &cpi->scaled_last_source);
+                                                &cpi->scaled_last_source);
   }
 
   if (frame_is_intra_only(cm) == 0) {
@@ -3090,8 +2849,7 @@ static void encode_without_recode_loop(VP10_COMP *cpi) {
 
   // For 1-pass CBR: update some stats from cyclic refresh, and check
   // whether the golden reference update should be skipped.
-  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
-      cm->frame_type != KEY_FRAME &&
+  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME &&
       (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR))
     vp10_cyclic_refresh_check_golden_update(cpi);
 
@@ -3101,8 +2859,7 @@ static void encode_without_recode_loop(VP10_COMP *cpi) {
   vpx_clear_system_state();
 }
 
-static void encode_with_recode_loop(VP10_COMP *cpi,
-                                    size_t *size,
+static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
                                     uint8_t *dest) {
   VP10_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
@@ -3145,16 +2902,16 @@ static void encode_with_recode_loop(VP10_COMP *cpi,
     // Decide frame size bounds first time through.
     if (loop_count == 0) {
       vp10_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
-                                       &frame_under_shoot_limit,
-                                       &frame_over_shoot_limit);
+                                        &frame_under_shoot_limit,
+                                        &frame_over_shoot_limit);
     }
 
-    cpi->Source = vp10_scale_if_required(cm, cpi->un_scaled_source,
-                                      &cpi->scaled_source);
+    cpi->Source =
+        vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
 
     if (cpi->unscaled_last_source != NULL)
       cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
-                                               &cpi->scaled_last_source);
+                                                &cpi->scaled_last_source);
 
     if (frame_is_intra_only(cm) == 0) {
       if (loop_count > 0) {
@@ -3165,8 +2922,7 @@ static void encode_with_recode_loop(VP10_COMP *cpi,
 
     vp10_set_quantizer(cm, q);
 
-    if (loop_count == 0)
-      setup_frame(cpi);
+    if (loop_count == 0) setup_frame(cpi);
 
     // The variance-adaptive and in-frame q adjustment experiments are
     // mutually exclusive.
@@ -3195,16 +2951,14 @@ static void encode_with_recode_loop(VP10_COMP *cpi,
       rc->projected_frame_size = (int)(*size) << 3;
       restore_coding_context(cpi);
 
-      if (frame_over_shoot_limit == 0)
-        frame_over_shoot_limit = 1;
+      if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
     }
 
     if (cpi->oxcf.rc_mode == VPX_Q) {
       loop = 0;
     } else {
-      if ((cm->frame_type == KEY_FRAME) &&
-           rc->this_key_frame_forced &&
-           (rc->projected_frame_size < rc->max_frame_bandwidth)) {
+      if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced &&
+          (rc->projected_frame_size < rc->max_frame_bandwidth)) {
         int last_q = q;
         int64_t kf_err;
 
@@ -3251,9 +3005,9 @@ static void encode_with_recode_loop(VP10_COMP *cpi,
         q = clamp(q, q_low, q_high);
 
         loop = q != last_q;
-      } else if (recode_loop_test(
-          cpi, frame_over_shoot_limit, frame_under_shoot_limit,
-          q, VPXMAX(q_high, top_index), bottom_index)) {
+      } else if (recode_loop_test(cpi, frame_over_shoot_limit,
+                                  frame_under_shoot_limit, q,
+                                  VPXMAX(q_high, top_index), bottom_index)) {
+        // Is the projected frame size out of range, and are we allowed to
+        // attempt a recode?
         int last_q = q;
@@ -3294,13 +3048,13 @@ static void encode_with_recode_loop(VP10_COMP *cpi,
             // Update rate_correction_factor unless
             vp10_rc_update_rate_correction_factors(cpi);
 
-            q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
-                                   bottom_index, VPXMAX(q_high, top_index));
+            q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                   VPXMAX(q_high, top_index));
 
             while (q < q_low && retries < 10) {
               vp10_rc_update_rate_correction_factors(cpi);
-              q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
-                                     bottom_index, VPXMAX(q_high, top_index));
+              q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                     VPXMAX(q_high, top_index));
               retries++;
             }
           }
@@ -3315,21 +3069,20 @@ static void encode_with_recode_loop(VP10_COMP *cpi,
             q = (q_high + q_low) / 2;
           } else {
             vp10_rc_update_rate_correction_factors(cpi);
-            q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
-                                   bottom_index, top_index);
+            q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                   top_index);
             // Special case reset for qlow for constrained quality.
             // This should only trigger where there is very substantial
             // undershoot on a frame and the auto cq level is above
+            // the user-passed-in value.
-            if (cpi->oxcf.rc_mode == VPX_CQ &&
-                q < q_low) {
+            if (cpi->oxcf.rc_mode == VPX_CQ && q < q_low) {
               q_low = q;
             }
 
             while (q > q_high && retries < 10) {
               vp10_rc_update_rate_correction_factors(cpi);
-              q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
-                                     bottom_index, top_index);
+              q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                     top_index);
               retries++;
             }
           }
@@ -3369,17 +3122,13 @@ static int get_ref_frame_flags(const VP10_COMP *cpi) {
   const int gold_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx];
   int flags = VPX_ALT_FLAG | VPX_GOLD_FLAG | VPX_LAST_FLAG;
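+  // If two reference slots alias the same buffer, drop the duplicate flag so
+  // each distinct frame is searched at most once.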
 
-  if (gold_is_last)
-    flags &= ~VPX_GOLD_FLAG;
+  if (gold_is_last) flags &= ~VPX_GOLD_FLAG;
 
-  if (cpi->rc.frames_till_gf_update_due == INT_MAX)
-    flags &= ~VPX_GOLD_FLAG;
+  if (cpi->rc.frames_till_gf_update_due == INT_MAX) flags &= ~VPX_GOLD_FLAG;
 
-  if (alt_is_last)
-    flags &= ~VPX_ALT_FLAG;
+  if (alt_is_last) flags &= ~VPX_ALT_FLAG;
 
-  if (gold_is_alt)
-    flags &= ~VPX_ALT_FLAG;
+  if (gold_is_alt) flags &= ~VPX_ALT_FLAG;
 
   return flags;
 }
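// (Illustrative note, not part of this change: when two reference slots
//  alias the same buffer, the redundant flag is masked out. For example,
//  if the golden and last frames share a buffer but the alt-ref is
//  distinct, gold_is_last clears VPX_GOLD_FLAG and the function returns
//  VPX_ALT_FLAG | VPX_LAST_FLAG.)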
@@ -3402,13 +3151,12 @@ static void set_ext_overrides(VP10_COMP *cpi) {
 }
 
 YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
-                                               YV12_BUFFER_CONFIG *unscaled,
-                                               YV12_BUFFER_CONFIG *scaled) {
+                                                YV12_BUFFER_CONFIG *unscaled,
+                                                YV12_BUFFER_CONFIG *scaled) {
   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
       cm->mi_rows * MI_SIZE != unscaled->y_height) {
     // For 2x2 scaling down.
-    vpx_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1,
-                    2, 1, 0);
+    vpx_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1, 2, 1, 0);
     vpx_extend_frame_borders(scaled);
     return scaled;
   } else {
@@ -3417,8 +3165,8 @@ YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
 }
 
 YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
-                                          YV12_BUFFER_CONFIG *unscaled,
-                                          YV12_BUFFER_CONFIG *scaled) {
+                                           YV12_BUFFER_CONFIG *unscaled,
+                                           YV12_BUFFER_CONFIG *scaled) {
   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
       cm->mi_rows * MI_SIZE != unscaled->y_height) {
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -3443,18 +3191,17 @@ static void set_arf_sign_bias(VP10_COMP *cpi) {
                      (gf_group->rf_level[gf_group->index] == GF_ARF_LOW));
   } else {
     arf_sign_bias =
-      (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
+        (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
   }
   cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias;
 }
 
 static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
   INTERP_FILTER ifilter;
-  int ref_total[MAX_REF_FRAMES] = {0};
+  int ref_total[MAX_REF_FRAMES] = { 0 };
   MV_REFERENCE_FRAME ref;
   int mask = 0;
-  if (cpi->common.last_frame_type == KEY_FRAME ||
-      cpi->refresh_alt_ref_frame)
+  if (cpi->common.last_frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame)
     return mask;
   for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref)
     for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter)
@@ -3462,20 +3209,19 @@ static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
 
   for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) {
     if ((ref_total[LAST_FRAME] &&
-        cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
+         cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
         (ref_total[GOLDEN_FRAME] == 0 ||
-         cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50
-           < ref_total[GOLDEN_FRAME]) &&
+         cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 <
+             ref_total[GOLDEN_FRAME]) &&
         (ref_total[ALTREF_FRAME] == 0 ||
-         cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50
-           < ref_total[ALTREF_FRAME]))
+         cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50 <
+             ref_total[ALTREF_FRAME]))
       mask |= 1 << ifilter;
   }
   return mask;
 }
 
-static void encode_frame_to_data_rate(VP10_COMP *cpi,
-                                      size_t *size,
+static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
                                       uint8_t *dest,
                                       unsigned int *frame_flags) {
   VP10_COMMON *const cm = &cpi->common;
@@ -3492,10 +3238,8 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
   // Set default state for segment based loop filter update flags.
   cm->lf.mode_ref_delta_update = 0;
 
-  if (cpi->oxcf.pass == 2 &&
-      cpi->sf.adaptive_interp_filter_search)
-    cpi->sf.interp_filter_search_mask =
-        setup_interp_filter_search_mask(cpi);
+  if (cpi->oxcf.pass == 2 && cpi->sf.adaptive_interp_filter_search)
+    cpi->sf.interp_filter_search_mask = setup_interp_filter_search_mask(cpi);
 
   // Set various flags etc to special state if it is a key frame.
   if (frame_is_intra_only(cm)) {
@@ -3525,8 +3269,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
 
   // For 1 pass CBR, check if we are dropping this frame.
   // Never drop on key frame.
-  if (oxcf->pass == 0 &&
-      oxcf->rc_mode == VPX_CBR &&
+  if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
       cm->frame_type != KEY_FRAME) {
     if (vp10_rc_drop_frame(cpi)) {
       vp10_rc_postencode_update_drop_frame(cpi);
@@ -3560,8 +3303,8 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
   if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
 #if CONFIG_VPX_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
-      cpi->ambient_err = vp10_highbd_get_y_sse(cpi->Source,
-                                              get_frame_new_buffer(cm));
+      cpi->ambient_err =
+          vp10_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
     } else {
       cpi->ambient_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
     }
@@ -3571,13 +3314,12 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
   }
 
  // If the encoder forced a KEY_FRAME decision, refresh the last frame.
-  if (cm->frame_type == KEY_FRAME)
-    cpi->refresh_last_frame = 1;
+  if (cm->frame_type == KEY_FRAME) cpi->refresh_last_frame = 1;
 
   cm->frame_to_show = get_frame_new_buffer(cm);
   cm->frame_to_show->color_space = cm->color_space;
   cm->frame_to_show->color_range = cm->color_range;
-  cm->frame_to_show->render_width  = cm->render_width;
+  cm->frame_to_show->render_width = cm->render_width;
   cm->frame_to_show->render_height = cm->render_height;
 
   // Pick the loop filter level for the frame.
@@ -3586,8 +3328,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
   // build the bitstream
   vp10_pack_bitstream(cpi, dest, size);
 
-  if (cm->seg.update_map)
-    update_reference_segmentation_map(cpi);
+  if (cm->seg.update_map) update_reference_segmentation_map(cpi);
 
   if (frame_is_intra_only(cm) == 0) {
     release_scaled_references(cpi);
@@ -3603,8 +3344,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
 #if CONFIG_MISC_FIXES
     vp10_adapt_intra_frame_probs(cm);
 #else
-    if (!frame_is_intra_only(cm))
-      vp10_adapt_intra_frame_probs(cm);
+    if (!frame_is_intra_only(cm)) vp10_adapt_intra_frame_probs(cm);
 #endif
   }
 
@@ -3653,8 +3393,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
   cm->last_height = cm->height;
 
   // reset to normal state now that we are done.
-  if (!cm->show_existing_frame)
-    cm->last_show_frame = cm->show_frame;
+  if (!cm->show_existing_frame) cm->last_show_frame = cm->show_frame;
 
   if (cm->show_frame) {
     vp10_swap_mi_and_prev_mi(cm);
@@ -3675,8 +3414,8 @@ static void Pass0Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
   encode_frame_to_data_rate(cpi, size, dest, frame_flags);
 }
 
-static void Pass2Encode(VP10_COMP *cpi, size_t *size,
-                        uint8_t *dest, unsigned int *frame_flags) {
+static void Pass2Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+                        unsigned int *frame_flags) {
   cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
   encode_frame_to_data_rate(cpi, size, dest, frame_flags);
 
@@ -3725,8 +3464,8 @@ static void check_initial_width(VP10_COMP *cpi,
 }
 
 int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
-                          YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
-                          int64_t end_time) {
+                           YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+                           int64_t end_time) {
   VP10_COMMON *cm = &cpi->common;
   struct vpx_usec_timer timer;
   int res = 0;
@@ -3743,9 +3482,9 @@ int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
 
   if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
 #if CONFIG_VPX_HIGHBITDEPTH
-                         use_highbitdepth,
+                          use_highbitdepth,
 #endif  // CONFIG_VPX_HIGHBITDEPTH
-                         frame_flags))
+                          frame_flags))
     res = -1;
   vpx_usec_timer_mark(&timer);
   cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
@@ -3766,17 +3505,13 @@ int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
   return res;
 }
 
-
 static int frame_is_reference(const VP10_COMP *cpi) {
   const VP10_COMMON *cm = &cpi->common;
 
-  return cm->frame_type == KEY_FRAME ||
-         cpi->refresh_last_frame ||
-         cpi->refresh_golden_frame ||
-         cpi->refresh_alt_ref_frame ||
+  return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
+         cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame ||
          cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF ||
-         cm->lf.mode_ref_delta_update ||
-         cm->seg.update_map ||
+         cm->lf.mode_ref_delta_update || cm->seg.update_map ||
          cm->seg.update_data;
 }
 
@@ -3789,8 +3524,8 @@ static void adjust_frame_rate(VP10_COMP *cpi,
     this_duration = source->ts_end - source->ts_start;
     step = 1;
   } else {
-    int64_t last_duration = cpi->last_end_time_stamp_seen
-        - cpi->last_time_stamp_seen;
+    int64_t last_duration =
+        cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
 
     this_duration = source->ts_end - cpi->last_end_time_stamp_seen;
 
@@ -3844,10 +3579,10 @@ static void check_src_altref(VP10_COMP *cpi,
   if (cpi->oxcf.pass == 2) {
     const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
     rc->is_src_frame_alt_ref =
-      (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
+        (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
   } else {
-    rc->is_src_frame_alt_ref = cpi->alt_ref_source &&
-                               (source == cpi->alt_ref_source);
+    rc->is_src_frame_alt_ref =
+        cpi->alt_ref_source && (source == cpi->alt_ref_source);
   }
 
   if (rc->is_src_frame_alt_ref) {
@@ -3862,8 +3597,8 @@ static void check_src_altref(VP10_COMP *cpi,
 
 #if CONFIG_INTERNAL_STATS
 extern double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
-                                 const unsigned char *img2, int img2_pitch,
-                                 int width, int height);
+                                  const unsigned char *img2, int img2_pitch,
+                                  int width, int height);
 
 static void adjust_image_stat(double y, double u, double v, double all,
                               ImageStat *s) {
@@ -3876,13 +3611,13 @@ static void adjust_image_stat(double y, double u, double v, double all,
 #endif  // CONFIG_INTERNAL_STATS
 
 int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
-                            size_t *size, uint8_t *dest,
-                            int64_t *time_stamp, int64_t *time_end, int flush) {
+                             size_t *size, uint8_t *dest, int64_t *time_stamp,
+                             int64_t *time_end, int flush) {
   const VP10EncoderConfig *const oxcf = &cpi->oxcf;
   VP10_COMMON *const cm = &cpi->common;
   BufferPool *const pool = cm->buffer_pool;
   RATE_CONTROL *const rc = &cpi->rc;
-  struct vpx_usec_timer  cmptimer;
+  struct vpx_usec_timer cmptimer;
   YV12_BUFFER_CONFIG *force_src_buffer = NULL;
   struct lookahead_entry *last_source = NULL;
   struct lookahead_entry *source = NULL;
@@ -3902,10 +3637,11 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
 
   // Normal defaults
   cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
-  cm->refresh_frame_context =
-      oxcf->error_resilient_mode ? REFRESH_FRAME_CONTEXT_OFF :
-          oxcf->frame_parallel_decoding_mode ? REFRESH_FRAME_CONTEXT_FORWARD
-                                             : REFRESH_FRAME_CONTEXT_BACKWARD;
+  cm->refresh_frame_context = oxcf->error_resilient_mode
+                                  ? REFRESH_FRAME_CONTEXT_OFF
+                                  : oxcf->frame_parallel_decoding_mode
+                                        ? REFRESH_FRAME_CONTEXT_FORWARD
+                                        : REFRESH_FRAME_CONTEXT_BACKWARD;
 
   cpi->refresh_last_frame = 1;
   cpi->refresh_golden_frame = 0;
@@ -3959,8 +3695,8 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   }
 
   if (source) {
-    cpi->un_scaled_source = cpi->Source = force_src_buffer ? force_src_buffer
-                                                           : &source->img;
+    cpi->un_scaled_source = cpi->Source =
+        force_src_buffer ? force_src_buffer : &source->img;
 
     cpi->unscaled_last_source = last_source != NULL ? &last_source->img : NULL;
 
@@ -3971,7 +3707,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   } else {
     *size = 0;
     if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
-      vp10_end_first_pass(cpi);    /* get last stats packet */
+      vp10_end_first_pass(cpi); /* get last stats packet */
       cpi->twopass.first_pass_done = 1;
     }
     return -1;
@@ -3997,8 +3733,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   }
   cm->new_fb_idx = get_free_fb(cm);
 
-  if (cm->new_fb_idx == INVALID_IDX)
-    return -1;
+  if (cm->new_fb_idx == INVALID_IDX) return -1;
 
   cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
 
@@ -4023,8 +3758,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   }
 
   if (cpi->oxcf.pass != 0 || frame_is_intra_only(cm) == 1) {
-    for (i = 0; i < MAX_REF_FRAMES; ++i)
-      cpi->scaled_ref_idx[i] = INVALID_IDX;
+    for (i = 0; i < MAX_REF_FRAMES; ++i) cpi->scaled_ref_idx[i] = INVALID_IDX;
   }
 
   if (oxcf->pass == 1) {
@@ -4087,8 +3821,8 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
 
 #if CONFIG_VPX_HIGHBITDEPTH
           if (cm->use_highbitdepth) {
-            frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight,
-                                               (int)cm->bit_depth);
+            frame_ssim2 =
+                vpx_highbd_calc_ssim(orig, recon, &weight, (int)cm->bit_depth);
           } else {
             frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
           }
@@ -4096,7 +3830,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
           frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-          cpi->worst_ssim= VPXMIN(cpi->worst_ssim, frame_ssim2);
+          cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2);
           cpi->summed_quality += frame_ssim2 * weight;
           cpi->summed_weights += weight;
 
@@ -4140,8 +3874,8 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
               &cpi->metrics, 1);
 
           const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
-          double consistency = vpx_sse_to_psnr(samples, peak,
-                                             (double)cpi->total_inconsistency);
+          double consistency =
+              vpx_sse_to_psnr(samples, peak, (double)cpi->total_inconsistency);
           if (consistency > 0.0)
             cpi->worst_consistency =
                 VPXMIN(cpi->worst_consistency, consistency);
@@ -4156,8 +3890,8 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
           frame_all = vpx_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y,
                                             &u, &v, (int)cm->bit_depth);
         } else {
-          frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u,
-                                     &v);
+          frame_all =
+              vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v);
         }
 #else
         frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v);
@@ -4169,8 +3903,8 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
 #endif
       {
         double y, u, v, frame_all;
-        frame_all = vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u,
-                                      &v);
+        frame_all =
+            vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u, &v);
         adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
         /* TODO(JBB): add 10/12 bit support */
       }
@@ -4212,13 +3946,12 @@ int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
   }
 }
 
-int vp10_set_internal_size(VP10_COMP *cpi,
-                          VPX_SCALING horiz_mode, VPX_SCALING vert_mode) {
+int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+                           VPX_SCALING vert_mode) {
   VP10_COMMON *cm = &cpi->common;
   int hr = 0, hs = 0, vr = 0, vs = 0;
 
-  if (horiz_mode > ONETWO || vert_mode > ONETWO)
-    return -1;
+  if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;
 
   Scale2Ratio(horiz_mode, &hr, &hs);
   Scale2Ratio(vert_mode, &vr, &vs);
@@ -4235,7 +3968,7 @@ int vp10_set_internal_size(VP10_COMP *cpi,
 }
 
 int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
-                         unsigned int height) {
+                          unsigned int height) {
   VP10_COMMON *cm = &cpi->common;
 #if CONFIG_VPX_HIGHBITDEPTH
   check_initial_width(cpi, cm->use_highbitdepth, 1, 1);
@@ -4267,7 +4000,7 @@ int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
 }
 
 int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
-                      const YV12_BUFFER_CONFIG *b) {
+                       const YV12_BUFFER_CONFIG *b) {
   assert(a->y_crop_width == b->y_crop_width);
   assert(a->y_crop_height == b->y_crop_height);
 
@@ -4277,7 +4010,7 @@ int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
 
 #if CONFIG_VPX_HIGHBITDEPTH
 int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
-                             const YV12_BUFFER_CONFIG *b) {
+                              const YV12_BUFFER_CONFIG *b) {
   assert(a->y_crop_width == b->y_crop_width);
   assert(a->y_crop_height == b->y_crop_height);
   assert((a->flags & YV12_FLAG_HIGHBITDEPTH) != 0);
@@ -4288,40 +4021,32 @@ int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-int vp10_get_quantizer(VP10_COMP *cpi) {
-  return cpi->common.base_qindex;
-}
+int vp10_get_quantizer(VP10_COMP *cpi) { return cpi->common.base_qindex; }
 
 void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags) {
-  if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF |
-               VP8_EFLAG_NO_REF_ARF)) {
+  if (flags &
+      (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
     int ref = 7;
 
-    if (flags & VP8_EFLAG_NO_REF_LAST)
-      ref ^= VPX_LAST_FLAG;
+    if (flags & VP8_EFLAG_NO_REF_LAST) ref ^= VPX_LAST_FLAG;
 
-    if (flags & VP8_EFLAG_NO_REF_GF)
-      ref ^= VPX_GOLD_FLAG;
+    if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VPX_GOLD_FLAG;
 
-    if (flags & VP8_EFLAG_NO_REF_ARF)
-      ref ^= VPX_ALT_FLAG;
+    if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VPX_ALT_FLAG;
 
     vp10_use_as_reference(cpi, ref);
   }
 
-  if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
-               VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF |
-               VP8_EFLAG_FORCE_ARF)) {
+  if (flags &
+      (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+       VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) {
     int upd = 7;
 
-    if (flags & VP8_EFLAG_NO_UPD_LAST)
-      upd ^= VPX_LAST_FLAG;
+    if (flags & VP8_EFLAG_NO_UPD_LAST) upd ^= VPX_LAST_FLAG;
 
-    if (flags & VP8_EFLAG_NO_UPD_GF)
-      upd ^= VPX_GOLD_FLAG;
+    if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VPX_GOLD_FLAG;
 
-    if (flags & VP8_EFLAG_NO_UPD_ARF)
-      upd ^= VPX_ALT_FLAG;
+    if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VPX_ALT_FLAG;
 
     vp10_update_reference(cpi, upd);
   }
diff --git a/vp10/encoder/encoder.h b/vp10/encoder/encoder.h
index 87219edabbd09da05e7f4eb7a15a1a34d49c0d11..3547cbae3e77893925e367a2a7552cc17a537a79 100644
--- a/vp10/encoder/encoder.h
+++ b/vp10/encoder/encoder.h
@@ -64,7 +64,6 @@ typedef struct {
   FRAME_CONTEXT fc;
 } CODING_CONTEXT;
 
-
 typedef enum {
   // encode_breakout is disabled.
   ENCODE_BREAKOUT_DISABLED = 0,
@@ -75,10 +74,10 @@ typedef enum {
 } ENCODE_BREAKOUT_TYPE;
 
 typedef enum {
-  NORMAL      = 0,
-  FOURFIVE    = 1,
-  THREEFIVE   = 2,
-  ONETWO      = 3
+  NORMAL = 0,
+  FOURFIVE = 1,
+  THREEFIVE = 2,
+  ONETWO = 3
 } VPX_SCALING;
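// (Illustrative sketch, not part of this change: Scale2Ratio() in encoder.c
//  expands these modes into numerator/denominator pairs whose ratios follow
//  the names, e.g. FOURFIVE -> 4/5, THREEFIVE -> 3/5, ONETWO -> 1/2.
//  A hypothetical caller:
//    int hr, hs;
//    Scale2Ratio(ONETWO, &hr, &hs);   // hr = 1, hs = 2
//    scaled_w = cm->width * hr / hs;  // half-width output
//  where |scaled_w| is a name invented for this sketch.)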
 
 typedef enum {
@@ -98,7 +97,7 @@ typedef enum {
 } MODE;
 
 typedef enum {
-  FRAMEFLAGS_KEY    = 1 << 0,
+  FRAMEFLAGS_KEY = 1 << 0,
   FRAMEFLAGS_GOLDEN = 1 << 1,
   FRAMEFLAGS_ALTREF = 1 << 2,
 } FRAMETYPE_FLAGS;
@@ -120,14 +119,14 @@ typedef enum {
 typedef struct VP10EncoderConfig {
   BITSTREAM_PROFILE profile;
   vpx_bit_depth_t bit_depth;     // Codec bit-depth.
-  int width;  // width of data passed to the compressor
-  int height;  // height of data passed to the compressor
+  int width;                     // width of data passed to the compressor
+  int height;                    // height of data passed to the compressor
   unsigned int input_bit_depth;  // Input bit depth.
-  double init_framerate;  // set to passed in framerate
-  int64_t target_bandwidth;  // bandwidth to be used in kilobits per second
+  double init_framerate;         // set to passed in framerate
+  int64_t target_bandwidth;      // bandwidth to be used in kilobits per second
 
   int noise_sensitivity;  // pre processing blur: recommendation 0
-  int sharpness;  // sharpening output: recommendation 0:
+  int sharpness;          // sharpening output: recommendation 0
   int speed;
   // maximum allowed bitrate for any intra frame in % of bitrate target.
   unsigned int rc_max_intra_bitrate_pct;
@@ -179,7 +178,7 @@ typedef struct VP10EncoderConfig {
   int frame_periodic_boost;
 
   // two pass datarate control
-  int two_pass_vbrbias;        // two pass datarate control tweaks
+  int two_pass_vbrbias;  // two pass datarate control tweaks
   int two_pass_vbrmin_section;
   int two_pass_vbrmax_section;
   // END DATARATE CONTROL OPTIONS
@@ -267,15 +266,10 @@ typedef struct ActiveMap {
   unsigned char *map;
 } ActiveMap;
 
-typedef enum {
-  Y,
-  U,
-  V,
-  ALL
-} STAT_TYPE;
+typedef enum { Y, U, V, ALL } STAT_TYPE;
 
 typedef struct IMAGE_STAT {
-  double stat[ALL+1];
+  double stat[ALL + 1];
   double worst;
 } ImageStat;
 
@@ -287,8 +281,8 @@ typedef struct VP10_COMP {
   DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
   VP10_COMMON common;
   VP10EncoderConfig oxcf;
-  struct lookahead_ctx    *lookahead;
-  struct lookahead_entry  *alt_ref_source;
+  struct lookahead_ctx *lookahead;
+  struct lookahead_entry *alt_ref_source;
 
   YV12_BUFFER_CONFIG *Source;
   YV12_BUFFER_CONFIG *Last_Source;  // NULL for first frame and alt_ref frames
@@ -346,11 +340,11 @@ typedef struct VP10_COMP {
 
   int interp_filter_selected[MAX_REF_FRAMES][SWITCHABLE];
 
-  struct vpx_codec_pkt_list  *output_pkt_list;
+  struct vpx_codec_pkt_list *output_pkt_list;
 
   MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
-  int mbgraph_n_frames;             // number of frames filled in the above
-  int static_mb_pct;                // % forced skip mbs by segmentation
+  int mbgraph_n_frames;  // number of frames filled in the above
+  int static_mb_pct;     // % forced skip mbs by segmentation
   int ref_frame_flags;
 
   SPEED_FEATURES sf;
@@ -370,7 +364,7 @@ typedef struct VP10_COMP {
   unsigned char *segmentation_map;
 
  // segment threshold for encode breakout
-  int  segment_encode_breakout[MAX_SEGMENTS];
+  int segment_encode_breakout[MAX_SEGMENTS];
 
   CYCLIC_REFRESH *cyclic_refresh;
   ActiveMap active_map;
@@ -392,11 +386,10 @@ typedef struct VP10_COMP {
 
   YV12_BUFFER_CONFIG alt_ref_buffer;
 
-
 #if CONFIG_INTERNAL_STATS
   unsigned int mode_chosen_counts[MAX_MODES];
 
-  int    count;
+  int count;
   uint64_t total_sq_error;
   uint64_t total_samples;
   ImageStat psnr;
@@ -408,7 +401,7 @@ typedef struct VP10_COMP {
   double total_blockiness;
   double worst_blockiness;
 
-  int    bytes;
+  int bytes;
   double summed_quality;
   double summed_weights;
   double summedp_quality;
@@ -491,20 +484,20 @@ typedef struct VP10_COMP {
 void vp10_initialize_enc(void);
 
 struct VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
-                                       BufferPool *const pool);
+                                         BufferPool *const pool);
 void vp10_remove_compressor(VP10_COMP *cpi);
 
 void vp10_change_config(VP10_COMP *cpi, const VP10EncoderConfig *oxcf);
 
-  // receive a frames worth of data. caller can assume that a copy of this
-  // frame is made and not just a copy of the pointer..
+// Receive a frame's worth of data. The caller can assume that a copy of
+// this frame is made and not just a copy of the pointer.
 int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
-                          YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
-                          int64_t end_time_stamp);
+                           YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+                           int64_t end_time_stamp);
 
 int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
-                            size_t *size, uint8_t *dest,
-                            int64_t *time_stamp, int64_t *time_end, int flush);
+                             size_t *size, uint8_t *dest, int64_t *time_stamp,
+                             int64_t *time_end, int flush);
 
 int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest);
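// (Usage sketch, illustrative only and not part of this change; the names
//  |raw|, |buf|, |pts| and |dur| below are hypothetical and buffer
//  allocation is the caller's responsibility. The typical loop pushes a raw
//  frame, then drains compressed data while the encoder reports success:
//
//    if (vp10_receive_raw_frame(cpi, 0, &raw, pts, pts + dur) == 0) {
//      size_t sz = 0;
//      int64_t ts0, ts1;
//      unsigned int fflags = 0;
//      while (vp10_get_compressed_data(cpi, &fflags, &sz, buf, &ts0, &ts1,
//                                      0) == 0) {
//        if (sz) { /* emit |sz| bytes from |buf| */ }
//      }
//    }
//  )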
 
@@ -513,10 +506,10 @@ int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags);
 void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags);
 
 int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
-                           YV12_BUFFER_CONFIG *sd);
+                            YV12_BUFFER_CONFIG *sd);
 
 int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
-                          YV12_BUFFER_CONFIG *sd);
+                           YV12_BUFFER_CONFIG *sd);
 
 int vp10_update_entropy(VP10_COMP *cpi, int update);
 
@@ -524,17 +517,16 @@ int vp10_set_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
 
 int vp10_get_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
 
-int vp10_set_internal_size(VP10_COMP *cpi,
-                          VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
+int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+                           VPX_SCALING vert_mode);
 
 int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
-                         unsigned int height);
+                          unsigned int height);
 
 int vp10_get_quantizer(struct VP10_COMP *cpi);
 
 static INLINE int frame_is_kf_gf_arf(const VP10_COMP *cpi) {
-  return frame_is_intra_only(&cpi->common) ||
-         cpi->refresh_alt_ref_frame ||
+  return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame ||
          (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
 }
 
@@ -560,8 +552,8 @@ static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
     VP10_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
   VP10_COMMON *const cm = &cpi->common;
   const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
-  return
-      buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf : NULL;
+  return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
+                                : NULL;
 }
 
 static INLINE int get_token_alloc(int mb_rows, int mb_cols) {
@@ -582,10 +574,11 @@ static INLINE int allocated_tokens(TileInfo tile) {
   return get_token_alloc(tile_mb_rows, tile_mb_cols);
 }
 
-int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
+                       const YV12_BUFFER_CONFIG *b);
 #if CONFIG_VPX_HIGHBITDEPTH
 int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
-                             const YV12_BUFFER_CONFIG *b);
+                              const YV12_BUFFER_CONFIG *b);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 void vp10_alloc_compressor_data(VP10_COMP *cpi);
@@ -597,12 +590,12 @@ void vp10_update_reference_frames(VP10_COMP *cpi);
 void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv);
 
 YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
-                                               YV12_BUFFER_CONFIG *unscaled,
-                                               YV12_BUFFER_CONFIG *scaled);
+                                                YV12_BUFFER_CONFIG *unscaled,
+                                                YV12_BUFFER_CONFIG *scaled);
 
 YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
-                                          YV12_BUFFER_CONFIG *unscaled,
-                                          YV12_BUFFER_CONFIG *scaled);
+                                           YV12_BUFFER_CONFIG *unscaled,
+                                           YV12_BUFFER_CONFIG *scaled);
 
 void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags);
 
@@ -614,10 +607,10 @@ static INLINE int is_altref_enabled(const VP10_COMP *const cpi) {
 static INLINE void set_ref_ptrs(VP10_COMMON *cm, MACROBLOCKD *xd,
                                 MV_REFERENCE_FRAME ref0,
                                 MV_REFERENCE_FRAME ref1) {
-  xd->block_refs[0] = &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME
-                                                         : 0];
-  xd->block_refs[1] = &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME
-                                                         : 0];
+  xd->block_refs[0] =
+      &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME : 0];
+  xd->block_refs[1] =
+      &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME : 0];
 }
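// (Note, illustrative only: reference values below LAST_FRAME, i.e.
//  INTRA_FRAME or NONE, are clamped to slot 0 above so that
//  xd->block_refs[] always points at a valid frame_refs entry.)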
 
 static INLINE int get_chessboard_index(const int frame_index) {
diff --git a/vp10/encoder/ethread.c b/vp10/encoder/ethread.c
index ad47ccf0430c17fae25e1e4875f3ee09cf0dc127..21f9d71d837beb00c15ae88f08fdf8668e6573e4 100644
--- a/vp10/encoder/ethread.c
+++ b/vp10/encoder/ethread.c
@@ -31,7 +31,6 @@ static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
               td->rd_counts.coef_counts[i][j][k][l][m][n] +=
                   td_t->rd_counts.coef_counts[i][j][k][l][m][n];
 
-
  // Counts of all motion searches and exhaustive mesh searches.
   td->rd_counts.m_search_count += td_t->rd_counts.m_search_count;
   td->rd_counts.ex_search_count += td_t->rd_counts.ex_search_count;
@@ -44,10 +43,10 @@ static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) {
   const int tile_rows = 1 << cm->log2_tile_rows;
   int t;
 
-  (void) unused;
+  (void)unused;
 
   for (t = thread_data->start; t < tile_rows * tile_cols;
-      t += cpi->num_workers) {
+       t += cpi->num_workers) {
     int tile_row = t / tile_cols;
     int tile_col = t % tile_cols;
 
@@ -74,8 +73,7 @@ void vp10_encode_tiles_mt(VP10_COMP *cpi) {
                     vpx_malloc(allocated_workers * sizeof(*cpi->workers)));
 
     CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
-                    vpx_calloc(allocated_workers,
-                    sizeof(*cpi->tile_thr_data)));
+                    vpx_calloc(allocated_workers, sizeof(*cpi->tile_thr_data)));
 
     for (i = 0; i < allocated_workers; i++) {
       VPxWorker *const worker = &cpi->workers[i];
@@ -122,7 +120,7 @@ void vp10_encode_tiles_mt(VP10_COMP *cpi) {
     worker->hook = (VPxWorkerHook)enc_worker_hook;
     worker->data1 = &cpi->tile_thr_data[i];
     worker->data2 = NULL;
-    thread_data = (EncWorkerData*)worker->data1;
+    thread_data = (EncWorkerData *)worker->data1;
 
     // Before encoding a frame, copy the thread data from cpi.
     if (thread_data->td != &cpi->td) {
@@ -138,7 +136,7 @@ void vp10_encode_tiles_mt(VP10_COMP *cpi) {
   // Encode a frame
   for (i = 0; i < num_workers; i++) {
     VPxWorker *const worker = &cpi->workers[i];
-    EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
 
     // Set the starting tile for each thread.
     thread_data->start = i;
@@ -157,7 +155,7 @@ void vp10_encode_tiles_mt(VP10_COMP *cpi) {
 
   for (i = 0; i < num_workers; i++) {
     VPxWorker *const worker = &cpi->workers[i];
-    EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
 
     // Accumulate counters.
     if (i < cpi->num_workers - 1) {
diff --git a/vp10/encoder/extend.c b/vp10/encoder/extend.c
index b6d32c68f7a0380002b41843869cb3198e7c44b4..3b0c20dcce4f3bdc38991324b5d835d7988a473e 100644
--- a/vp10/encoder/extend.c
+++ b/vp10/encoder/extend.c
@@ -16,8 +16,7 @@
 #include "vp10/encoder/extend.h"
 
 static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
-                                  uint8_t *dst, int dst_pitch,
-                                  int w, int h,
+                                  uint8_t *dst, int dst_pitch, int w, int h,
                                   int extend_top, int extend_left,
                                   int extend_bottom, int extend_right) {
   int i, linesize;
@@ -43,7 +42,7 @@ static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
   src_ptr1 = dst - extend_left;
   src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
   dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
-  dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+  dst_ptr2 = dst + dst_pitch * (h)-extend_left;
   linesize = extend_left + extend_right + w;
 
   for (i = 0; i < extend_top; i++) {
@@ -59,9 +58,8 @@ static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
-                                         uint8_t *dst8, int dst_pitch,
-                                         int w, int h,
-                                         int extend_top, int extend_left,
+                                         uint8_t *dst8, int dst_pitch, int w,
+                                         int h, int extend_top, int extend_left,
                                          int extend_bottom, int extend_right) {
   int i, linesize;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -88,7 +86,7 @@ static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
   src_ptr1 = dst - extend_left;
   src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
   dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
-  dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+  dst_ptr2 = dst + dst_pitch * (h)-extend_left;
   linesize = extend_left + extend_right + w;
 
   for (i = 0; i < extend_top; i++) {
@@ -104,7 +102,7 @@ static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
-                               YV12_BUFFER_CONFIG *dst) {
+                                YV12_BUFFER_CONFIG *dst) {
   // Extend src frame in buffer
   // Altref filtering assumes 16 pixel extension
   const int et_y = 16;
@@ -127,51 +125,46 @@ void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
-    highbd_copy_and_extend_plane(src->y_buffer, src->y_stride,
-                                 dst->y_buffer, dst->y_stride,
-                                 src->y_crop_width, src->y_crop_height,
-                                 et_y, el_y, eb_y, er_y);
-
-    highbd_copy_and_extend_plane(src->u_buffer, src->uv_stride,
-                                 dst->u_buffer, dst->uv_stride,
-                                 src->uv_crop_width, src->uv_crop_height,
-                                 et_uv, el_uv, eb_uv, er_uv);
-
-    highbd_copy_and_extend_plane(src->v_buffer, src->uv_stride,
-                                 dst->v_buffer, dst->uv_stride,
-                                 src->uv_crop_width, src->uv_crop_height,
-                                 et_uv, el_uv, eb_uv, er_uv);
+    highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+                                 dst->y_stride, src->y_crop_width,
+                                 src->y_crop_height, et_y, el_y, eb_y, er_y);
+
+    highbd_copy_and_extend_plane(
+        src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride,
+        src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
+
+    highbd_copy_and_extend_plane(
+        src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride,
+        src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
     return;
   }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-  copy_and_extend_plane(src->y_buffer, src->y_stride,
-                        dst->y_buffer, dst->y_stride,
-                        src->y_crop_width, src->y_crop_height,
+  copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+                        dst->y_stride, src->y_crop_width, src->y_crop_height,
                         et_y, el_y, eb_y, er_y);
 
-  copy_and_extend_plane(src->u_buffer, src->uv_stride,
-                        dst->u_buffer, dst->uv_stride,
-                        src->uv_crop_width, src->uv_crop_height,
+  copy_and_extend_plane(src->u_buffer, src->uv_stride, dst->u_buffer,
+                        dst->uv_stride, src->uv_crop_width, src->uv_crop_height,
                         et_uv, el_uv, eb_uv, er_uv);
 
-  copy_and_extend_plane(src->v_buffer, src->uv_stride,
-                        dst->v_buffer, dst->uv_stride,
-                        src->uv_crop_width, src->uv_crop_height,
+  copy_and_extend_plane(src->v_buffer, src->uv_stride, dst->v_buffer,
+                        dst->uv_stride, src->uv_crop_width, src->uv_crop_height,
                         et_uv, el_uv, eb_uv, er_uv);
 }
 
 void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
-                                         YV12_BUFFER_CONFIG *dst,
-                                         int srcy, int srcx,
-                                         int srch, int srcw) {
+                                          YV12_BUFFER_CONFIG *dst, int srcy,
+                                          int srcx, int srch, int srcw) {
  // If the side is not touching the border, don't extend.
   const int et_y = srcy ? 0 : dst->border;
   const int el_y = srcx ? 0 : dst->border;
-  const int eb_y = srcy + srch != src->y_height ? 0 :
-                      dst->border + dst->y_height - src->y_height;
-  const int er_y = srcx + srcw != src->y_width ? 0 :
-                      dst->border + dst->y_width - src->y_width;
+  const int eb_y = srcy + srch != src->y_height
+                       ? 0
+                       : dst->border + dst->y_height - src->y_height;
+  const int er_y = srcx + srcw != src->y_width
+                       ? 0
+                       : dst->border + dst->y_width - src->y_width;
   const int src_y_offset = srcy * src->y_stride + srcx;
   const int dst_y_offset = srcy * dst->y_stride + srcx;
 
@@ -185,17 +178,14 @@ void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
   const int srcw_uv = ROUND_POWER_OF_TWO(srcw, 1);
 
   copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
-                        dst->y_buffer + dst_y_offset, dst->y_stride,
-                        srcw, srch,
+                        dst->y_buffer + dst_y_offset, dst->y_stride, srcw, srch,
                         et_y, el_y, eb_y, er_y);
 
   copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride,
-                        dst->u_buffer + dst_uv_offset, dst->uv_stride,
-                        srcw_uv, srch_uv,
-                        et_uv, el_uv, eb_uv, er_uv);
+                        dst->u_buffer + dst_uv_offset, dst->uv_stride, srcw_uv,
+                        srch_uv, et_uv, el_uv, eb_uv, er_uv);
 
   copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride,
-                        dst->v_buffer + dst_uv_offset, dst->uv_stride,
-                        srcw_uv, srch_uv,
-                        et_uv, el_uv, eb_uv, er_uv);
+                        dst->v_buffer + dst_uv_offset, dst->uv_stride, srcw_uv,
+                        srch_uv, et_uv, el_uv, eb_uv, er_uv);
 }
diff --git a/vp10/encoder/extend.h b/vp10/encoder/extend.h
index 6f502ef6a0d44474d5438eeb3a1269658df63bda..f7967fde98011dc72db4f279a9a61d890475d3a0 100644
--- a/vp10/encoder/extend.h
+++ b/vp10/encoder/extend.h
@@ -18,14 +18,12 @@
 extern "C" {
 #endif
 
-
 void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
-                               YV12_BUFFER_CONFIG *dst);
+                                YV12_BUFFER_CONFIG *dst);
 
 void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
-                                         YV12_BUFFER_CONFIG *dst,
-                                         int srcy, int srcx,
-                                         int srch, int srcw);
+                                          YV12_BUFFER_CONFIG *dst, int srcy,
+                                          int srcx, int srch, int srcw);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/vp10/encoder/firstpass.c b/vp10/encoder/firstpass.c
index 450c939e3c794e7de592f1d580d573f56918ff44..b5c728c97843b697e84f8d77f00d595f77256417 100644
--- a/vp10/encoder/firstpass.c
+++ b/vp10/encoder/firstpass.c
@@ -38,35 +38,34 @@
 #include "vp10/encoder/rd.h"
 #include "vpx_dsp/variance.h"
 
-#define OUTPUT_FPF          0
-#define ARF_STATS_OUTPUT    0
+#define OUTPUT_FPF 0
+#define ARF_STATS_OUTPUT 0
 
 #define GROUP_ADAPTIVE_MAXQ 1
 
-#define BOOST_BREAKOUT      12.5
-#define BOOST_FACTOR        12.5
-#define ERR_DIVISOR         128.0
-#define FACTOR_PT_LOW       0.70
-#define FACTOR_PT_HIGH      0.90
-#define FIRST_PASS_Q        10.0
-#define GF_MAX_BOOST        96.0
-#define INTRA_MODE_PENALTY  1024
-#define KF_MAX_BOOST        128.0
-#define MIN_ARF_GF_BOOST    240
-#define MIN_DECAY_FACTOR    0.01
-#define MIN_KF_BOOST        300
+#define BOOST_BREAKOUT 12.5
+#define BOOST_FACTOR 12.5
+#define ERR_DIVISOR 128.0
+#define FACTOR_PT_LOW 0.70
+#define FACTOR_PT_HIGH 0.90
+#define FIRST_PASS_Q 10.0
+#define GF_MAX_BOOST 96.0
+#define INTRA_MODE_PENALTY 1024
+#define KF_MAX_BOOST 128.0
+#define MIN_ARF_GF_BOOST 240
+#define MIN_DECAY_FACTOR 0.01
+#define MIN_KF_BOOST 300
 #define NEW_MV_MODE_PENALTY 32
-#define DARK_THRESH         64
-#define DEFAULT_GRP_WEIGHT  1.0
-#define RC_FACTOR_MIN       0.75
-#define RC_FACTOR_MAX       1.75
-
+#define DARK_THRESH 64
+#define DEFAULT_GRP_WEIGHT 1.0
+#define RC_FACTOR_MIN 0.75
+#define RC_FACTOR_MAX 1.75
 
 #define NCOUNT_INTRA_THRESH 8192
 #define NCOUNT_INTRA_FACTOR 3
 #define NCOUNT_FRAME_II_THRESH 5.0
 
-#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)
+#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x)-0.000001 : (x) + 0.000001)
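// (Illustration, not part of this change: the macro nudges a value away
//  from zero before it is used as a divisor, so e.g.
//  DOUBLE_DIVIDE_CHECK(0.0) evaluates to 0.000001 and an expression like
//  a / DOUBLE_DIVIDE_CHECK(b) cannot divide by zero.)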
 
 #if ARF_STATS_OUTPUT
 unsigned int arf_count = 0;
@@ -74,8 +73,7 @@ unsigned int arf_count = 0;
 
 // Resets the first pass file to the given position using a relative seek from
 // the current position.
-static void reset_fpf_position(TWO_PASS *p,
-                               const FIRSTPASS_STATS *position) {
+static void reset_fpf_position(TWO_PASS *p, const FIRSTPASS_STATS *position) {
   p->stats_in = position;
 }
 
@@ -90,8 +88,7 @@ static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p, int offset) {
 }
 
 static int input_stats(TWO_PASS *p, FIRSTPASS_STATS *fps) {
-  if (p->stats_in >= p->stats_in_end)
-    return EOF;
+  if (p->stats_in >= p->stats_in_end) return EOF;
 
   *fps = *p->stats_in;
   ++p->stats_in;
@@ -112,39 +109,24 @@ static void output_stats(FIRSTPASS_STATS *stats,
     FILE *fpfile;
     fpfile = fopen("firstpass.stt", "a");
 
-    fprintf(fpfile, "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf %12.4lf"
+    fprintf(fpfile,
+            "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf %12.4lf"
             "%12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf"
             "%12.4lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf\n",
-            stats->frame,
-            stats->weight,
-            stats->intra_error,
-            stats->coded_error,
-            stats->sr_coded_error,
-            stats->pcnt_inter,
-            stats->pcnt_motion,
-            stats->pcnt_second_ref,
-            stats->pcnt_neutral,
-            stats->intra_skip_pct,
-            stats->inactive_zone_rows,
-            stats->inactive_zone_cols,
-            stats->MVr,
-            stats->mvr_abs,
-            stats->MVc,
-            stats->mvc_abs,
-            stats->MVrv,
-            stats->MVcv,
-            stats->mv_in_out_count,
-            stats->new_mv_count,
-            stats->count,
-            stats->duration);
+            stats->frame, stats->weight, stats->intra_error, stats->coded_error,
+            stats->sr_coded_error, stats->pcnt_inter, stats->pcnt_motion,
+            stats->pcnt_second_ref, stats->pcnt_neutral, stats->intra_skip_pct,
+            stats->inactive_zone_rows, stats->inactive_zone_cols, stats->MVr,
+            stats->mvr_abs, stats->MVc, stats->mvc_abs, stats->MVrv,
+            stats->MVcv, stats->mv_in_out_count, stats->new_mv_count,
+            stats->count, stats->duration);
     fclose(fpfile);
   }
 #endif
 }
 
 #if CONFIG_FP_MB_STATS
-static void output_fpmb_stats(uint8_t *this_frame_mb_stats,
-                              VP10_COMMON *cm,
+static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP10_COMMON *cm,
                               struct vpx_codec_pkt_list *pktlist) {
   struct vpx_codec_cx_pkt pkt;
   pkt.kind = VPX_CODEC_FPMB_STATS_PKT;
@@ -160,23 +142,23 @@ static void zero_stats(FIRSTPASS_STATS *section) {
   section->intra_error = 0.0;
   section->coded_error = 0.0;
   section->sr_coded_error = 0.0;
-  section->pcnt_inter  = 0.0;
-  section->pcnt_motion  = 0.0;
+  section->pcnt_inter = 0.0;
+  section->pcnt_motion = 0.0;
   section->pcnt_second_ref = 0.0;
   section->pcnt_neutral = 0.0;
   section->intra_skip_pct = 0.0;
   section->inactive_zone_rows = 0.0;
   section->inactive_zone_cols = 0.0;
   section->MVr = 0.0;
-  section->mvr_abs     = 0.0;
-  section->MVc        = 0.0;
-  section->mvc_abs     = 0.0;
-  section->MVrv       = 0.0;
-  section->MVcv       = 0.0;
-  section->mv_in_out_count  = 0.0;
+  section->mvr_abs = 0.0;
+  section->MVc = 0.0;
+  section->mvc_abs = 0.0;
+  section->MVrv = 0.0;
+  section->MVcv = 0.0;
+  section->mv_in_out_count = 0.0;
   section->new_mv_count = 0.0;
-  section->count      = 0.0;
-  section->duration   = 1.0;
+  section->count = 0.0;
+  section->duration = 1.0;
 }
 
 static void accumulate_stats(FIRSTPASS_STATS *section,
@@ -186,7 +168,7 @@ static void accumulate_stats(FIRSTPASS_STATS *section,
   section->intra_error += frame->intra_error;
   section->coded_error += frame->coded_error;
   section->sr_coded_error += frame->sr_coded_error;
-  section->pcnt_inter  += frame->pcnt_inter;
+  section->pcnt_inter += frame->pcnt_inter;
   section->pcnt_motion += frame->pcnt_motion;
   section->pcnt_second_ref += frame->pcnt_second_ref;
   section->pcnt_neutral += frame->pcnt_neutral;
@@ -194,15 +176,15 @@ static void accumulate_stats(FIRSTPASS_STATS *section,
   section->inactive_zone_rows += frame->inactive_zone_rows;
   section->inactive_zone_cols += frame->inactive_zone_cols;
   section->MVr += frame->MVr;
-  section->mvr_abs     += frame->mvr_abs;
-  section->MVc        += frame->MVc;
-  section->mvc_abs     += frame->mvc_abs;
-  section->MVrv       += frame->MVrv;
-  section->MVcv       += frame->MVcv;
-  section->mv_in_out_count  += frame->mv_in_out_count;
+  section->mvr_abs += frame->mvr_abs;
+  section->MVc += frame->MVc;
+  section->mvc_abs += frame->mvc_abs;
+  section->MVrv += frame->MVrv;
+  section->MVcv += frame->MVcv;
+  section->mv_in_out_count += frame->mv_in_out_count;
   section->new_mv_count += frame->new_mv_count;
-  section->count      += frame->count;
-  section->duration   += frame->duration;
+  section->count += frame->count;
+  section->duration += frame->duration;
 }
 
 static void subtract_stats(FIRSTPASS_STATS *section,
@@ -212,7 +194,7 @@ static void subtract_stats(FIRSTPASS_STATS *section,
   section->intra_error -= frame->intra_error;
   section->coded_error -= frame->coded_error;
   section->sr_coded_error -= frame->sr_coded_error;
-  section->pcnt_inter  -= frame->pcnt_inter;
+  section->pcnt_inter -= frame->pcnt_inter;
   section->pcnt_motion -= frame->pcnt_motion;
   section->pcnt_second_ref -= frame->pcnt_second_ref;
   section->pcnt_neutral -= frame->pcnt_neutral;
@@ -220,15 +202,15 @@ static void subtract_stats(FIRSTPASS_STATS *section,
   section->inactive_zone_rows -= frame->inactive_zone_rows;
   section->inactive_zone_cols -= frame->inactive_zone_cols;
   section->MVr -= frame->MVr;
-  section->mvr_abs     -= frame->mvr_abs;
-  section->MVc        -= frame->MVc;
-  section->mvc_abs     -= frame->mvc_abs;
-  section->MVrv       -= frame->MVrv;
-  section->MVcv       -= frame->MVcv;
-  section->mv_in_out_count  -= frame->mv_in_out_count;
+  section->mvr_abs -= frame->mvr_abs;
+  section->MVc -= frame->MVc;
+  section->mvc_abs -= frame->mvc_abs;
+  section->MVrv -= frame->MVrv;
+  section->MVcv -= frame->MVcv;
+  section->mv_in_out_count -= frame->mv_in_out_count;
   section->new_mv_count -= frame->new_mv_count;
-  section->count      -= frame->count;
-  section->duration   -= frame->duration;
+  section->count -= frame->count;
+  section->duration -= frame->duration;
 }
 
 // Calculate an active area of the image that discounts formatting
 // bars and partially discounts other 0 energy areas.
@@ -236,13 +218,13 @@ static void subtract_stats(FIRSTPASS_STATS *section,
 #define MIN_ACTIVE_AREA 0.5
 #define MAX_ACTIVE_AREA 1.0
 static double calculate_active_area(const VP10_COMP *cpi,
-                                    const FIRSTPASS_STATS *this_frame)
-{
+                                    const FIRSTPASS_STATS *this_frame) {
   double active_pct;
 
-  active_pct = 1.0 -
-    ((this_frame->intra_skip_pct / 2) +
-     ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows));
+  active_pct =
+      1.0 -
+      ((this_frame->intra_skip_pct / 2) +
+       ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows));
   return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
 }
 
@@ -257,8 +239,9 @@ static double calculate_modified_err(const VP10_COMP *cpi,
   const double av_weight = stats->weight / stats->count;
   const double av_err = (stats->coded_error * av_weight) / stats->count;
   double modified_error =
-    av_err * pow(this_frame->coded_error * this_frame->weight /
-                 DOUBLE_DIVIDE_CHECK(av_err), oxcf->two_pass_vbrbias / 100.0);
+      av_err * pow(this_frame->coded_error * this_frame->weight /
+                       DOUBLE_DIVIDE_CHECK(av_err),
+                   oxcf->two_pass_vbrbias / 100.0);
 
   // Correction for active area. Frames with a reduced active area
   // (eg due to formatting bars) have a higher error per mb for the
@@ -266,17 +249,18 @@ static double calculate_modified_err(const VP10_COMP *cpi,
   // 0.5N blocks of complexity 2X is a little easier than coding N
   // blocks of complexity X.
   modified_error *=
-    pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION);
+      pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION);
 
-  return fclamp(modified_error,
-                twopass->modified_error_min, twopass->modified_error_max);
+  return fclamp(modified_error, twopass->modified_error_min,
+                twopass->modified_error_max);
 }
 
 // This function returns the maximum target rate per frame.
 static int frame_max_bits(const RATE_CONTROL *rc,
                           const VP10EncoderConfig *oxcf) {
   int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
-                          (int64_t)oxcf->two_pass_vbrmax_section) / 100;
+                      (int64_t)oxcf->two_pass_vbrmax_section) /
+                     100;
   if (max_bits < 0)
     max_bits = 0;
   else if (max_bits > rc->max_frame_bandwidth)
@@ -295,14 +279,10 @@ void vp10_end_first_pass(VP10_COMP *cpi) {
 
 static vpx_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
   switch (bsize) {
-    case BLOCK_8X8:
-      return vpx_mse8x8;
-    case BLOCK_16X8:
-      return vpx_mse16x8;
-    case BLOCK_8X16:
-      return vpx_mse8x16;
-    default:
-      return vpx_mse16x16;
+    case BLOCK_8X8: return vpx_mse8x8;
+    case BLOCK_16X8: return vpx_mse16x8;
+    case BLOCK_8X16: return vpx_mse8x16;
+    default: return vpx_mse16x16;
   }
 }
 
@@ -321,38 +301,26 @@ static vpx_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize,
   switch (bd) {
     default:
       switch (bsize) {
-        case BLOCK_8X8:
-          return vpx_highbd_8_mse8x8;
-        case BLOCK_16X8:
-          return vpx_highbd_8_mse16x8;
-        case BLOCK_8X16:
-          return vpx_highbd_8_mse8x16;
-        default:
-          return vpx_highbd_8_mse16x16;
+        case BLOCK_8X8: return vpx_highbd_8_mse8x8;
+        case BLOCK_16X8: return vpx_highbd_8_mse16x8;
+        case BLOCK_8X16: return vpx_highbd_8_mse8x16;
+        default: return vpx_highbd_8_mse16x16;
       }
       break;
     case 10:
       switch (bsize) {
-        case BLOCK_8X8:
-          return vpx_highbd_10_mse8x8;
-        case BLOCK_16X8:
-          return vpx_highbd_10_mse16x8;
-        case BLOCK_8X16:
-          return vpx_highbd_10_mse8x16;
-        default:
-          return vpx_highbd_10_mse16x16;
+        case BLOCK_8X8: return vpx_highbd_10_mse8x8;
+        case BLOCK_16X8: return vpx_highbd_10_mse16x8;
+        case BLOCK_8X16: return vpx_highbd_10_mse8x16;
+        default: return vpx_highbd_10_mse16x16;
       }
       break;
     case 12:
       switch (bsize) {
-        case BLOCK_8X8:
-          return vpx_highbd_12_mse8x8;
-        case BLOCK_16X8:
-          return vpx_highbd_12_mse16x8;
-        case BLOCK_8X16:
-          return vpx_highbd_12_mse8x16;
-        default:
-          return vpx_highbd_12_mse16x16;
+        case BLOCK_8X8: return vpx_highbd_12_mse8x8;
+        case BLOCK_16X8: return vpx_highbd_12_mse16x8;
+        case BLOCK_8X16: return vpx_highbd_12_mse8x16;
+        default: return vpx_highbd_12_mse16x16;
       }
       break;
   }
@@ -375,8 +343,7 @@ static int get_search_range(const VP10_COMP *cpi) {
   int sr = 0;
   const int dim = VPXMIN(cpi->initial_width, cpi->initial_height);
 
-  while ((dim << sr) < MAX_FULL_PEL_VAL)
-    ++sr;
+  while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr;
   return sr;
 }
 
@@ -384,8 +351,8 @@ static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
                                      const MV *ref_mv, MV *best_mv,
                                      int *best_motion_err) {
   MACROBLOCKD *const xd = &x->e_mbd;
-  MV tmp_mv = {0, 0};
-  MV ref_mv_full = {ref_mv->row >> 3, ref_mv->col >> 3};
+  MV tmp_mv = { 0, 0 };
+  MV ref_mv_full = { ref_mv->row >> 3, ref_mv->col >> 3 };
   int num00, tmp_err, n;
   const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
   vpx_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
@@ -407,12 +374,11 @@ static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
 
   // Center the initial step/diamond search on best mv.
   tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
-                                    step_param,
-                                    x->sadperbit16, &num00, &v_fn_ptr, ref_mv);
+                                    step_param, x->sadperbit16, &num00,
+                                    &v_fn_ptr, ref_mv);
   if (tmp_err < INT_MAX)
     tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
-  if (tmp_err < INT_MAX - new_mv_mode_penalty)
-    tmp_err += new_mv_mode_penalty;
+  if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
 
   if (tmp_err < *best_motion_err) {
     *best_motion_err = tmp_err;
@@ -430,8 +396,8 @@ static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
       --num00;
     } else {
       tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
-                                        step_param + n, x->sadperbit16,
-                                        &num00, &v_fn_ptr, ref_mv);
+                                        step_param + n, x->sadperbit16, &num00,
+                                        &v_fn_ptr, ref_mv);
       if (tmp_err < INT_MAX)
         tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
       if (tmp_err < INT_MAX - new_mv_mode_penalty)
@@ -447,11 +413,9 @@ static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
 
 static BLOCK_SIZE get_bsize(const VP10_COMMON *cm, int mb_row, int mb_col) {
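+  // Use a full 16x16 when the macroblock lies entirely inside the frame;
+  // blocks on the right/bottom edge are clamped to 8 wide and/or 8 high.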
   if (2 * mb_col + 1 < cm->mi_cols) {
-    return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16
-                                        : BLOCK_16X8;
+    return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8;
   } else {
-    return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16
-                                        : BLOCK_8X8;
+    return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16 : BLOCK_8X8;
   }
 }
 
@@ -459,11 +423,9 @@ static int find_fp_qindex(vpx_bit_depth_t bit_depth) {
   int i;
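+  // Walk the quantizer table for the first index whose real Q reaches
+  // FIRST_PASS_Q, falling back to the last index if none does.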
 
   for (i = 0; i < QINDEX_RANGE; ++i)
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q)
-      break;
+    if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
 
-  if (i == QINDEX_RANGE)
-    i--;
+  if (i == QINDEX_RANGE) i--;
 
   return i;
 }
@@ -471,8 +433,7 @@ static int find_fp_qindex(vpx_bit_depth_t bit_depth) {
 static void set_first_pass_params(VP10_COMP *cpi) {
   VP10_COMMON *const cm = &cpi->common;
   if (!cpi->refresh_alt_ref_frame &&
-      (cm->current_video_frame == 0 ||
-       (cpi->frame_flags & FRAMEFLAGS_KEY))) {
+      (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) {
     cm->frame_type = KEY_FRAME;
   } else {
     cm->frame_type = INTER_FRAME;
@@ -511,9 +472,9 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
   int image_data_start_row = INVALID_ROW;
   int new_mv_count = 0;
   int sum_in_vectors = 0;
-  MV lastmv = {0, 0};
+  MV lastmv = { 0, 0 };
   TWO_PASS *twopass = &cpi->twopass;
-  const MV zero_mv = {0, 0};
+  const MV zero_mv = { 0, 0 };
   int recon_y_stride, recon_uv_stride, uv_mb_height;
 
   YV12_BUFFER_CONFIG *const lst_yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
@@ -576,7 +537,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
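+  // 16 luma rows per MB row, halved when chroma is vertically subsampled
+  // (8 for 4:2:0 input, 16 otherwise).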
   uv_mb_height = 16 >> (new_yv12->y_height > new_yv12->uv_height);
 
   for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
-    MV best_ref_mv = {0, 0};
+    MV best_ref_mv = { 0, 0 };
 
     // Reset above block coeffs.
     xd->up_available = (mb_row != 0);
@@ -586,8 +547,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
     // Set up limit values for motion vectors to prevent them extending
     // outside the UMV borders.
     x->mv_row_min = -((mb_row * 16) + BORDER_MV_PIXELS_B16);
-    x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
-                    + BORDER_MV_PIXELS_B16;
+    x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + BORDER_MV_PIXELS_B16;
 
     for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
       int this_error;
@@ -608,16 +568,15 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
       xd->left_available = (mb_col != 0);
       xd->mi[0]->mbmi.sb_type = bsize;
       xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
-      set_mi_row_col(xd, &tile,
-                     mb_row << 1, num_8x8_blocks_high_lookup[bsize],
+      set_mi_row_col(xd, &tile, mb_row << 1, num_8x8_blocks_high_lookup[bsize],
                      mb_col << 1, num_8x8_blocks_wide_lookup[bsize],
                      cm->mi_rows, cm->mi_cols);
 
       // Do intra 16x16 prediction.
       xd->mi[0]->mbmi.segment_id = 0;
       xd->mi[0]->mbmi.mode = DC_PRED;
-      xd->mi[0]->mbmi.tx_size = use_dc_pred ?
-         (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
+      xd->mi[0]->mbmi.tx_size =
+          use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
       vp10_encode_intra_block_plane(x, bsize, 0);
       this_error = vpx_get_mb_ss(x->plane[0].src_diff);
 
@@ -635,17 +594,13 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
 #if CONFIG_VPX_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         switch (cm->bit_depth) {
-          case VPX_BITS_8:
-            break;
-          case VPX_BITS_10:
-            this_error >>= 4;
-            break;
-          case VPX_BITS_12:
-            this_error >>= 8;
-            break;
+          case VPX_BITS_8: break;
+          case VPX_BITS_10: this_error >>= 4; break;
+          case VPX_BITS_12: this_error >>= 8; break;
           default:
-            assert(0 && "cm->bit_depth should be VPX_BITS_8, "
-                        "VPX_BITS_10 or VPX_BITS_12");
+            assert(0 &&
+                   "cm->bit_depth should be VPX_BITS_8, "
+                   "VPX_BITS_10 or VPX_BITS_12");
             return;
         }
       }
@@ -699,7 +654,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
       if (cm->current_video_frame > 0) {
         int tmp_err, motion_error, raw_motion_error;
         // Assume 0,0 motion with no mv overhead.
-        MV mv = {0, 0} , tmp_mv = {0, 0};
+        MV mv = { 0, 0 }, tmp_mv = { 0, 0 };
         struct buf_2d unscaled_last_source_buf_2d;
 
         xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
@@ -708,12 +663,12 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
           motion_error = highbd_get_prediction_error(
               bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
         } else {
-          motion_error = get_prediction_error(
-              bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+          motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                              &xd->plane[0].pre[0]);
         }
 #else
-        motion_error = get_prediction_error(
-            bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+        motion_error =
+            get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
         // Compute the motion error of the 0,0 motion using the last source
@@ -728,12 +683,12 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
           raw_motion_error = highbd_get_prediction_error(
               bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
         } else {
-          raw_motion_error = get_prediction_error(
-              bsize, &x->plane[0].src, &unscaled_last_source_buf_2d);
+          raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                                  &unscaled_last_source_buf_2d);
         }
 #else
-        raw_motion_error = get_prediction_error(
-            bsize, &x->plane[0].src, &unscaled_last_source_buf_2d);
+        raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                                &unscaled_last_source_buf_2d);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
         // TODO(pengchong): Replace the hard-coded threshold
@@ -765,12 +720,12 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
               gf_motion_error = highbd_get_prediction_error(
                   bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
             } else {
-              gf_motion_error = get_prediction_error(
-                  bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+              gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                                     &xd->plane[0].pre[0]);
             }
 #else
-            gf_motion_error = get_prediction_error(
-                bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+            gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                                   &xd->plane[0].pre[0]);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
             first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv,
@@ -826,12 +781,12 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
           if (((this_error - intrapenalty) * 9 <= motion_error * 10) &&
               (this_error < (2 * intrapenalty))) {
             neutral_count += 1.0;
-          // Also track cases where the intra is not much worse than the inter
-          // and use this in limiting the GF/arf group length.
+            // Also track cases where the intra is not much worse than the inter
+            // and use this in limiting the GF/arf group length.
           } else if ((this_error > NCOUNT_INTRA_THRESH) &&
                      (this_error < (NCOUNT_INTRA_FACTOR * motion_error))) {
-            neutral_count += (double)motion_error /
-                             DOUBLE_DIVIDE_CHECK((double)this_error);
+            neutral_count +=
+                (double)motion_error / DOUBLE_DIVIDE_CHECK((double)this_error);
           }
 
           mv.row *= 8;
@@ -901,8 +856,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
 #endif
 
             // Non-zero vector: was it different from the last non-zero vector?
-            if (!is_equal_mv(&mv, &lastmv))
-              ++new_mv_count;
+            if (!is_equal_mv(&mv, &lastmv)) ++new_mv_count;
             lastmv = mv;
 
             // Does the row vector point inwards or outwards?
@@ -948,10 +902,10 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
 
     // Adjust to the next row of MBs.
     x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
-    x->plane[1].src.buf += uv_mb_height * x->plane[1].src.stride -
-                           uv_mb_height * cm->mb_cols;
-    x->plane[2].src.buf += uv_mb_height * x->plane[1].src.stride -
-                           uv_mb_height * cm->mb_cols;
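+    // Note: plane[2] steps by plane[1]'s stride; U and V share a single uv
+    // stride in YV12, so the two planes advance identically.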
+    x->plane[1].src.buf +=
+        uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
+    x->plane[2].src.buf +=
+        uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
 
     vpx_clear_system_state();
   }
@@ -976,7 +930,8 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
     // Initial estimate here uses sqrt(mbs) to define the min_err, where the
     // number of mbs is proportional to the image area.
     const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
-                        ? cpi->initial_mbs : cpi->common.MBs;
+                            ? cpi->initial_mbs
+                            : cpi->common.MBs;
     const double min_err = 200 * sqrt(num_mbs);
 
     intra_factor = intra_factor / (double)num_mbs;
@@ -1000,10 +955,10 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
       fps.mvr_abs = (double)sum_mvr_abs / mvcount;
       fps.MVc = (double)sum_mvc / mvcount;
       fps.mvc_abs = (double)sum_mvc_abs / mvcount;
-      fps.MVrv = ((double)sum_mvrs -
-                  ((double)sum_mvr * sum_mvr / mvcount)) / mvcount;
-      fps.MVcv = ((double)sum_mvcs -
-                  ((double)sum_mvc * sum_mvc / mvcount)) / mvcount;
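+      // Motion vector variance as E[v^2] - E[v]^2:
+      // (sum of squares - (sum)^2 / n) / n, per row and column component.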
+      fps.MVrv =
+          ((double)sum_mvrs - ((double)sum_mvr * sum_mvr / mvcount)) / mvcount;
+      fps.MVcv =
+          ((double)sum_mvcs - ((double)sum_mvc * sum_mvc / mvcount)) / mvcount;
       fps.mv_in_out_count = (double)sum_in_vectors / (mvcount * 2);
       fps.new_mv_count = new_mv_count;
       fps.pcnt_motion = (double)mvcount / num_mbs;
@@ -1084,11 +1039,8 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
   ++cm->current_video_frame;
 }
 
-static double calc_correction_factor(double err_per_mb,
-                                     double err_divisor,
-                                     double pt_low,
-                                     double pt_high,
-                                     int q,
+static double calc_correction_factor(double err_per_mb, double err_divisor,
+                                     double pt_low, double pt_high, int q,
                                      vpx_bit_depth_t bit_depth) {
   const double error_term = err_per_mb / err_divisor;
 
@@ -1097,8 +1049,7 @@ static double calc_correction_factor(double err_per_mb,
       VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
 
   // Calculate correction factor.
-  if (power_term < 1.0)
-    assert(error_term >= 0.0);
+  if (power_term < 1.0) assert(error_term >= 0.0);
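+  // pow() with a negative base and a fractional exponent would be undefined,
+  // hence the assert above.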
 
   return fclamp(pow(error_term, power_term), 0.05, 5.0);
 }
@@ -1122,35 +1073,31 @@ static int get_twopass_worst_quality(const VP10_COMP *cpi,
     return rc->worst_quality;  // Highest value allowed
   } else {
     const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
-                        ? cpi->initial_mbs : cpi->common.MBs;
+                            ? cpi->initial_mbs
+                            : cpi->common.MBs;
     const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
     const double av_err_per_mb = section_err / active_mbs;
     const double speed_term = 1.0 + 0.04 * oxcf->speed;
     const double ediv_size_correction = (double)num_mbs / EDIV_SIZE_FACTOR;
-    const int target_norm_bits_per_mb = ((uint64_t)section_target_bandwidth <<
-                                         BPER_MB_NORMBITS) / active_mbs;
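+    // Per-MB bit budget in BPER_MB_NORMBITS fixed point, comparable against
+    // the vp10_rc_bits_per_mb() estimates in the loop below.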
+    const int target_norm_bits_per_mb =
+        ((uint64_t)section_target_bandwidth << BPER_MB_NORMBITS) / active_mbs;
 
     int q;
 
     // Try to pick a max Q that will be high enough to encode the
     // content at the given rate.
     for (q = rc->best_quality; q < rc->worst_quality; ++q) {
-      const double factor =
-          calc_correction_factor(av_err_per_mb,
-                                 ERR_DIVISOR - ediv_size_correction,
-                                 FACTOR_PT_LOW, FACTOR_PT_HIGH, q,
-                                 cpi->common.bit_depth);
-      const int bits_per_mb =
-        vp10_rc_bits_per_mb(INTER_FRAME, q,
-                           factor * speed_term * group_weight_factor,
-                           cpi->common.bit_depth);
-      if (bits_per_mb <= target_norm_bits_per_mb)
-        break;
+      const double factor = calc_correction_factor(
+          av_err_per_mb, ERR_DIVISOR - ediv_size_correction, FACTOR_PT_LOW,
+          FACTOR_PT_HIGH, q, cpi->common.bit_depth);
+      const int bits_per_mb = vp10_rc_bits_per_mb(
+          INTER_FRAME, q, factor * speed_term * group_weight_factor,
+          cpi->common.bit_depth);
+      if (bits_per_mb <= target_norm_bits_per_mb) break;
     }
 
     // Restriction on active max q for constrained quality mode.
-    if (cpi->oxcf.rc_mode == VPX_CQ)
-      q = VPXMAX(q, oxcf->cq_level);
+    if (cpi->oxcf.rc_mode == VPX_CQ) q = VPXMAX(q, oxcf->cq_level);
     return q;
   }
 }
@@ -1180,9 +1127,8 @@ void vp10_init_subsampling(VP10_COMP *cpi) {
   setup_rf_level_maxq(cpi);
 }
 
-void vp10_calculate_coded_size(VP10_COMP *cpi,
-                          int *scaled_frame_width,
-                          int *scaled_frame_height) {
+void vp10_calculate_coded_size(VP10_COMP *cpi, int *scaled_frame_width,
+                               int *scaled_frame_height) {
   RATE_CONTROL *const rc = &cpi->rc;
   *scaled_frame_width = rc->frame_width[rc->frame_size_selector];
   *scaled_frame_height = rc->frame_height[rc->frame_size_selector];
@@ -1197,8 +1143,7 @@ void vp10_init_second_pass(VP10_COMP *cpi) {
   zero_stats(&twopass->total_stats);
   zero_stats(&twopass->total_left_stats);
 
-  if (!twopass->stats_in_end)
-    return;
+  if (!twopass->stats_in_end) return;
 
   stats = &twopass->total_stats;
 
@@ -1212,8 +1157,8 @@ void vp10_init_second_pass(VP10_COMP *cpi) {
   // It is calculated based on the actual durations of all frames from the
   // first pass.
   vp10_new_framerate(cpi, frame_rate);
-  twopass->bits_left = (int64_t)(stats->duration * oxcf->target_bandwidth /
-                       10000000.0);
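+  // Assuming stats->duration is in the encoder's 10 MHz timestamp ticks,
+  // this is (seconds of video) * (target bits per second).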
+  twopass->bits_left =
+      (int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0);
 
   // This variable monitors how far the second ref update is lagging.
   twopass->sr_update_lag = 1;
@@ -1221,14 +1166,14 @@ void vp10_init_second_pass(VP10_COMP *cpi) {
   // Scan the first pass file and calculate a modified total error based upon
   // the bias/power function used to allocate bits.
   {
-    const double avg_error = stats->coded_error /
-                             DOUBLE_DIVIDE_CHECK(stats->count);
+    const double avg_error =
+        stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count);
     const FIRSTPASS_STATS *s = twopass->stats_in;
     double modified_error_total = 0.0;
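+    // Clamp bounds for per-frame modified error, expressed as VBR section
+    // percentages of the average first-pass coded error.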
-    twopass->modified_error_min = (avg_error *
-                                      oxcf->two_pass_vbrmin_section) / 100;
-    twopass->modified_error_max = (avg_error *
-                                      oxcf->two_pass_vbrmax_section) / 100;
+    twopass->modified_error_min =
+        (avg_error * oxcf->two_pass_vbrmin_section) / 100;
+    twopass->modified_error_max =
+        (avg_error * oxcf->two_pass_vbrmax_section) / 100;
     while (s < twopass->stats_in_end) {
       modified_error_total += calculate_modified_err(cpi, twopass, oxcf, s);
       ++s;
@@ -1260,15 +1205,14 @@ void vp10_init_second_pass(VP10_COMP *cpi) {
 
 static double get_sr_decay_rate(const VP10_COMP *cpi,
                                 const FIRSTPASS_STATS *frame) {
-  const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
-                      ? cpi->initial_mbs : cpi->common.MBs;
-  double sr_diff =
-      (frame->sr_coded_error - frame->coded_error) / num_mbs;
+  const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
+                                                             : cpi->common.MBs;
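+  // Per-MB gap between second-reference and last-reference coded error; a
+  // large gap means prediction quality decays quickly across two frames.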
+  double sr_diff = (frame->sr_coded_error - frame->coded_error) / num_mbs;
   double sr_decay = 1.0;
   double modified_pct_inter;
   double modified_pcnt_intra;
   const double motion_amplitude_factor =
-    frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) / 2);
+      frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) / 2);
 
   modified_pct_inter = frame->pcnt_inter;
   if ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) <
@@ -1277,7 +1221,6 @@ static double get_sr_decay_rate(const VP10_COMP *cpi,
   }
   modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
 
-
   if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
     sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX);
     sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) -
@@ -1291,8 +1234,7 @@ static double get_sr_decay_rate(const VP10_COMP *cpi,
 // quality is decaying from frame to frame.
 static double get_zero_motion_factor(const VP10_COMP *cpi,
                                      const FIRSTPASS_STATS *frame) {
-  const double zero_motion_pct = frame->pcnt_inter -
-                                 frame->pcnt_motion;
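+  // Fraction of blocks inter-coded with an exactly zero motion vector.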
+  const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
   double sr_decay = get_sr_decay_rate(cpi, frame);
   return VPXMIN(sr_decay, zero_motion_pct);
 }
@@ -1303,8 +1245,8 @@ static double get_prediction_decay_rate(const VP10_COMP *cpi,
                                         const FIRSTPASS_STATS *next_frame) {
   const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame);
   const double zero_motion_factor =
-    (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
-                ZM_POWER_FACTOR));
+      (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
+                  ZM_POWER_FACTOR));
 
   return VPXMAX(zero_motion_factor,
                 (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
@@ -1313,8 +1255,8 @@ static double get_prediction_decay_rate(const VP10_COMP *cpi,
 // Function to test for a condition where a complex transition is followed
 // by a static section. For example, in slide shows where there is a fade
 // between slides. This is to help with more optimal kf and gf positioning.
-static int detect_transition_to_still(VP10_COMP *cpi,
-                                      int frame_interval, int still_interval,
+static int detect_transition_to_still(VP10_COMP *cpi, int frame_interval,
+                                      int still_interval,
                                       double loop_decay_rate,
                                       double last_decay_rate) {
   TWO_PASS *const twopass = &cpi->twopass;
@@ -1323,19 +1265,16 @@ static int detect_transition_to_still(VP10_COMP *cpi,
   // Break clause to detect very still sections after motion,
   // for example a static image after a fade or other transition
   // instead of a clean scene cut.
-  if (frame_interval > rc->min_gf_interval &&
-      loop_decay_rate >= 0.999 &&
+  if (frame_interval > rc->min_gf_interval && loop_decay_rate >= 0.999 &&
       last_decay_rate < 0.9) {
     int j;
 
     // Look ahead a few frames to see if static condition persists...
     for (j = 0; j < still_interval; ++j) {
       const FIRSTPASS_STATS *stats = &twopass->stats_in[j];
-      if (stats >= twopass->stats_in_end)
-        break;
+      if (stats >= twopass->stats_in_end) break;
 
-      if (stats->pcnt_inter - stats->pcnt_motion < 0.999)
-        break;
+      if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
     }
 
     // Only if it does do we signal a transition to still.
@@ -1377,30 +1316,28 @@ static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
   // Accumulate a measure of how uniform (or conversely how random) the motion
   // field is (a ratio of abs(mv) / mv).
   if (pct > 0.05) {
-    const double mvr_ratio = fabs(stats->mvr_abs) /
-                                 DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
-    const double mvc_ratio = fabs(stats->mvc_abs) /
-                                 DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
+    const double mvr_ratio =
+        fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
+    const double mvc_ratio =
+        fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
 
-    *mv_ratio_accumulator += pct * (mvr_ratio < stats->mvr_abs ?
-                                       mvr_ratio : stats->mvr_abs);
-    *mv_ratio_accumulator += pct * (mvc_ratio < stats->mvc_abs ?
-                                       mvc_ratio : stats->mvc_abs);
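+    // Cap each term at the raw abs component so that a tiny mean MV cannot
+    // blow the abs(mv)/mv ratio up.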
+    *mv_ratio_accumulator +=
+        pct * (mvr_ratio < stats->mvr_abs ? mvr_ratio : stats->mvr_abs);
+    *mv_ratio_accumulator +=
+        pct * (mvc_ratio < stats->mvc_abs ? mvc_ratio : stats->mvc_abs);
   }
 }
 
 #define BASELINE_ERR_PER_MB 1000.0
 static double calc_frame_boost(VP10_COMP *cpi,
                                const FIRSTPASS_STATS *this_frame,
-                               double this_frame_mv_in_out,
-                               double max_boost) {
+                               double this_frame_mv_in_out, double max_boost) {
   double frame_boost;
-  const double lq =
-    vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
-                            cpi->common.bit_depth);
+  const double lq = vp10_convert_qindex_to_q(
+      cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth);
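+  // Boost scales with the average inter-frame Q: 0.5 + 0.015 * lq crosses
+  // 1.0 near lq = 33 and is capped at 1.5 from lq = 67 upward.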
   const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5);
-  int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
-                ? cpi->initial_mbs : cpi->common.MBs;
+  int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
+                                                       : cpi->common.MBs;
 
   // Correct for any inactive region in the image
   num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame));
@@ -1422,9 +1359,8 @@ static double calc_frame_boost(VP10_COMP *cpi,
   return VPXMIN(frame_boost, max_boost * boost_q_correction);
 }
 
-static int calc_arf_boost(VP10_COMP *cpi, int offset,
-                          int f_frames, int b_frames,
-                          int *f_boost, int *b_boost) {
+static int calc_arf_boost(VP10_COMP *cpi, int offset, int f_frames,
+                          int b_frames, int *f_boost, int *b_boost) {
   TWO_PASS *const twopass = &cpi->twopass;
   int i;
   double boost_score = 0.0;
@@ -1439,14 +1375,12 @@ static int calc_arf_boost(VP10_COMP *cpi, int offset,
   // Search forward from the proposed arf/next gf position.
   for (i = 0; i < f_frames; ++i) {
     const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset);
-    if (this_frame == NULL)
-      break;
+    if (this_frame == NULL) break;
 
     // Update the motion related elements to the boost calculation.
-    accumulate_frame_motion_stats(this_frame,
-                                  &this_frame_mv_in_out, &mv_in_out_accumulator,
-                                  &abs_mv_in_out_accumulator,
-                                  &mv_ratio_accumulator);
+    accumulate_frame_motion_stats(
+        this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+        &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
 
     // We want to discount the flash frame itself and the recovery
     // frame that follows as both will have poor scores.
@@ -1457,12 +1391,13 @@ static int calc_arf_boost(VP10_COMP *cpi, int offset,
     if (!flash_detected) {
       decay_accumulator *= get_prediction_decay_rate(cpi, this_frame);
       decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
-                          ? MIN_DECAY_FACTOR : decay_accumulator;
+                              ? MIN_DECAY_FACTOR
+                              : decay_accumulator;
     }
 
-    boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame,
-                                                        this_frame_mv_in_out,
-                                                        GF_MAX_BOOST);
+    boost_score +=
+        decay_accumulator *
+        calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST);
   }
 
   *f_boost = (int)boost_score;
@@ -1478,14 +1413,12 @@ static int calc_arf_boost(VP10_COMP *cpi, int offset,
   // Search backward towards last gf position.
   for (i = -1; i >= -b_frames; --i) {
     const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset);
-    if (this_frame == NULL)
-      break;
+    if (this_frame == NULL) break;
 
     // Update the motion related elements to the boost calculation.
-    accumulate_frame_motion_stats(this_frame,
-                                  &this_frame_mv_in_out, &mv_in_out_accumulator,
-                                  &abs_mv_in_out_accumulator,
-                                  &mv_ratio_accumulator);
+    accumulate_frame_motion_stats(
+        this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+        &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
 
     // We want to discount the flash frame itself and the recovery
     // frame that follows as both will have poor scores.
@@ -1496,12 +1429,13 @@ static int calc_arf_boost(VP10_COMP *cpi, int offset,
     if (!flash_detected) {
       decay_accumulator *= get_prediction_decay_rate(cpi, this_frame);
       decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
-                              ? MIN_DECAY_FACTOR : decay_accumulator;
+                              ? MIN_DECAY_FACTOR
+                              : decay_accumulator;
     }
 
-    boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame,
-                                                        this_frame_mv_in_out,
-                                                        GF_MAX_BOOST);
+    boost_score +=
+        decay_accumulator *
+        calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST);
   }
   *b_boost = (int)boost_score;
 
@@ -1549,9 +1483,10 @@ static int64_t calculate_total_gf_group_bits(VP10_COMP *cpi,
   }
 
   // Clamp odd edge cases.
-  total_group_bits = (total_group_bits < 0) ?
-     0 : (total_group_bits > twopass->kf_group_bits) ?
-     twopass->kf_group_bits : total_group_bits;
+  total_group_bits =
+      (total_group_bits < 0) ? 0 : (total_group_bits > twopass->kf_group_bits)
+                                       ? twopass->kf_group_bits
+                                       : total_group_bits;
 
   // Clip based on user supplied data rate variability limit.
   if (total_group_bits > (int64_t)max_bits * rc->baseline_gf_interval)
@@ -1561,13 +1496,12 @@ static int64_t calculate_total_gf_group_bits(VP10_COMP *cpi,
 }
 
 // Calculate the number of extra bits to assign to boosted frames in a group.
-static int calculate_boost_bits(int frame_count,
-                                int boost, int64_t total_group_bits) {
+static int calculate_boost_bits(int frame_count, int boost,
+                                int64_t total_group_bits) {
   int allocation_chunks;
 
   // Return 0 for invalid inputs (could arise, e.g., through rounding errors).
-  if (!boost || (total_group_bits <= 0) || (frame_count <= 0) )
-    return 0;
+  if (!boost || (total_group_bits <= 0) || (frame_count <= 0)) return 0;
 
   allocation_chunks = (frame_count * 100) + boost;
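+  // i.e. 100 baseline shares per frame plus "boost" extra shares for the
+  // boosted frame.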
 
@@ -1637,14 +1571,12 @@ static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
     }
 
     // Step over the golden frame / overlay frame
-    if (EOF == input_stats(twopass, &frame_stats))
-      return;
+    if (EOF == input_stats(twopass, &frame_stats)) return;
   }
 
   // Deduct the boost bits for arf (or gf if it is not a key frame)
   // from the group total.
-  if (rc->source_alt_ref_pending || !key_frame)
-    total_group_bits -= gf_arf_bits;
+  if (rc->source_alt_ref_pending || !key_frame) total_group_bits -= gf_arf_bits;
 
   // Store the bits to spend on the ARF if there is one.
   if (rc->source_alt_ref_pending) {
@@ -1657,8 +1589,8 @@ static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
 
     gf_group->arf_update_idx[alt_frame_index] = arf_buffer_indices[0];
     gf_group->arf_ref_idx[alt_frame_index] =
-      arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
-                         rc->source_alt_ref_active];
+        arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
+                           rc->source_alt_ref_active];
     ++frame_index;
 
     if (cpi->multi_arf_enabled) {
@@ -1666,7 +1598,7 @@ static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
       gf_group->update_type[frame_index] = ARF_UPDATE;
       gf_group->rf_level[frame_index] = GF_ARF_LOW;
       gf_group->arf_src_offset[frame_index] =
-        (unsigned char)((rc->baseline_gf_interval >> 1) - 1);
+          (unsigned char)((rc->baseline_gf_interval >> 1) - 1);
       gf_group->arf_update_idx[frame_index] = arf_buffer_indices[1];
       gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
       ++frame_index;
@@ -1679,8 +1611,7 @@ static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
   // Allocate bits to the other frames in the group.
   for (i = 0; i < rc->baseline_gf_interval - rc->source_alt_ref_pending; ++i) {
     int arf_idx = 0;
-    if (EOF == input_stats(twopass, &frame_stats))
-      break;
+    if (EOF == input_stats(twopass, &frame_stats)) break;
 
     modified_err = calculate_modified_err(cpi, twopass, oxcf, &frame_stats);
 
@@ -1695,14 +1626,13 @@ static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
       mid_boost_bits += (target_frame_size >> 4);
       target_frame_size -= (target_frame_size >> 4);
 
-      if (frame_index <= mid_frame_idx)
-        arf_idx = 1;
+      if (frame_index <= mid_frame_idx) arf_idx = 1;
     }
     gf_group->arf_update_idx[frame_index] = arf_buffer_indices[arf_idx];
     gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx];
 
-    target_frame_size = clamp(target_frame_size, 0,
-                              VPXMIN(max_bits, (int)total_group_bits));
+    target_frame_size =
+        clamp(target_frame_size, 0, VPXMIN(max_bits, (int)total_group_bits));
 
     gf_group->update_type[frame_index] = LF_UPDATE;
     gf_group->rf_level[frame_index] = INTER_NORMAL;
@@ -1817,12 +1747,10 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   // Set a maximum and minimum interval for the GF group.
   // If the image appears almost completely static we can extend beyond this.
   {
-    int int_max_q =
-      (int)(vp10_convert_qindex_to_q(twopass->active_worst_quality,
-                                   cpi->common.bit_depth));
-    int int_lbq =
-      (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
-                                   cpi->common.bit_depth));
+    int int_max_q = (int)(vp10_convert_qindex_to_q(
+        twopass->active_worst_quality, cpi->common.bit_depth));
+    int int_lbq = (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
+                                                 cpi->common.bit_depth));
     active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200);
     if (active_min_gf_interval > rc->max_gf_interval)
       active_min_gf_interval = rc->max_gf_interval;
@@ -1858,18 +1786,16 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
     gf_group_skip_pct += this_frame->intra_skip_pct;
     gf_group_inactive_zone_rows += this_frame->inactive_zone_rows;
 
-    if (EOF == input_stats(twopass, &next_frame))
-      break;
+    if (EOF == input_stats(twopass, &next_frame)) break;
 
     // Test for the case where there is a brief flash but prediction
     // quality relative to an earlier frame is then restored.
     flash_detected = detect_flash(twopass, 0);
 
     // Update the motion related elements to the boost calculation.
-    accumulate_frame_motion_stats(&next_frame,
-                                  &this_frame_mv_in_out, &mv_in_out_accumulator,
-                                  &abs_mv_in_out_accumulator,
-                                  &mv_ratio_accumulator);
+    accumulate_frame_motion_stats(
+        &next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+        &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
 
     // Accumulate the effect of prediction quality decay.
     if (!flash_detected) {
@@ -1892,23 +1818,23 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
     }
 
     // Calculate a boost number for this frame.
-    boost_score += decay_accumulator * calc_frame_boost(cpi, &next_frame,
-                                                        this_frame_mv_in_out,
-                                                        GF_MAX_BOOST);
+    boost_score +=
+        decay_accumulator *
+        calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out, GF_MAX_BOOST);
 
     // Break out conditions.
     if (
-      // Break at active_max_gf_interval unless almost totally static.
-      (i >= (active_max_gf_interval + arf_active_or_kf) &&
-            zero_motion_accumulator < 0.995) ||
-      (
-        // Don't break out with a very short interval.
-        (i >= active_min_gf_interval + arf_active_or_kf) &&
-        (!flash_detected) &&
-        ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
-         (abs_mv_in_out_accumulator > 3.0) ||
-         (mv_in_out_accumulator < -2.0) ||
-         ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) {
+        // Break at active_max_gf_interval unless almost totally static.
+        (i >= (active_max_gf_interval + arf_active_or_kf) &&
+         zero_motion_accumulator < 0.995) ||
+        (
+            // Don't break out with a very short interval.
+            (i >= active_min_gf_interval + arf_active_or_kf) &&
+            (!flash_detected) &&
+            ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
+             (abs_mv_in_out_accumulator > 3.0) ||
+             (mv_in_out_accumulator < -2.0) ||
+             ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) {
       boost_score = old_boost_score;
       break;
     }
@@ -1923,18 +1849,19 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   rc->constrained_gf_group = (i >= rc->frames_to_key) ? 1 : 0;
 
   // Should we use the alternate reference frame?
-  if (allow_alt_ref &&
-    (i < cpi->oxcf.lag_in_frames) &&
-    (i >= rc->min_gf_interval)) {
+  if (allow_alt_ref && (i < cpi->oxcf.lag_in_frames) &&
+      (i >= rc->min_gf_interval)) {
     // Calculate the boost for alt ref.
-    rc->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost,
-      &b_boost);
+    rc->gfu_boost =
+        calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
     rc->source_alt_ref_pending = 1;
 
     // Test to see if multi arf is appropriate.
     cpi->multi_arf_enabled =
-      (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) &&
-      (zero_motion_accumulator < 0.995)) ? 1 : 0;
+        (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) &&
+         (zero_motion_accumulator < 0.995))
+            ? 1
+            : 0;
   } else {
     rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST);
     rc->source_alt_ref_pending = 0;
@@ -1959,13 +1886,13 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   // of the allocated bit budget.
   if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) {
     const int vbr_group_bits_per_frame =
-      (int)(gf_group_bits / rc->baseline_gf_interval);
-    const double group_av_err = gf_group_raw_error  / rc->baseline_gf_interval;
+        (int)(gf_group_bits / rc->baseline_gf_interval);
+    const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
     const double group_av_skip_pct =
-      gf_group_skip_pct / rc->baseline_gf_interval;
+        gf_group_skip_pct / rc->baseline_gf_interval;
     const double group_av_inactive_zone =
-      ((gf_group_inactive_zone_rows * 2) /
-       (rc->baseline_gf_interval * (double)cm->mb_rows));
+        ((gf_group_inactive_zone_rows * 2) /
+         (rc->baseline_gf_interval * (double)cm->mb_rows));
 
     int tmp_q;
     // rc factor is a weight factor that corrects for local rate control drift.
@@ -1977,19 +1904,17 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
       rc_factor = VPXMIN(RC_FACTOR_MAX,
                          (double)(100 - rc->rate_error_estimate) / 100.0);
     }
-    tmp_q =
-      get_twopass_worst_quality(cpi, group_av_err,
-                                (group_av_skip_pct + group_av_inactive_zone),
-                                vbr_group_bits_per_frame,
-                                twopass->kfgroup_inter_fraction * rc_factor);
+    tmp_q = get_twopass_worst_quality(
+        cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
+        vbr_group_bits_per_frame, twopass->kfgroup_inter_fraction * rc_factor);
     twopass->active_worst_quality =
-      VPXMAX(tmp_q, twopass->active_worst_quality >> 1);
+        VPXMAX(tmp_q, twopass->active_worst_quality >> 1);
   }
 #endif
 
   // Calculate the extra bits to be used for boosted frame(s)
-  gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval,
-                                     rc->gfu_boost, gf_group_bits);
+  gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval, rc->gfu_boost,
+                                     gf_group_bits);
 
   // Adjust KF group bits and error remaining.
   twopass->kf_group_error_left -= (int64_t)gf_group_err;
@@ -2016,9 +1941,8 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
 
   // Calculate a section intra ratio used in setting max loop filter.
   if (cpi->common.frame_type != KEY_FRAME) {
-    twopass->section_intra_rating =
-        calculate_section_intra_ratio(start_pos, twopass->stats_in_end,
-                                      rc->baseline_gf_interval);
+    twopass->section_intra_rating = calculate_section_intra_ratio(
+        start_pos, twopass->stats_in_end, rc->baseline_gf_interval);
   }
 
   if (oxcf->resize_mode == RESIZE_DYNAMIC) {
@@ -2060,7 +1984,7 @@ static int test_candidate_kf(TWO_PASS *twopass,
   int is_viable_kf = 0;
   double pcnt_intra = 1.0 - this_frame->pcnt_inter;
   double modified_pcnt_inter =
-    this_frame->pcnt_inter - this_frame->pcnt_neutral;
+      this_frame->pcnt_inter - this_frame->pcnt_neutral;
 
   // Does the frame satisfy the primary criteria of a key frame?
   // See above for an explanation of the test criteria.
@@ -2072,15 +1996,15 @@ static int test_candidate_kf(TWO_PASS *twopass,
         (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
         ((this_frame->intra_error /
           DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) <
-          KF_II_ERR_THRESHOLD) &&
+         KF_II_ERR_THRESHOLD) &&
         ((fabs(last_frame->coded_error - this_frame->coded_error) /
-          DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
+              DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
           ERR_CHANGE_THRESHOLD) ||
          (fabs(last_frame->intra_error - this_frame->intra_error) /
-          DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
+              DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
           ERR_CHANGE_THRESHOLD) ||
          ((next_frame->intra_error /
-          DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) >
+           DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) >
           II_IMPROVEMENT_THRESHOLD))))) {
     int i;
     const FIRSTPASS_STATS *start_pos = twopass->stats_in;
@@ -2094,8 +2018,7 @@ static int test_candidate_kf(TWO_PASS *twopass,
       double next_iiratio = (BOOST_FACTOR * local_next_frame.intra_error /
                              DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
 
-      if (next_iiratio > KF_II_MAX)
-        next_iiratio = KF_II_MAX;
+      if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;
 
       // Cumulative effect of decay in prediction quality.
       if (local_next_frame.pcnt_inter > 0.85)
@@ -2107,10 +2030,9 @@ static int test_candidate_kf(TWO_PASS *twopass,
       boost_score += (decay_accumulator * next_iiratio);
 
       // Test various breakout clauses.
-      if ((local_next_frame.pcnt_inter < 0.05) ||
-          (next_iiratio < 1.5) ||
-          (((local_next_frame.pcnt_inter -
-             local_next_frame.pcnt_neutral) < 0.20) &&
+      if ((local_next_frame.pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
+          (((local_next_frame.pcnt_inter - local_next_frame.pcnt_neutral) <
+            0.20) &&
            (next_iiratio < 3.0)) ||
           ((boost_score - old_boost_score) < 3.0) ||
           (local_next_frame.intra_error < 200)) {
@@ -2120,8 +2042,7 @@ static int test_candidate_kf(TWO_PASS *twopass,
       old_boost_score = boost_score;
 
       // Get the next frame's details.
-      if (EOF == input_stats(twopass, &local_next_frame))
-        break;
+      if (EOF == input_stats(twopass, &local_next_frame)) break;
     }
 
     // If there is tolerable prediction for at least the next 3 frames then
@@ -2157,7 +2078,7 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   double boost_score = 0.0;
   double kf_mod_err = 0.0;
   double kf_group_err = 0.0;
-  double recent_loop_decay[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+  double recent_loop_decay[8] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
 
   vp10_zero(next_frame);
 
@@ -2212,8 +2133,7 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
       // quality since the last GF or KF.
       recent_loop_decay[i % 8] = loop_decay_rate;
       decay_accumulator = 1.0;
-      for (j = 0; j < 8; ++j)
-        decay_accumulator *= recent_loop_decay[j];
+      for (j = 0; j < 8; ++j) decay_accumulator *= recent_loop_decay[j];
 
       // Special check for transition or high motion followed by a
       // static scene.
@@ -2226,8 +2146,7 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
 
       // If we don't have a real key frame within the next two
       // key_freq intervals then break out of the loop.
-      if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq)
-        break;
+      if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq) break;
     } else {
       ++rc->frames_to_key;
     }
@@ -2238,8 +2157,7 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   // We already break out of the loop above at 2x max.
   // This code centers the extra kf if the actual natural interval
   // is between 1x and 2x.
-  if (cpi->oxcf.auto_key &&
-      rc->frames_to_key > cpi->oxcf.key_freq) {
+  if (cpi->oxcf.auto_key && rc->frames_to_key > cpi->oxcf.key_freq) {
     FIRSTPASS_STATS tmp_frame = first_frame;
 
     rc->frames_to_key /= 2;
@@ -2278,8 +2196,8 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
 
     // Default allocation based on bits left and relative
     // complexity of the section.
-    twopass->kf_group_bits = (int64_t)(twopass->bits_left *
-       (kf_group_err / twopass->modified_error_left));
+    twopass->kf_group_bits = (int64_t)(
+        twopass->bits_left * (kf_group_err / twopass->modified_error_left));
 
     // Clip based on maximum per frame rate defined by the user.
     max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
@@ -2298,23 +2216,22 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   decay_accumulator = 1.0;
   boost_score = 0.0;
   for (i = 0; i < (rc->frames_to_key - 1); ++i) {
-    if (EOF == input_stats(twopass, &next_frame))
-      break;
+    if (EOF == input_stats(twopass, &next_frame)) break;
 
     // Monitor for static sections.
-    zero_motion_accumulator = VPXMIN(
-        zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame));
+    zero_motion_accumulator = VPXMIN(zero_motion_accumulator,
+                                     get_zero_motion_factor(cpi, &next_frame));
 
     // Not all frames in the group are necessarily used in calculating boost.
     if ((i <= rc->max_gf_interval) ||
         ((i <= (rc->max_gf_interval * 4)) && (decay_accumulator > 0.5))) {
       const double frame_boost =
-        calc_frame_boost(cpi, this_frame, 0, KF_MAX_BOOST);
+          calc_frame_boost(cpi, this_frame, 0, KF_MAX_BOOST);
 
       // How fast is prediction quality decaying?
       if (!detect_flash(twopass, 0)) {
         const double loop_decay_rate =
-          get_prediction_decay_rate(cpi, &next_frame);
+            get_prediction_decay_rate(cpi, &next_frame);
         decay_accumulator *= loop_decay_rate;
         decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR);
         av_decay_accumulator += decay_accumulator;
@@ -2331,9 +2248,8 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
 
   // Calculate a section intra ratio used in setting max loop filter.
-  twopass->section_intra_rating =
-      calculate_section_intra_ratio(start_position, twopass->stats_in_end,
-                                    rc->frames_to_key);
+  twopass->section_intra_rating = calculate_section_intra_ratio(
+      start_position, twopass->stats_in_end, rc->frames_to_key);
 
   // Apply various clamps for min and max boost
   rc->kf_boost = (int)(av_decay_accumulator * boost_score);
@@ -2341,15 +2257,15 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST);
 
   // Work out how many bits to allocate for the key frame itself.
-  kf_bits = calculate_boost_bits((rc->frames_to_key - 1),
-                                  rc->kf_boost, twopass->kf_group_bits);
+  kf_bits = calculate_boost_bits((rc->frames_to_key - 1), rc->kf_boost,
+                                 twopass->kf_group_bits);
 
   // Work out the fraction of the kf group bits reserved for the inter frames
   // within the group after discounting the bits for the kf itself.
   if (twopass->kf_group_bits) {
     twopass->kfgroup_inter_fraction =
-      (double)(twopass->kf_group_bits - kf_bits) /
-      (double)twopass->kf_group_bits;
+        (double)(twopass->kf_group_bits - kf_bits) /
+        (double)twopass->kf_group_bits;
   } else {
     twopass->kfgroup_inter_fraction = 1.0;
   }
@@ -2407,9 +2323,7 @@ static void configure_buffer_updates(VP10_COMP *cpi) {
       cpi->refresh_golden_frame = 0;
       cpi->refresh_alt_ref_frame = 1;
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
 
@@ -2421,13 +2335,15 @@ static int is_skippable_frame(const VP10_COMP *cpi) {
   const TWO_PASS *const twopass = &cpi->twopass;
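+  // A frame is skippable when it and its two first-pass neighbours are pure
+  // zero-motion inter frames (pcnt_inter - pcnt_motion == 1).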
 
   return (!frame_is_intra_only(&cpi->common) &&
-    twopass->stats_in - 2 > twopass->stats_in_start &&
-    twopass->stats_in < twopass->stats_in_end &&
-    (twopass->stats_in - 1)->pcnt_inter - (twopass->stats_in - 1)->pcnt_motion
-    == 1 &&
-    (twopass->stats_in - 2)->pcnt_inter - (twopass->stats_in - 2)->pcnt_motion
-    == 1 &&
-    twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
+          twopass->stats_in - 2 > twopass->stats_in_start &&
+          twopass->stats_in < twopass->stats_in_end &&
+          (twopass->stats_in - 1)->pcnt_inter -
+                  (twopass->stats_in - 1)->pcnt_motion ==
+              1 &&
+          (twopass->stats_in - 2)->pcnt_inter -
+                  (twopass->stats_in - 2)->pcnt_motion ==
+              1 &&
+          twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
 }
 
 void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
@@ -2440,11 +2356,9 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
 
   int target_rate;
 
-  frames_left = (int)(twopass->total_stats.count -
-                cm->current_video_frame);
+  frames_left = (int)(twopass->total_stats.count - cm->current_video_frame);
 
-  if (!twopass->stats_in)
-    return;
+  if (!twopass->stats_in) return;
 
   // If this is an arf frame then we don't want to read the stats file or
   // advance the input pointer as we already have what we need.
@@ -2472,20 +2386,19 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
     twopass->active_worst_quality = cpi->oxcf.cq_level;
   } else if (cm->current_video_frame == 0) {
     // Special case code for first frame.
-    const int section_target_bandwidth = (int)(twopass->bits_left /
-                                               frames_left);
+    const int section_target_bandwidth =
+        (int)(twopass->bits_left / frames_left);
     const double section_length = twopass->total_left_stats.count;
     const double section_error =
-      twopass->total_left_stats.coded_error / section_length;
+        twopass->total_left_stats.coded_error / section_length;
     const double section_intra_skip =
-      twopass->total_left_stats.intra_skip_pct / section_length;
+        twopass->total_left_stats.intra_skip_pct / section_length;
     const double section_inactive_zone =
-      (twopass->total_left_stats.inactive_zone_rows * 2) /
-      ((double)cm->mb_rows * section_length);
-    const int tmp_q =
-      get_twopass_worst_quality(cpi, section_error,
-                                section_intra_skip + section_inactive_zone,
-                                section_target_bandwidth, DEFAULT_GRP_WEIGHT);
+        (twopass->total_left_stats.inactive_zone_rows * 2) /
+        ((double)cm->mb_rows * section_length);
+    const int tmp_q = get_twopass_worst_quality(
+        cpi, section_error, section_intra_skip + section_inactive_zone,
+        section_target_bandwidth, DEFAULT_GRP_WEIGHT);
 
     twopass->active_worst_quality = tmp_q;
     twopass->baseline_active_worst_quality = tmp_q;
@@ -2497,8 +2410,7 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
     rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME];
   }
   vp10_zero(this_frame);
-  if (EOF == input_stats(twopass, &this_frame))
-    return;
+  if (EOF == input_stats(twopass, &this_frame)) return;
 
   // Set the frame content type flag.
   if (this_frame.intra_skip_pct >= FC_ANIMATION_THRESH)
@@ -2528,9 +2440,9 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
       FILE *fpfile;
       fpfile = fopen("arf.stt", "a");
       ++arf_count;
-      fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n",
-              cm->current_video_frame, rc->frames_till_gf_update_due,
-              rc->kf_boost, arf_count, rc->gfu_boost);
+      fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n", cm->current_video_frame,
+              rc->frames_till_gf_update_due, rc->kf_boost, arf_count,
+              rc->gfu_boost);
 
       fclose(fpfile);
     }
@@ -2555,11 +2467,12 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
 
   {
     const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
-                        ? cpi->initial_mbs : cpi->common.MBs;
+                            ? cpi->initial_mbs
+                            : cpi->common.MBs;
     // The multiplication by 256 reverses a scaling factor of (>> 8)
     // applied when combining MB error values for the frame.
     twopass->mb_av_energy =
-      log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0);
+        log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0);
   }
 
   // Update the total stats remaining structure.
@@ -2585,7 +2498,7 @@ void vp10_twopass_postencode_update(VP10_COMP *cpi) {
   // Calculate the pct rc error.
   if (rc->total_actual_bits) {
     rc->rate_error_estimate =
-      (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits);
+        (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits);
     rc->rate_error_estimate = clamp(rc->rate_error_estimate, -100, 100);
   } else {
     rc->rate_error_estimate = 0;
@@ -2605,7 +2518,7 @@ void vp10_twopass_postencode_update(VP10_COMP *cpi) {
       (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD) &&
       !cpi->rc.is_src_frame_alt_ref) {
     const int maxq_adj_limit =
-      rc->worst_quality - twopass->active_worst_quality;
+        rc->worst_quality - twopass->active_worst_quality;
     const int minq_adj_limit =
         (cpi->oxcf.rc_mode == VPX_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
 
@@ -2614,7 +2527,7 @@ void vp10_twopass_postencode_update(VP10_COMP *cpi) {
       --twopass->extend_maxq;
       if (rc->rolling_target_bits >= rc->rolling_actual_bits)
         ++twopass->extend_minq;
-    // Overshoot.
+      // Overshoot.
     } else if (rc->rate_error_estimate < -cpi->oxcf.over_shoot_pct) {
       --twopass->extend_minq;
       if (rc->rolling_target_bits < rc->rolling_actual_bits)
@@ -2643,14 +2556,14 @@ void vp10_twopass_postencode_update(VP10_COMP *cpi) {
       int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
       if (rc->projected_frame_size < fast_extra_thresh) {
         rc->vbr_bits_off_target_fast +=
-          fast_extra_thresh - rc->projected_frame_size;
+            fast_extra_thresh - rc->projected_frame_size;
         rc->vbr_bits_off_target_fast =
-          VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
+            VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
 
         // Fast adaptation of minQ if necessary to use up the extra bits.
         if (rc->avg_frame_bandwidth) {
           twopass->extend_minq_fast =
-            (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
+              (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
         }
         twopass->extend_minq_fast = VPXMIN(
             twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
diff --git a/vp10/encoder/firstpass.h b/vp10/encoder/firstpass.h
index 68a88879c466d5ba75b4c1db3133d8bf3fbf1d8a..2fbd3f7daa744096053315811901454742a917ba 100644
--- a/vp10/encoder/firstpass.h
+++ b/vp10/encoder/firstpass.h
@@ -155,9 +155,8 @@ void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
 
 void vp10_init_subsampling(struct VP10_COMP *cpi);
 
-void vp10_calculate_coded_size(struct VP10_COMP *cpi,
-                          int *scaled_frame_width,
-                          int *scaled_frame_height);
+void vp10_calculate_coded_size(struct VP10_COMP *cpi, int *scaled_frame_width,
+                               int *scaled_frame_height);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/encoder/lookahead.c b/vp10/encoder/lookahead.c
index 6289fbc7585a755cbd6015639d0fcc139829e33f..19beee3116817642ffde6856161ee7037f332aa5 100644
--- a/vp10/encoder/lookahead.c
+++ b/vp10/encoder/lookahead.c
@@ -25,35 +25,31 @@ static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
   struct lookahead_entry *buf = ctx->buf + index;
 
   assert(index < ctx->max_sz);
-  if (++index >= ctx->max_sz)
-    index -= ctx->max_sz;
+  if (++index >= ctx->max_sz) index -= ctx->max_sz;
   *idx = index;
   return buf;
 }
 
-
 void vp10_lookahead_destroy(struct lookahead_ctx *ctx) {
   if (ctx) {
     if (ctx->buf) {
       unsigned int i;
 
-      for (i = 0; i < ctx->max_sz; i++)
-        vpx_free_frame_buffer(&ctx->buf[i].img);
+      for (i = 0; i < ctx->max_sz; i++) vpx_free_frame_buffer(&ctx->buf[i].img);
       free(ctx->buf);
     }
     free(ctx);
   }
 }
 
-
 struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
-                                         unsigned int height,
-                                         unsigned int subsampling_x,
-                                         unsigned int subsampling_y,
+                                          unsigned int height,
+                                          unsigned int subsampling_x,
+                                          unsigned int subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
-                                         int use_highbitdepth,
+                                          int use_highbitdepth,
 #endif
-                                         unsigned int depth) {
+                                          unsigned int depth) {
   struct lookahead_ctx *ctx = NULL;
 
   // Clamp the lookahead queue depth
@@ -69,32 +65,30 @@ struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
     unsigned int i;
     ctx->max_sz = depth;
     ctx->buf = calloc(depth, sizeof(*ctx->buf));
-    if (!ctx->buf)
-      goto bail;
+    if (!ctx->buf) goto bail;
     for (i = 0; i < depth; i++)
-      if (vpx_alloc_frame_buffer(&ctx->buf[i].img,
-                                 width, height, subsampling_x, subsampling_y,
+      if (vpx_alloc_frame_buffer(
+              &ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
-                                 use_highbitdepth,
+              use_highbitdepth,
 #endif
-                                 VPX_ENC_BORDER_IN_PIXELS,
-                                 legacy_byte_alignment))
+              VPX_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
         goto bail;
   }
   return ctx;
- bail:
+bail:
   vp10_lookahead_destroy(ctx);
   return NULL;
 }
 
 #define USE_PARTIAL_COPY 0
 
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG   *src,
-                       int64_t ts_start, int64_t ts_end,
+int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+                        int64_t ts_start, int64_t ts_end,
 #if CONFIG_VPX_HIGHBITDEPTH
-                       int use_highbitdepth,
+                        int use_highbitdepth,
 #endif
-                       unsigned int flags) {
+                        unsigned int flags) {
   struct lookahead_entry *buf;
 #if USE_PARTIAL_COPY
   int row, col, active_end;
@@ -109,8 +103,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG   *src,
   int subsampling_y = src->subsampling_y;
   int larger_dimensions, new_dimensions;
 
-  if (ctx->sz + 1  + MAX_PRE_FRAMES > ctx->max_sz)
-    return 1;
+  if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz) return 1;
   ctx->sz++;
   buf = pop(ctx, &ctx->write_idx);
 
@@ -118,8 +111,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG   *src,
                    height != buf->img.y_crop_height ||
                    uv_width != buf->img.uv_crop_width ||
                    uv_height != buf->img.uv_crop_height;
-  larger_dimensions = width > buf->img.y_width ||
-                      height > buf->img.y_height ||
+  larger_dimensions = width > buf->img.y_width || height > buf->img.y_height ||
                       uv_width > buf->img.uv_width ||
                       uv_height > buf->img.uv_height;
   assert(!larger_dimensions || new_dimensions);
@@ -139,27 +131,22 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG   *src,
       while (1) {
         // Find the first active macroblock in this row.
         for (; col < mb_cols; ++col) {
-          if (active_map[col])
-            break;
+          if (active_map[col]) break;
         }
 
         // No more active macroblocks in this row.
-        if (col == mb_cols)
-          break;
+        if (col == mb_cols) break;
 
         // Find the end of active region in this row.
         active_end = col;
 
         for (; active_end < mb_cols; ++active_end) {
-          if (!active_map[active_end])
-            break;
+          if (!active_map[active_end]) break;
         }
 
         // Only copy this active region.
-        vp10_copy_and_extend_frame_with_rect(src, &buf->img,
-                                            row << 4,
-                                            col << 4, 16,
-                                            (active_end - col) << 4);
+        vp10_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+                                             16, (active_end - col) << 4);
 
         // Start again from the end of this active region.
         col = active_end;
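
// The loop above walks each macroblock row for maximal runs of active
// macroblocks and copies one 16-pixel-high rectangle per run. A standalone
// sketch of the same run detection over a toy activity map:
#include <stdio.h>

int main(void) {
  const unsigned char active_map[] = { 0, 1, 1, 0, 0, 1, 1, 1, 0 };
  const int mb_cols = (int)(sizeof(active_map) / sizeof(active_map[0]));
  int col = 0;

  while (1) {
    int active_end;
    /* Find the first active macroblock at or after col. */
    for (; col < mb_cols; ++col)
      if (active_map[col]) break;
    if (col == mb_cols) break; /* no more active runs in this row */

    /* Find the end of this active run. */
    for (active_end = col; active_end < mb_cols; ++active_end)
      if (!active_map[active_end]) break;

    /* One rectangle per run: x = col << 4, width = (active_end - col) << 4. */
    printf("copy run: cols [%d, %d)\n", col, active_end);
    col = active_end; /* resume after the run */
  }
  return 0;
}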
@@ -172,14 +159,13 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG   *src,
     if (larger_dimensions) {
       YV12_BUFFER_CONFIG new_img;
       memset(&new_img, 0, sizeof(new_img));
-      if (vpx_alloc_frame_buffer(&new_img,
-                                 width, height, subsampling_x, subsampling_y,
+      if (vpx_alloc_frame_buffer(&new_img, width, height, subsampling_x,
+                                 subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                                  use_highbitdepth,
 #endif
-                                 VPX_ENC_BORDER_IN_PIXELS,
-                                 0))
-          return 1;
+                                 VPX_ENC_BORDER_IN_PIXELS, 0))
+        return 1;
       vpx_free_frame_buffer(&buf->img);
       buf->img = new_img;
     } else if (new_dimensions) {
@@ -202,9 +188,8 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG   *src,
   return 0;
 }
 
-
 struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
-                                          int drain) {
+                                           int drain) {
   struct lookahead_entry *buf = NULL;
 
   if (ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
@@ -214,25 +199,22 @@ struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
   return buf;
 }
 
-
 struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
-                                           int index) {
+                                            int index) {
   struct lookahead_entry *buf = NULL;
 
   if (index >= 0) {
     // Forward peek
     if (index < (int)ctx->sz) {
       index += ctx->read_idx;
-      if (index >= (int)ctx->max_sz)
-        index -= ctx->max_sz;
+      if (index >= (int)ctx->max_sz) index -= ctx->max_sz;
       buf = ctx->buf + index;
     }
   } else if (index < 0) {
     // Backward peek
     if (-index <= MAX_PRE_FRAMES) {
       index += ctx->read_idx;
-      if (index < 0)
-        index += ctx->max_sz;
+      if (index < 0) index += ctx->max_sz;
       buf = ctx->buf + index;
     }
   }
@@ -240,6 +222,4 @@ struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
   return buf;
 }
 
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) {
-  return ctx->sz;
-}
+unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
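
// The lookahead queue is a circular buffer: indices step by one and wrap by
// subtracting (or adding) max_sz rather than using %. A compact sketch of
// the forward wrap used by pop() and the backward wrap used by a negative
// peek() index (toy constants, not the encoder's types):
#include <stdio.h>

#define MAX_SZ 5

static int wrap_forward(int index) {
  if (++index >= MAX_SZ) index -= MAX_SZ; /* step, wrapping past the end */
  return index;
}

static int wrap_backward(int index) {
  if (index < 0) index += MAX_SZ; /* wrap a negative (backward) index */
  return index;
}

int main(void) {
  const int read_idx = 1;
  printf("%d\n", wrap_forward(4));              /* 4 -> 0 */
  printf("%d\n", wrap_backward(-2 + read_idx)); /* peek(-2) -> slot 4 */
  return 0;
}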
diff --git a/vp10/encoder/lookahead.h b/vp10/encoder/lookahead.h
index fb28c320f5fdc76efbb294f3e0402e5677360eba..4b5fe7f9c66d3a62d3b33648d85f57bf9c3cb02a 100644
--- a/vp10/encoder/lookahead.h
+++ b/vp10/encoder/lookahead.h
@@ -21,10 +21,10 @@ extern "C" {
 #define MAX_LAG_BUFFERS 25
 
 struct lookahead_entry {
-  YV12_BUFFER_CONFIG  img;
-  int64_t             ts_start;
-  int64_t             ts_end;
-  unsigned int        flags;
+  YV12_BUFFER_CONFIG img;
+  int64_t ts_start;
+  int64_t ts_end;
+  unsigned int flags;
 };
 
 // The max of past frames we want to keep in the queue.
@@ -44,20 +44,18 @@ struct lookahead_ctx {
  * may be done when buffers are enqueued.
  */
 struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
-                                         unsigned int height,
-                                         unsigned int subsampling_x,
-                                         unsigned int subsampling_y,
+                                          unsigned int height,
+                                          unsigned int subsampling_x,
+                                          unsigned int subsampling_y,
 #if CONFIG_VPX_HIGHBITDEPTH
-                                         int use_highbitdepth,
+                                          int use_highbitdepth,
 #endif
-                                         unsigned int depth);
-
+                                          unsigned int depth);
 
 /**\brief Destroys the lookahead stage
  */
 void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
 
-
 /**\brief Enqueue a source buffer
  *
  * This function will copy the source image into a new framebuffer with
@@ -74,12 +72,11 @@ void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
  * \param[in] active_map  Map that specifies which macroblock is active
  */
 int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
-                       int64_t ts_start, int64_t ts_end,
+                        int64_t ts_start, int64_t ts_end,
 #if CONFIG_VPX_HIGHBITDEPTH
-                       int use_highbitdepth,
+                        int use_highbitdepth,
 #endif
-                       unsigned int flags);
-
+                        unsigned int flags);
 
 /**\brief Get the next source buffer to encode
  *
@@ -92,8 +89,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
  * \retval NULL, if drain not set and queue not of the configured depth
  */
 struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
-                                          int drain);
-
+                                           int drain);
 
 /**\brief Get a future source buffer to encode
  *
@@ -103,8 +99,7 @@ struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
  * \retval NULL, if no buffer exists at the specified index
  */
 struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
-                                           int index);
-
+                                            int index);
 
 /**\brief Get the number of frames currently in the lookahead queue
  *
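
// Hypothetical shape of a caller of this API, assuming
// CONFIG_VPX_HIGHBITDEPTH is off (so the use_highbitdepth arguments drop
// out). have_input_frame() and encode_frame() are illustrative stand-ins,
// not encoder functions:
//
//   struct lookahead_ctx *la =
//       vp10_lookahead_init(width, height, ss_x, ss_y, /*depth=*/25);
//   while (have_input_frame(&src, &ts_start, &ts_end)) {
//     if (vp10_lookahead_push(la, &src, ts_start, ts_end, /*flags=*/0))
//       break; /* queue full: MAX_PRE_FRAMES slots stay reserved */
//     struct lookahead_entry *e = vp10_lookahead_pop(la, /*drain=*/0);
//     if (e) encode_frame(&e->img, e->ts_start, e->ts_end);
//   }
//   vp10_lookahead_destroy(la);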
diff --git a/vp10/encoder/mbgraph.c b/vp10/encoder/mbgraph.c
index 98c532e15ca3f9169e2230a06cbd7152ce18787b..1579bf454e3e75fffba1bd7ef5f7271a34b7bbf7 100644
--- a/vp10/encoder/mbgraph.c
+++ b/vp10/encoder/mbgraph.c
@@ -22,11 +22,8 @@
 #include "vp10/common/reconinter.h"
 #include "vp10/common/reconintra.h"
 
-
-static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
-                                              const MV *ref_mv,
-                                              MV *dst_mv,
-                                              int mb_row,
+static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
+                                              MV *dst_mv, int mb_row,
                                               int mb_col) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -51,8 +48,7 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
 
   /*cpi->sf.search_method == HEX*/
   vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
-                 cond_cost_list(cpi, cost_list),
-                 &v_fn_ptr, 0, ref_mv, dst_mv);
+                  cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv, dst_mv);
 
   // Try sub-pixel MC
   // if (bestsme > error_thresh && bestsme < INT_MAX)
@@ -62,9 +58,8 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
     cpi->find_fractional_mv_step(
         x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
         &v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
-        cond_cost_list(cpi, cost_list),
-        NULL, NULL,
-        &distortion, &sse, NULL, 0, 0);
+        cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0,
+        0);
   }
 
   xd->mi[0]->mbmi.mode = NEWMV;
@@ -107,10 +102,10 @@ static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv,
   // based search as well.
   if (ref_mv->row != 0 || ref_mv->col != 0) {
     unsigned int tmp_err;
-    MV zero_ref_mv = {0, 0}, tmp_mv;
+    MV zero_ref_mv = { 0, 0 }, tmp_mv;
 
-    tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv,
-                                        mb_row, mb_col);
+    tmp_err =
+        do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv, mb_row, mb_col);
     if (tmp_err < err) {
       dst_mv->as_mv = tmp_mv;
       err = tmp_err;
@@ -135,7 +130,7 @@ static int do_16x16_zerozero_search(VP10_COMP *cpi, int_mv *dst_mv) {
   return err;
 }
 static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
-  MACROBLOCK   *const x  = &cpi->td.mb;
+  MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   PREDICTION_MODE best_mode = -1, mode;
   unsigned int best_err = INT_MAX;
@@ -146,38 +141,30 @@ static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
     unsigned int err;
 
     xd->mi[0]->mbmi.mode = mode;
-    vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode,
-                            x->plane[0].src.buf, x->plane[0].src.stride,
-                            xd->plane[0].dst.buf, xd->plane[0].dst.stride,
-                            0, 0, 0);
+    vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
+                             x->plane[0].src.stride, xd->plane[0].dst.buf,
+                             xd->plane[0].dst.stride, 0, 0, 0);
     err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                        xd->plane[0].dst.buf, xd->plane[0].dst.stride);
 
     // find best
     if (err < best_err) {
-      best_err  = err;
+      best_err = err;
       best_mode = mode;
     }
   }
 
-  if (pbest_mode)
-    *pbest_mode = best_mode;
+  if (pbest_mode) *pbest_mode = best_mode;
 
   return best_err;
 }
 
-static void update_mbgraph_mb_stats
-(
-  VP10_COMP *cpi,
-  MBGRAPH_MB_STATS *stats,
-  YV12_BUFFER_CONFIG *buf,
-  int mb_y_offset,
-  YV12_BUFFER_CONFIG *golden_ref,
-  const MV *prev_golden_ref_mv,
-  YV12_BUFFER_CONFIG *alt_ref,
-  int mb_row,
-  int mb_col
-) {
+static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
+                                    YV12_BUFFER_CONFIG *buf, int mb_y_offset,
+                                    YV12_BUFFER_CONFIG *golden_ref,
+                                    const MV *prev_golden_ref_mv,
+                                    YV12_BUFFER_CONFIG *alt_ref, int mb_row,
+                                    int mb_col) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   int intra_error;
@@ -191,10 +178,8 @@ static void update_mbgraph_mb_stats
   xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride;
 
   // do intra 16x16 prediction
-  intra_error = find_best_16x16_intra(cpi,
-                                      &stats->ref[INTRA_FRAME].m.mode);
-  if (intra_error <= 0)
-    intra_error = 1;
+  intra_error = find_best_16x16_intra(cpi, &stats->ref[INTRA_FRAME].m.mode);
+  if (intra_error <= 0) intra_error = 1;
   stats->ref[INTRA_FRAME].err = intra_error;
 
   // Golden frame MV search, if it exists and is different than last frame
@@ -202,10 +187,9 @@ static void update_mbgraph_mb_stats
     int g_motion_error;
     xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
     xd->plane[0].pre[0].stride = golden_ref->y_stride;
-    g_motion_error = do_16x16_motion_search(cpi,
-                                            prev_golden_ref_mv,
-                                            &stats->ref[GOLDEN_FRAME].m.mv,
-                                            mb_row, mb_col);
+    g_motion_error =
+        do_16x16_motion_search(cpi, prev_golden_ref_mv,
+                               &stats->ref[GOLDEN_FRAME].m.mv, mb_row, mb_col);
     stats->ref[GOLDEN_FRAME].err = g_motion_error;
   } else {
     stats->ref[GOLDEN_FRAME].err = INT_MAX;
@@ -218,8 +202,8 @@ static void update_mbgraph_mb_stats
     int a_motion_error;
     xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
     xd->plane[0].pre[0].stride = alt_ref->y_stride;
-    a_motion_error = do_16x16_zerozero_search(cpi,
-                                              &stats->ref[ALTREF_FRAME].m.mv);
+    a_motion_error =
+        do_16x16_zerozero_search(cpi, &stats->ref[ALTREF_FRAME].m.mv);
 
     stats->ref[ALTREF_FRAME].err = a_motion_error;
   } else {
@@ -239,17 +223,17 @@ static void update_mbgraph_frame_stats(VP10_COMP *cpi,
 
   int mb_col, mb_row, offset = 0;
   int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
-  MV gld_top_mv = {0, 0};
+  MV gld_top_mv = { 0, 0 };
   MODE_INFO mi_local;
 
   vp10_zero(mi_local);
   // Set up limit values for motion vectors to prevent them extending outside
   // the UMV borders.
-  x->mv_row_min     = -BORDER_MV_PIXELS_B16;
-  x->mv_row_max     = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
-  xd->up_available  = 0;
-  xd->plane[0].dst.stride  = buf->y_stride;
-  xd->plane[0].pre[0].stride  = buf->y_stride;
+  x->mv_row_min = -BORDER_MV_PIXELS_B16;
+  x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
+  xd->up_available = 0;
+  xd->plane[0].dst.stride = buf->y_stride;
+  xd->plane[0].pre[0].stride = buf->y_stride;
   xd->plane[1].dst.stride = buf->uv_stride;
   xd->mi[0] = &mi_local;
   mi_local.mbmi.sb_type = BLOCK_16X16;
@@ -258,41 +242,39 @@ static void update_mbgraph_frame_stats(VP10_COMP *cpi,
 
   for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
     MV gld_left_mv = gld_top_mv;
-    int mb_y_in_offset  = mb_y_offset;
+    int mb_y_in_offset = mb_y_offset;
     int arf_y_in_offset = arf_y_offset;
     int gld_y_in_offset = gld_y_offset;
 
     // Set up limit values for motion vectors to prevent them extending outside
     // the UMV borders.
-    x->mv_col_min      = -BORDER_MV_PIXELS_B16;
-    x->mv_col_max      = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
+    x->mv_col_min = -BORDER_MV_PIXELS_B16;
+    x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
     xd->left_available = 0;
 
     for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
       MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
 
-      update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
-                              golden_ref, &gld_left_mv, alt_ref,
-                              mb_row, mb_col);
+      update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, golden_ref,
+                              &gld_left_mv, alt_ref, mb_row, mb_col);
       gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
       if (mb_col == 0) {
         gld_top_mv = gld_left_mv;
       }
       xd->left_available = 1;
-      mb_y_in_offset    += 16;
-      gld_y_in_offset   += 16;
-      arf_y_in_offset   += 16;
-      x->mv_col_min     -= 16;
-      x->mv_col_max     -= 16;
+      mb_y_in_offset += 16;
+      gld_y_in_offset += 16;
+      arf_y_in_offset += 16;
+      x->mv_col_min -= 16;
+      x->mv_col_max -= 16;
     }
     xd->up_available = 1;
-    mb_y_offset     += buf->y_stride * 16;
-    gld_y_offset    += golden_ref->y_stride * 16;
-    if (alt_ref)
-      arf_y_offset    += alt_ref->y_stride * 16;
-    x->mv_row_min   -= 16;
-    x->mv_row_max   -= 16;
-    offset          += cm->mb_cols;
+    mb_y_offset += buf->y_stride * 16;
+    gld_y_offset += golden_ref->y_stride * 16;
+    if (alt_ref) arf_y_offset += alt_ref->y_stride * 16;
+    x->mv_row_min -= 16;
+    x->mv_row_max -= 16;
+    offset += cm->mb_cols;
   }
 }
 
@@ -306,9 +288,9 @@ static void separate_arf_mbs(VP10_COMP *cpi) {
 
   int *arf_not_zz;
 
-  CHECK_MEM_ERROR(cm, arf_not_zz,
-                  vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz),
-                             1));
+  CHECK_MEM_ERROR(
+      cm, arf_not_zz,
+      vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
 
   // We are not interested in results beyond the alt ref itself.
   if (n_frames > cpi->rc.frames_till_gf_update_due)
@@ -324,12 +306,11 @@ static void separate_arf_mbs(VP10_COMP *cpi) {
         MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];
 
         int altref_err = mb_stats->ref[ALTREF_FRAME].err;
-        int intra_err  = mb_stats->ref[INTRA_FRAME ].err;
+        int intra_err = mb_stats->ref[INTRA_FRAME].err;
         int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
 
         // Test for altref vs intra and gf and that its mv was 0,0.
-        if (altref_err > 1000 ||
-            altref_err > intra_err ||
+        if (altref_err > 1000 || altref_err > intra_err ||
             altref_err > golden_err) {
           arf_not_zz[offset + mb_col]++;
         }
@@ -384,11 +365,9 @@ void vp10_update_mbgraph_stats(VP10_COMP *cpi) {
 
   // We need to look ahead beyond where the ARF transitions into
   // being a GF, so exit if we don't look ahead beyond that.
-  if (n_frames <= cpi->rc.frames_till_gf_update_due)
-    return;
+  if (n_frames <= cpi->rc.frames_till_gf_update_due) return;
 
-  if (n_frames > MAX_LAG_BUFFERS)
-    n_frames = MAX_LAG_BUFFERS;
+  if (n_frames > MAX_LAG_BUFFERS) n_frames = MAX_LAG_BUFFERS;
 
   cpi->mbgraph_n_frames = n_frames;
   for (i = 0; i < n_frames; i++) {
@@ -407,8 +386,8 @@ void vp10_update_mbgraph_stats(VP10_COMP *cpi) {
 
     assert(q_cur != NULL);
 
-    update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
-                               golden_ref, cpi->Source);
+    update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, golden_ref,
+                               cpi->Source);
   }
 
   vpx_clear_system_state();
diff --git a/vp10/encoder/mbgraph.h b/vp10/encoder/mbgraph.h
index 3408464c555d34cef7abc00e3984b088f4d41ac3..e2daae563325b8e095733ee0459137c90a9231c2 100644
--- a/vp10/encoder/mbgraph.h
+++ b/vp10/encoder/mbgraph.h
@@ -25,9 +25,7 @@ typedef struct {
   } ref[MAX_REF_FRAMES];
 } MBGRAPH_MB_STATS;
 
-typedef struct {
-  MBGRAPH_MB_STATS *mb_stats;
-} MBGRAPH_FRAME_STATS;
+typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS;
 
 struct VP10_COMP;
 
diff --git a/vp10/encoder/mcomp.c b/vp10/encoder/mcomp.c
index 8d5b6626d9948a378a67c164617579c8db088645..17cc2f39fa37f4e2512b6ebf0ade16dfaf8a4165 100644
--- a/vp10/encoder/mcomp.c
+++ b/vp10/encoder/mcomp.c
@@ -45,14 +45,10 @@ void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
 
   // Get intersection of UMV window and valid MV window to reduce # of checks
   // in diamond search.
-  if (x->mv_col_min < col_min)
-    x->mv_col_min = col_min;
-  if (x->mv_col_max > col_max)
-    x->mv_col_max = col_max;
-  if (x->mv_row_min < row_min)
-    x->mv_row_min = row_min;
-  if (x->mv_row_max > row_max)
-    x->mv_row_max = row_max;
+  if (x->mv_col_min < col_min) x->mv_col_min = col_min;
+  if (x->mv_col_max > col_max) x->mv_col_max = col_max;
+  if (x->mv_row_min < row_min) x->mv_row_min = row_min;
+  if (x->mv_row_max > row_max) x->mv_row_max = row_max;
 }
 
 int vp10_init_search_range(int size) {
@@ -60,44 +56,39 @@ int vp10_init_search_range(int size) {
   // Minimum search size no matter what the passed in value.
   size = VPXMAX(16, size);
 
-  while ((size << sr) < MAX_FULL_PEL_VAL)
-    sr++;
+  while ((size << sr) < MAX_FULL_PEL_VAL) sr++;
 
   sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2);
   return sr;
 }
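
// Quick trace of the loop above. Assuming MAX_FULL_PEL_VAL is 1023 (an
// assumption about its value elsewhere in the tree), a 64-pixel dimension
// yields sr = 4, the first shift for which size << sr reaches the maximum
// full-pel range:
#include <stdio.h>

int main(void) {
  const int max_full_pel_val = 1023; /* assumed MAX_FULL_PEL_VAL */
  int size = 64, sr = 0;
  while ((size << sr) < max_full_pel_val) sr++; /* 64,128,256,512,1024 */
  printf("sr = %d\n", sr); /* prints 4 */
  return 0;
}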
 
-static INLINE int mv_cost(const MV *mv,
-                          const int *joint_cost, int *const comp_cost[2]) {
-  return joint_cost[vp10_get_mv_joint(mv)] +
-             comp_cost[0][mv->row] + comp_cost[1][mv->col];
+static INLINE int mv_cost(const MV *mv, const int *joint_cost,
+                          int *const comp_cost[2]) {
+  return joint_cost[vp10_get_mv_joint(mv)] + comp_cost[0][mv->row] +
+         comp_cost[1][mv->col];
 }
 
-int vp10_mv_bit_cost(const MV *mv, const MV *ref,
-                    const int *mvjcost, int *mvcost[2], int weight) {
-  const MV diff = { mv->row - ref->row,
-                    mv->col - ref->col };
+int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+                     int *mvcost[2], int weight) {
+  const MV diff = { mv->row - ref->row, mv->col - ref->col };
   return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
 }
 
-static int mv_err_cost(const MV *mv, const MV *ref,
-                       const int *mvjcost, int *mvcost[2],
-                       int error_per_bit) {
+static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
+                       int *mvcost[2], int error_per_bit) {
   if (mvcost) {
-    const MV diff = { mv->row - ref->row,
-                      mv->col - ref->col };
-    return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) *
-                                  error_per_bit, 13);
+    const MV diff = { mv->row - ref->row, mv->col - ref->col };
+    return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
+                              13);
   }
   return 0;
 }
 
 static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
                           int error_per_bit) {
-  const MV diff = { mv->row - ref->row,
-                    mv->col - ref->col };
-  return ROUND_POWER_OF_TWO(mv_cost(&diff, x->nmvjointsadcost,
-                                    x->nmvsadcost) * error_per_bit, 8);
+  const MV diff = { mv->row - ref->row, mv->col - ref->col };
+  return ROUND_POWER_OF_TWO(
+      mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) * error_per_bit, 8);
 }
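
// The three cost helpers above share one fixed-point pattern: a rate from
// mv_cost() times a scale, rounded back down by a power of two.
// ROUND_POWER_OF_TWO is defined elsewhere in the tree; the local macro
// below assumes the usual round-half-up form. With weight = 128 the >> 7
// in vp10_mv_bit_cost makes the weight an exact unit scale.
#include <stdio.h>

#define MY_ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

int main(void) {
  const int rate = 300;   /* hypothetical mv_cost() output */
  const int weight = 128; /* 128 == 1.0 in this fixed-point scheme */
  printf("bit cost: %d\n", MY_ROUND_POWER_OF_TWO(rate * weight, 7)); /* 300 */

  const int error_per_bit = 4096; /* lambda, on a 2^13 scale here */
  printf("err cost: %d\n",
         MY_ROUND_POWER_OF_TWO(rate * error_per_bit, 13)); /* 150 */
  return 0;
}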
 
 void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
@@ -108,7 +99,7 @@ void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
 
   for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
     // Generate offsets for 4 search sites per step.
-    const MV ss_mvs[] = {{-len, 0}, {len, 0}, {0, -len}, {0, len}};
+    const MV ss_mvs[] = { { -len, 0 }, { len, 0 }, { 0, -len }, { 0, len } };
     int i;
     for (i = 0; i < 4; ++i) {
       search_site *const ss = &cfg->ss[ss_count++];
@@ -129,10 +120,9 @@ void vp10_init3smotion_compensation(search_site_config *cfg, int stride) {
 
   for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
     // Generate offsets for 8 search sites per step.
-    const MV ss_mvs[8] = {
-      {-len,  0  }, {len,  0  }, { 0,   -len}, {0,    len},
-      {-len, -len}, {-len, len}, {len,  -len}, {len,  len}
-    };
+    const MV ss_mvs[8] = { { -len, 0 },   { len, 0 },     { 0, -len },
+                           { 0, len },    { -len, -len }, { -len, len },
+                           { len, -len }, { len, len } };
     int i;
     for (i = 0; i < 8; ++i) {
       search_site *const ss = &cfg->ss[ss_count++];
@@ -156,172 +146,147 @@ void vp10_init3smotion_compensation(search_site_config *cfg, int stride) {
  */
 
 /* estimated cost of a motion vector (r,c) */
-#define MVC(r, c)                                       \
-    (mvcost ?                                           \
-     ((mvjcost[((r) != rr) * 2 + ((c) != rc)] +         \
-       mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
-      error_per_bit + 4096) >> 13 : 0)
-
+#define MVC(r, c)                                                         \
+  (mvcost                                                                 \
+       ? ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + mvcost[0][((r)-rr)] + \
+           mvcost[1][((c)-rc)]) *                                         \
+              error_per_bit +                                             \
+          4096) >>                                                        \
+             13                                                           \
+       : 0)
 
 // Convert a motion vector component to an offset for the sv[a]f calc.
-static INLINE int sp(int x) {
-  return x & 7;
-}
+static INLINE int sp(int x) { return x & 7; }
 
 static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
   return &buf[(r >> 3) * stride + (c >> 3)];
 }
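
// During sub-pel search, (r, c) are in 1/8-pel units: sp() keeps the low
// three bits as the sub-pel phase handed to the sv[a]f functions, while
// pre() shifts right by three for the full-pel sample address. A tiny
// standalone check of the split:
#include <stdio.h>

int main(void) {
  const int c = 21; /* 1/8-pel column: 2 full pels + 5/8 */
  printf("full pel %d, phase %d/8\n", c >> 3, c & 7); /* full pel 2, 5/8 */
  return 0;
}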
 
 /* checks if (r, c) has better score than previous best */
-#define CHECK_BETTER(v, r, c) \
-  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {              \
-    if (second_pred == NULL)                                           \
-      thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
-                             src_stride, &sse);                        \
-    else                                                               \
-      thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
-                              z, src_stride, &sse, second_pred);       \
-    if ((v = MVC(r, c) + thismse) < besterr) {                         \
-      besterr = v;                                                     \
-      br = r;                                                          \
-      bc = c;                                                          \
-      *distortion = thismse;                                           \
-      *sse1 = sse;                                                     \
-    }                                                                  \
-  } else {                                                             \
-    v = INT_MAX;                                                       \
+#define CHECK_BETTER(v, r, c)                                                \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {                    \
+    if (second_pred == NULL)                                                 \
+      thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z,  \
+                         src_stride, &sse);                                  \
+    else                                                                     \
+      thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+                          src_stride, &sse, second_pred);                    \
+    if ((v = MVC(r, c) + thismse) < besterr) {                               \
+      besterr = v;                                                           \
+      br = r;                                                                \
+      bc = c;                                                                \
+      *distortion = thismse;                                                 \
+      *sse1 = sse;                                                           \
+    }                                                                        \
+  } else {                                                                   \
+    v = INT_MAX;                                                             \
   }
 
-#define FIRST_LEVEL_CHECKS                              \
-  {                                                     \
-    unsigned int left, right, up, down, diag;           \
-    CHECK_BETTER(left, tr, tc - hstep);                 \
-    CHECK_BETTER(right, tr, tc + hstep);                \
-    CHECK_BETTER(up, tr - hstep, tc);                   \
-    CHECK_BETTER(down, tr + hstep, tc);                 \
-    whichdir = (left < right ? 0 : 1) +                 \
-               (up < down ? 0 : 2);                     \
-    switch (whichdir) {                                 \
-      case 0:                                           \
-        CHECK_BETTER(diag, tr - hstep, tc - hstep);     \
-        break;                                          \
-      case 1:                                           \
-        CHECK_BETTER(diag, tr - hstep, tc + hstep);     \
-        break;                                          \
-      case 2:                                           \
-        CHECK_BETTER(diag, tr + hstep, tc - hstep);     \
-        break;                                          \
-      case 3:                                           \
-        CHECK_BETTER(diag, tr + hstep, tc + hstep);     \
-        break;                                          \
-    }                                                   \
+#define FIRST_LEVEL_CHECKS                                       \
+  {                                                              \
+    unsigned int left, right, up, down, diag;                    \
+    CHECK_BETTER(left, tr, tc - hstep);                          \
+    CHECK_BETTER(right, tr, tc + hstep);                         \
+    CHECK_BETTER(up, tr - hstep, tc);                            \
+    CHECK_BETTER(down, tr + hstep, tc);                          \
+    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);     \
+    switch (whichdir) {                                          \
+      case 0: CHECK_BETTER(diag, tr - hstep, tc - hstep); break; \
+      case 1: CHECK_BETTER(diag, tr - hstep, tc + hstep); break; \
+      case 2: CHECK_BETTER(diag, tr + hstep, tc - hstep); break; \
+      case 3: CHECK_BETTER(diag, tr + hstep, tc + hstep); break; \
+    }                                                            \
   }
 
-#define SECOND_LEVEL_CHECKS                             \
-  {                                                     \
-    int kr, kc;                                         \
-    unsigned int second;                                \
-    if (tr != br && tc != bc) {                         \
-      kr = br - tr;                                     \
-      kc = bc - tc;                                     \
-      CHECK_BETTER(second, tr + kr, tc + 2 * kc);       \
-      CHECK_BETTER(second, tr + 2 * kr, tc + kc);       \
-    } else if (tr == br && tc != bc) {                  \
-      kc = bc - tc;                                     \
-      CHECK_BETTER(second, tr + hstep, tc + 2 * kc);    \
-      CHECK_BETTER(second, tr - hstep, tc + 2 * kc);    \
-      switch (whichdir) {                               \
-        case 0:                                         \
-        case 1:                                         \
-          CHECK_BETTER(second, tr + hstep, tc + kc);    \
-          break;                                        \
-        case 2:                                         \
-        case 3:                                         \
-          CHECK_BETTER(second, tr - hstep, tc + kc);    \
-          break;                                        \
-      }                                                 \
-    } else if (tr != br && tc == bc) {                  \
-      kr = br - tr;                                     \
-      CHECK_BETTER(second, tr + 2 * kr, tc + hstep);    \
-      CHECK_BETTER(second, tr + 2 * kr, tc - hstep);    \
-      switch (whichdir) {                               \
-        case 0:                                         \
-        case 2:                                         \
-          CHECK_BETTER(second, tr + kr, tc + hstep);    \
-          break;                                        \
-        case 1:                                         \
-        case 3:                                         \
-          CHECK_BETTER(second, tr + kr, tc - hstep);    \
-          break;                                        \
-      }                                                 \
-    }                                                   \
+#define SECOND_LEVEL_CHECKS                                       \
+  {                                                               \
+    int kr, kc;                                                   \
+    unsigned int second;                                          \
+    if (tr != br && tc != bc) {                                   \
+      kr = br - tr;                                               \
+      kc = bc - tc;                                               \
+      CHECK_BETTER(second, tr + kr, tc + 2 * kc);                 \
+      CHECK_BETTER(second, tr + 2 * kr, tc + kc);                 \
+    } else if (tr == br && tc != bc) {                            \
+      kc = bc - tc;                                               \
+      CHECK_BETTER(second, tr + hstep, tc + 2 * kc);              \
+      CHECK_BETTER(second, tr - hstep, tc + 2 * kc);              \
+      switch (whichdir) {                                         \
+        case 0:                                                   \
+        case 1: CHECK_BETTER(second, tr + hstep, tc + kc); break; \
+        case 2:                                                   \
+        case 3: CHECK_BETTER(second, tr - hstep, tc + kc); break; \
+      }                                                           \
+    } else if (tr != br && tc == bc) {                            \
+      kr = br - tr;                                               \
+      CHECK_BETTER(second, tr + 2 * kr, tc + hstep);              \
+      CHECK_BETTER(second, tr + 2 * kr, tc - hstep);              \
+      switch (whichdir) {                                         \
+        case 0:                                                   \
+        case 2: CHECK_BETTER(second, tr + kr, tc + hstep); break; \
+        case 1:                                                   \
+        case 3: CHECK_BETTER(second, tr + kr, tc - hstep); break; \
+      }                                                           \
+    }                                                             \
   }
 
 // TODO(yunqingwang): SECOND_LEVEL_CHECKS_BEST was a rewrite of
 // SECOND_LEVEL_CHECKS, and SECOND_LEVEL_CHECKS should be rewritten
 // later in the same way.
-#define SECOND_LEVEL_CHECKS_BEST                        \
-  {                                                     \
-    unsigned int second;                                \
-    int br0 = br;                                       \
-    int bc0 = bc;                                       \
-    assert(tr == br || tc == bc);                       \
-    if (tr == br && tc != bc) {                         \
-      kc = bc - tc;                                     \
-    } else if (tr != br && tc == bc) {                  \
-      kr = br - tr;                                     \
-    }                                                   \
-    CHECK_BETTER(second, br0 + kr, bc0);                \
-    CHECK_BETTER(second, br0, bc0 + kc);                \
-    if (br0 != br || bc0 != bc) {                       \
-      CHECK_BETTER(second, br0 + kr, bc0 + kc);         \
-    }                                                   \
+#define SECOND_LEVEL_CHECKS_BEST                \
+  {                                             \
+    unsigned int second;                        \
+    int br0 = br;                               \
+    int bc0 = bc;                               \
+    assert(tr == br || tc == bc);               \
+    if (tr == br && tc != bc) {                 \
+      kc = bc - tc;                             \
+    } else if (tr != br && tc == bc) {          \
+      kr = br - tr;                             \
+    }                                           \
+    CHECK_BETTER(second, br0 + kr, bc0);        \
+    CHECK_BETTER(second, br0, bc0 + kc);        \
+    if (br0 != br || bc0 != bc) {               \
+      CHECK_BETTER(second, br0 + kr, bc0 + kc); \
+    }                                           \
   }
 
-#define SETUP_SUBPEL_SEARCH                                                \
-  const uint8_t *const z = x->plane[0].src.buf;                            \
-  const int src_stride = x->plane[0].src.stride;                           \
-  const MACROBLOCKD *xd = &x->e_mbd;                                       \
-  unsigned int besterr = INT_MAX;                                          \
-  unsigned int sse;                                                        \
-  unsigned int whichdir;                                                   \
-  int thismse;                                                             \
-  const unsigned int halfiters = iters_per_step;                           \
-  const unsigned int quarteriters = iters_per_step;                        \
-  const unsigned int eighthiters = iters_per_step;                         \
-  const int y_stride = xd->plane[0].pre[0].stride;                         \
-  const int offset = bestmv->row * y_stride + bestmv->col;                 \
-  const uint8_t *const y = xd->plane[0].pre[0].buf;                        \
-                                                                           \
-  int rr = ref_mv->row;                                                    \
-  int rc = ref_mv->col;                                                    \
-  int br = bestmv->row * 8;                                                \
-  int bc = bestmv->col * 8;                                                \
-  int hstep = 4;                                                           \
-  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);        \
-  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);        \
-  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);        \
-  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);        \
-  int tr = br;                                                             \
-  int tc = bc;                                                             \
-                                                                           \
-  bestmv->row *= 8;                                                        \
+#define SETUP_SUBPEL_SEARCH                                         \
+  const uint8_t *const z = x->plane[0].src.buf;                     \
+  const int src_stride = x->plane[0].src.stride;                    \
+  const MACROBLOCKD *xd = &x->e_mbd;                                \
+  unsigned int besterr = INT_MAX;                                   \
+  unsigned int sse;                                                 \
+  unsigned int whichdir;                                            \
+  int thismse;                                                      \
+  const unsigned int halfiters = iters_per_step;                    \
+  const unsigned int quarteriters = iters_per_step;                 \
+  const unsigned int eighthiters = iters_per_step;                  \
+  const int y_stride = xd->plane[0].pre[0].stride;                  \
+  const int offset = bestmv->row * y_stride + bestmv->col;          \
+  const uint8_t *const y = xd->plane[0].pre[0].buf;                 \
+                                                                    \
+  int rr = ref_mv->row;                                             \
+  int rc = ref_mv->col;                                             \
+  int br = bestmv->row * 8;                                         \
+  int bc = bestmv->col * 8;                                         \
+  int hstep = 4;                                                    \
+  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
+  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
+  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
+  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
+  int tr = br;                                                      \
+  int tc = bc;                                                      \
+                                                                    \
+  bestmv->row *= 8;                                                 \
   bestmv->col *= 8;
 
-static unsigned int setup_center_error(const MACROBLOCKD *xd,
-                                       const MV *bestmv,
-                                       const MV *ref_mv,
-                                       int error_per_bit,
-                                       const vpx_variance_fn_ptr_t *vfp,
-                                       const uint8_t *const src,
-                                       const int src_stride,
-                                       const uint8_t *const y,
-                                       int y_stride,
-                                       const uint8_t *second_pred,
-                                       int w, int h, int offset,
-                                       int *mvjcost, int *mvcost[2],
-                                       unsigned int *sse1,
-                                       int *distortion) {
+static unsigned int setup_center_error(
+    const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    const uint8_t *const src, const int src_stride, const uint8_t *const y,
+    int y_stride, const uint8_t *second_pred, int w, int h, int offset,
+    int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
   unsigned int besterr;
 #if CONFIG_VPX_HIGHBITDEPTH
   if (second_pred != NULL) {
@@ -329,8 +294,8 @@ static unsigned int setup_center_error(const MACROBLOCKD *xd,
       DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
       vpx_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
                                y_stride);
-      besterr = vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride,
-                        sse1);
+      besterr =
+          vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);
     } else {
       DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
       vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
@@ -342,7 +307,7 @@ static unsigned int setup_center_error(const MACROBLOCKD *xd,
   *distortion = besterr;
   besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
 #else
-  (void) xd;
+  (void)xd;
   if (second_pred != NULL) {
     DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
     vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
@@ -361,10 +326,8 @@ static INLINE int divide_and_round(const int n, const int d) {
 }
 
 static INLINE int is_cost_list_wellbehaved(int *cost_list) {
-  return cost_list[0] < cost_list[1] &&
-         cost_list[0] < cost_list[2] &&
-         cost_list[0] < cost_list[3] &&
-         cost_list[0] < cost_list[4];
+  return cost_list[0] < cost_list[1] && cost_list[0] < cost_list[2] &&
+         cost_list[0] < cost_list[3] && cost_list[0] < cost_list[4];
 }
 
 // Returns surface minimum estimate at given precision in 1/2^n bits.
@@ -375,8 +338,7 @@ static INLINE int is_cost_list_wellbehaved(int *cost_list) {
 // x0 = 1/2 (S1 - S3)/(S1 + S3 - 2*S0),
 // y0 = 1/2 (S4 - S2)/(S4 + S2 - 2*S0).
 // The code below is an integerized version of that.
-static void get_cost_surf_min(int *cost_list, int *ir, int *ic,
-                              int bits) {
+static void get_cost_surf_min(int *cost_list, int *ir, int *ic, int bits) {
   *ic = divide_and_round((cost_list[1] - cost_list[3]) * (1 << (bits - 1)),
                          (cost_list[1] - 2 * cost_list[0] + cost_list[3]));
   *ir = divide_and_round((cost_list[4] - cost_list[2]) * (1 << (bits - 1)),
@@ -384,37 +346,26 @@ static void get_cost_surf_min(int *cost_list, int *ir, int *ic,
 }
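
// The x0 formula quoted above is a one-dimensional parabola fit through
// three neighbouring cost samples (y0 is the same fit along the other
// axis); is_cost_list_wellbehaved() guarantees the centre sample is
// strictly smallest, so the denominator is positive. A short derivation,
// with S0 at the centre and S1/S3 one step to either side:
//
//   S(x) = a x^2 + b x + c
//   S1 = S(-1) = a - b + c,   S0 = S(0) = c,   S3 = S(+1) = a + b + c
//   S1 - S3 = -2b,            S1 + S3 - 2 S0 = 2a
//   x0 = -b / (2a) = (1/2) (S1 - S3) / (S1 + S3 - 2 S0)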
 
 int vp10_find_best_sub_pixel_tree_pruned_evenmore(
-    const MACROBLOCK *x,
-    MV *bestmv, const MV *ref_mv,
-    int allow_hp,
-    int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp,
-    int forced_stop,
-    int iters_per_step,
-    int *cost_list,
-    int *mvjcost, int *mvcost[2],
-    int *distortion,
-    unsigned int *sse1,
-    const uint8_t *second_pred,
-    int w, int h) {
+    const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+    int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+    int h) {
   SETUP_SUBPEL_SEARCH;
-  besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
-                               z, src_stride, y, y_stride, second_pred,
-                               w, h, offset, mvjcost, mvcost,
-                               sse1, distortion);
-  (void) halfiters;
-  (void) quarteriters;
-  (void) eighthiters;
-  (void) whichdir;
-  (void) allow_hp;
-  (void) forced_stop;
-  (void) hstep;
-
-  if (cost_list &&
-      cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+  besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+                               src_stride, y, y_stride, second_pred, w, h,
+                               offset, mvjcost, mvcost, sse1, distortion);
+  (void)halfiters;
+  (void)quarteriters;
+  (void)eighthiters;
+  (void)whichdir;
+  (void)allow_hp;
+  (void)forced_stop;
+  (void)hstep;
+
+  if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
       cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
-      cost_list[4] != INT_MAX &&
-      is_cost_list_wellbehaved(cost_list)) {
+      cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
     int ir, ic;
     unsigned int minpt;
     get_cost_surf_min(cost_list, &ir, &ic, 2);
@@ -463,29 +414,19 @@ int vp10_find_best_sub_pixel_tree_pruned_evenmore(
   return besterr;
 }
 
-int vp10_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
-                                             MV *bestmv, const MV *ref_mv,
-                                             int allow_hp,
-                                             int error_per_bit,
-                                             const vpx_variance_fn_ptr_t *vfp,
-                                             int forced_stop,
-                                             int iters_per_step,
-                                             int *cost_list,
-                                             int *mvjcost, int *mvcost[2],
-                                             int *distortion,
-                                             unsigned int *sse1,
-                                             const uint8_t *second_pred,
-                                             int w, int h) {
+int vp10_find_best_sub_pixel_tree_pruned_more(
+    const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+    int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+    int h) {
   SETUP_SUBPEL_SEARCH;
-  besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
-                               z, src_stride, y, y_stride, second_pred,
-                               w, h, offset, mvjcost, mvcost,
-                               sse1, distortion);
-  if (cost_list &&
-      cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+  besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+                               src_stride, y, y_stride, second_pred, w, h,
+                               offset, mvjcost, mvcost, sse1, distortion);
+  if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
       cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
-      cost_list[4] != INT_MAX &&
-      is_cost_list_wellbehaved(cost_list)) {
+      cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
     unsigned int minpt;
     int ir, ic;
     get_cost_surf_min(cost_list, &ir, &ic, 1);
@@ -524,8 +465,8 @@ int vp10_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
   }
   // These lines ensure static analysis doesn't warn that
   // tr and tc aren't used after the above point.
-  (void) tr;
-  (void) tc;
+  (void)tr;
+  (void)tc;
 
   bestmv->row = br;
   bestmv->col = bc;
@@ -537,26 +478,17 @@ int vp10_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
   return besterr;
 }
 
-int vp10_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
-                                        MV *bestmv, const MV *ref_mv,
-                                        int allow_hp,
-                                        int error_per_bit,
-                                        const vpx_variance_fn_ptr_t *vfp,
-                                        int forced_stop,
-                                        int iters_per_step,
-                                        int *cost_list,
-                                        int *mvjcost, int *mvcost[2],
-                                        int *distortion,
-                                        unsigned int *sse1,
-                                        const uint8_t *second_pred,
-                                        int w, int h) {
+int vp10_find_best_sub_pixel_tree_pruned(
+    const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+    int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+    int h) {
   SETUP_SUBPEL_SEARCH;
-  besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
-                               z, src_stride, y, y_stride, second_pred,
-                               w, h, offset, mvjcost, mvcost,
-                               sse1, distortion);
-  if (cost_list &&
-      cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+  besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+                               src_stride, y, y_stride, second_pred, w, h,
+                               offset, mvjcost, mvcost, sse1, distortion);
+  if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
       cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
       cost_list[4] != INT_MAX) {
     unsigned int left, right, up, down, diag;
@@ -619,8 +551,8 @@ int vp10_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
   }
   // These lines ensure static analysis doesn't warn that
   // tr and tc aren't used after the above point.
-  (void) tr;
-  (void) tc;
+  (void)tr;
+  (void)tc;
 
   bestmv->row = br;
   bestmv->col = bc;
@@ -633,25 +565,19 @@ int vp10_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
 }
 
 static const MV search_step_table[12] = {
-    // left, right, up, down
-    {0, -4}, {0, 4}, {-4, 0}, {4, 0},
-    {0, -2}, {0, 2}, {-2, 0}, {2, 0},
-    {0, -1}, {0, 1}, {-1, 0}, {1, 0}
+  // left, right, up, down
+  { 0, -4 }, { 0, 4 }, { -4, 0 }, { 4, 0 }, { 0, -2 }, { 0, 2 },
+  { -2, 0 }, { 2, 0 }, { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 }
 };
 
-int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x,
-                                 MV *bestmv, const MV *ref_mv,
-                                 int allow_hp,
-                                 int error_per_bit,
-                                 const vpx_variance_fn_ptr_t *vfp,
-                                 int forced_stop,
-                                 int iters_per_step,
-                                 int *cost_list,
-                                 int *mvjcost, int *mvcost[2],
-                                 int *distortion,
-                                 unsigned int *sse1,
-                                 const uint8_t *second_pred,
-                                 int w, int h) {
+int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
+                                  const MV *ref_mv, int allow_hp,
+                                  int error_per_bit,
+                                  const vpx_variance_fn_ptr_t *vfp,
+                                  int forced_stop, int iters_per_step,
+                                  int *cost_list, int *mvjcost, int *mvcost[2],
+                                  int *distortion, unsigned int *sse1,
+                                  const uint8_t *second_pred, int w, int h) {
   const uint8_t *const z = x->plane[0].src.buf;
   const uint8_t *const src_address = z;
   const int src_stride = x->plane[0].src.stride;
@@ -681,18 +607,16 @@ int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x,
   int kr, kc;
 
   if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
-    if (round == 3)
-      round = 2;
+    if (round == 3) round = 2;
 
   bestmv->row *= 8;
   bestmv->col *= 8;
 
-  besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
-                               z, src_stride, y, y_stride, second_pred,
-                               w, h, offset, mvjcost, mvcost,
-                               sse1, distortion);
+  besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+                               src_stride, y, y_stride, second_pred, w, h,
+                               offset, mvjcost, mvcost, sse1, distortion);
 
-  (void) cost_list;  // to silence compiler warning
+  (void)cost_list;  // to silence compiler warning
 
   for (iter = 0; iter < round; ++iter) {
     // Check vertical and horizontal sub-pixel positions.
@@ -705,13 +629,13 @@ int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x,
         this_mv.row = tr;
         this_mv.col = tc;
         if (second_pred == NULL)
-          thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
-                             src_address, src_stride, &sse);
+          thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+                             src_stride, &sse);
         else
           thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
                               src_address, src_stride, &sse, second_pred);
-        cost_array[idx] = thismse +
-            mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
+        cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
+                                                mvcost, error_per_bit);
 
         if (cost_array[idx] < besterr) {
           best_idx = idx;
@@ -732,15 +656,15 @@ int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x,
     tr = br + kr;
     if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
       const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
-      MV this_mv = {tr, tc};
+      MV this_mv = { tr, tc };
       if (second_pred == NULL)
-        thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
-                           src_address, src_stride, &sse);
+        thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+                           src_stride, &sse);
       else
-        thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
-                            src_address, src_stride, &sse, second_pred);
-      cost_array[4] = thismse +
-          mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
+        thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+                            src_stride, &sse, second_pred);
+      cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+                                            error_per_bit);
 
       if (cost_array[4] < besterr) {
         best_idx = 4;
@@ -760,8 +684,7 @@ int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x,
       bc = tc;
     }
 
-    if (iters_per_step > 1 && best_idx != -1)
-      SECOND_LEVEL_CHECKS_BEST;
+    if (iters_per_step > 1 && best_idx != -1) SECOND_LEVEL_CHECKS_BEST;
 
     tr = br;
     tc = bc;
@@ -776,8 +699,8 @@ int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x,
 
  // These lines ensure static analysis doesn't warn that
   // tr and tc aren't used after the above point.
-  (void) tr;
-  (void) tc;
+  (void)tr;
+  (void)tc;
 
   bestmv->row = br;
   bestmv->col = bc;
@@ -795,10 +718,8 @@ int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x,
 
 static INLINE int check_bounds(const MACROBLOCK *x, int row, int col,
                                int range) {
-  return ((row - range) >= x->mv_row_min) &
-         ((row + range) <= x->mv_row_max) &
-         ((col - range) >= x->mv_col_min) &
-         ((col + range) <= x->mv_col_max);
+  return ((row - range) >= x->mv_row_min) & ((row + range) <= x->mv_row_max) &
+         ((col - range) >= x->mv_col_min) & ((col + range) <= x->mv_col_max);
 }
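+// Note: check_bounds() combines its four comparisons with the bitwise '&'
+// rather than '&&'. Each comparison yields 0 or 1, so the result is the
+// same, but all four tests are evaluated without short-circuit branches.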
 
 static INLINE int is_mv_in(const MACROBLOCK *x, const MV *mv) {
@@ -806,33 +727,31 @@ static INLINE int is_mv_in(const MACROBLOCK *x, const MV *mv) {
          (mv->row >= x->mv_row_min) && (mv->row <= x->mv_row_max);
 }
 
-#define CHECK_BETTER \
-  {\
-    if (thissad < bestsad) {\
-      if (use_mvcost) \
-        thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);\
-      if (thissad < bestsad) {\
-        bestsad = thissad;\
-        best_site = i;\
-      }\
-    }\
+#define CHECK_BETTER                                                      \
+  {                                                                       \
+    if (thissad < bestsad) {                                              \
+      if (use_mvcost)                                                     \
+        thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); \
+      if (thissad < bestsad) {                                            \
+        bestsad = thissad;                                                \
+        best_site = i;                                                    \
+      }                                                                   \
+    }                                                                     \
   }
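+// CHECK_BETTER compares the raw SAD against the current best first, so the
+// relatively expensive mvsad_err_cost() is only added (and the comparison
+// repeated) for candidates whose raw SAD is already an improvement.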
 
-#define MAX_PATTERN_SCALES         11
-#define MAX_PATTERN_CANDIDATES      8  // max number of canddiates per scale
-#define PATTERN_CANDIDATES_REF      3  // number of refinement candidates
+#define MAX_PATTERN_SCALES 11
+#define MAX_PATTERN_CANDIDATES 8  // max number of candidates per scale
+#define PATTERN_CANDIDATES_REF 3  // number of refinement candidates
 
 // Calculate and return a sad+mvcost list around an integer best pel.
-static INLINE void calc_int_cost_list(const MACROBLOCK *x,
-                                      const MV *ref_mv,
+static INLINE void calc_int_cost_list(const MACROBLOCK *x, const MV *ref_mv,
                                       int sadpb,
                                       const vpx_variance_fn_ptr_t *fn_ptr,
-                                      const MV *best_mv,
-                                      int *cost_list) {
-  static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}};
+                                      const MV *best_mv, int *cost_list) {
+  static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0];
-  const MV fcenter_mv = {ref_mv->row >> 3, ref_mv->col >> 3};
+  const MV fcenter_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
   int br = best_mv->row;
   int bc = best_mv->col;
   MV this_mv;
@@ -841,34 +760,32 @@ static INLINE void calc_int_cost_list(const MACROBLOCK *x,
 
   this_mv.row = br;
   this_mv.col = bc;
-  cost_list[0] = fn_ptr->vf(what->buf, what->stride,
-                            get_buf_from_mv(in_what, &this_mv),
-                            in_what->stride, &sse) +
+  cost_list[0] =
+      fn_ptr->vf(what->buf, what->stride, get_buf_from_mv(in_what, &this_mv),
+                 in_what->stride, &sse) +
       mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
   if (check_bounds(x, br, bc, 1)) {
     for (i = 0; i < 4; i++) {
-      const MV this_mv = {br + neighbors[i].row,
-        bc + neighbors[i].col};
+      const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
       cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
                                     get_buf_from_mv(in_what, &this_mv),
                                     in_what->stride, &sse) +
-          // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
-          mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost, x->mvcost,
-                      x->errorperbit);
+                         // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
+                         mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
+                                     x->mvcost, x->errorperbit);
     }
   } else {
     for (i = 0; i < 4; i++) {
-      const MV this_mv = {br + neighbors[i].row,
-        bc + neighbors[i].col};
+      const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
       if (!is_mv_in(x, &this_mv))
         cost_list[i + 1] = INT_MAX;
       else
         cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
                                       get_buf_from_mv(in_what, &this_mv),
                                       in_what->stride, &sse) +
-            // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
-            mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost, x->mvcost,
-                        x->errorperbit);
+                           // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
+                           mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
+                                       x->mvcost, x->errorperbit);
     }
   }
 }
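+// Layout produced by calc_int_cost_list(): cost_list[0] holds the cost at
+// the best integer pel itself and cost_list[1..4] the costs at its left,
+// below, right and above neighbors (INT_MAX when a neighbor is out of
+// range). The neighbors are costed with fn_ptr->vf() plus mv_err_cost();
+// the commented-out mvsad_err_cost() calls are the older SAD-based cost.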
@@ -878,19 +795,12 @@ static INLINE void calc_int_cost_list(const MACROBLOCK *x,
 // candidates as indicated in the num_candidates and candidates arrays
 // passed into this function.
 //
-static int vp10_pattern_search(const MACROBLOCK *x,
-                              MV *ref_mv,
-                              int search_param,
-                              int sad_per_bit,
-                              int do_init_search,
-                              int *cost_list,
-                              const vpx_variance_fn_ptr_t *vfp,
-                              int use_mvcost,
-                              const MV *center_mv,
-                              MV *best_mv,
-                              const int num_candidates[MAX_PATTERN_SCALES],
-                              const MV candidates[MAX_PATTERN_SCALES]
-                                                 [MAX_PATTERN_CANDIDATES]) {
+static int vp10_pattern_search(
+    const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
+    int do_init_search, int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+    int use_mvcost, const MV *center_mv, MV *best_mv,
+    const int num_candidates[MAX_PATTERN_SCALES],
+    const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
     10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
@@ -902,7 +812,7 @@ static int vp10_pattern_search(const MACROBLOCK *x,
   int bestsad = INT_MAX;
   int thissad;
   int k = -1;
-  const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
   int best_init_s = search_param_to_steps[search_param];
   // adjust ref_mv to make sure it is within MV range
   clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
@@ -910,9 +820,9 @@ static int vp10_pattern_search(const MACROBLOCK *x,
   bc = ref_mv->col;
 
   // Work out the start point for the search
-  bestsad = vfp->sdf(what->buf, what->stride,
-                     get_buf_from_mv(in_what, ref_mv), in_what->stride) +
-      mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+  bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                     in_what->stride) +
+            mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
 
   // Search all possible scales up to the search param around the center point
   // pick the scale of the point that is best as the starting scale of
@@ -924,22 +834,21 @@ static int vp10_pattern_search(const MACROBLOCK *x,
       int best_site = -1;
       if (check_bounds(x, br, bc, 1 << t)) {
         for (i = 0; i < num_candidates[t]; i++) {
-          const MV this_mv = {br + candidates[t][i].row,
-                              bc + candidates[t][i].col};
-          thissad = vfp->sdf(what->buf, what->stride,
-                             get_buf_from_mv(in_what, &this_mv),
-                             in_what->stride);
+          const MV this_mv = { br + candidates[t][i].row,
+                               bc + candidates[t][i].col };
+          thissad =
+              vfp->sdf(what->buf, what->stride,
+                       get_buf_from_mv(in_what, &this_mv), in_what->stride);
           CHECK_BETTER
         }
       } else {
         for (i = 0; i < num_candidates[t]; i++) {
-          const MV this_mv = {br + candidates[t][i].row,
-                              bc + candidates[t][i].col};
-          if (!is_mv_in(x, &this_mv))
-            continue;
-          thissad = vfp->sdf(what->buf, what->stride,
-                             get_buf_from_mv(in_what, &this_mv),
-                             in_what->stride);
+          const MV this_mv = { br + candidates[t][i].row,
+                               bc + candidates[t][i].col };
+          if (!is_mv_in(x, &this_mv)) continue;
+          thissad =
+              vfp->sdf(what->buf, what->stride,
+                       get_buf_from_mv(in_what, &this_mv), in_what->stride);
           CHECK_BETTER
         }
       }
@@ -967,22 +876,21 @@ static int vp10_pattern_search(const MACROBLOCK *x,
       if (!do_init_search || s != best_init_s) {
         if (check_bounds(x, br, bc, 1 << s)) {
           for (i = 0; i < num_candidates[s]; i++) {
-            const MV this_mv = {br + candidates[s][i].row,
-                                bc + candidates[s][i].col};
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         } else {
           for (i = 0; i < num_candidates[s]; i++) {
-            const MV this_mv = {br + candidates[s][i].row,
-                                bc + candidates[s][i].col};
-            if (!is_mv_in(x, &this_mv))
-              continue;
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            if (!is_mv_in(x, &this_mv)) continue;
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         }
@@ -1005,22 +913,25 @@ static int vp10_pattern_search(const MACROBLOCK *x,
 
         if (check_bounds(x, br, bc, 1 << s)) {
           for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
-            const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
-                                bc + candidates[s][next_chkpts_indices[i]].col};
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         } else {
           for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
-            const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
-                                bc + candidates[s][next_chkpts_indices[i]].col};
-            if (!is_mv_in(x, &this_mv))
-              continue;
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            if (!is_mv_in(x, &this_mv)) continue;
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         }
@@ -1053,19 +964,12 @@ static int vp10_pattern_search(const MACROBLOCK *x,
 // are 4 1-away neighbors, and cost_list is non-null
 // TODO(debargha): Merge this function with the one above. Also remove
 // use_mvcost option since it is always 1, to save unnecessary branches.
-static int vp10_pattern_search_sad(const MACROBLOCK *x,
-                                  MV *ref_mv,
-                                  int search_param,
-                                  int sad_per_bit,
-                                  int do_init_search,
-                                  int *cost_list,
-                                  const vpx_variance_fn_ptr_t *vfp,
-                                  int use_mvcost,
-                                  const MV *center_mv,
-                                  MV *best_mv,
-                                  const int num_candidates[MAX_PATTERN_SCALES],
-                                  const MV candidates[MAX_PATTERN_SCALES]
-                                                     [MAX_PATTERN_CANDIDATES]) {
+static int vp10_pattern_search_sad(
+    const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
+    int do_init_search, int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+    int use_mvcost, const MV *center_mv, MV *best_mv,
+    const int num_candidates[MAX_PATTERN_SCALES],
+    const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
     10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
@@ -1077,7 +981,7 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
   int bestsad = INT_MAX;
   int thissad;
   int k = -1;
-  const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
   int best_init_s = search_param_to_steps[search_param];
   // adjust ref_mv to make sure it is within MV range
   clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
@@ -1089,9 +993,9 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
   }
 
   // Work out the start point for the search
-  bestsad = vfp->sdf(what->buf, what->stride,
-                     get_buf_from_mv(in_what, ref_mv), in_what->stride) +
-      mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+  bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                     in_what->stride) +
+            mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
 
   // Search all possible scales up to the search param around the center point
   // pick the scale of the point that is best as the starting scale of
@@ -1103,22 +1007,21 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
       int best_site = -1;
       if (check_bounds(x, br, bc, 1 << t)) {
         for (i = 0; i < num_candidates[t]; i++) {
-          const MV this_mv = {br + candidates[t][i].row,
-                              bc + candidates[t][i].col};
-          thissad = vfp->sdf(what->buf, what->stride,
-                             get_buf_from_mv(in_what, &this_mv),
-                             in_what->stride);
+          const MV this_mv = { br + candidates[t][i].row,
+                               bc + candidates[t][i].col };
+          thissad =
+              vfp->sdf(what->buf, what->stride,
+                       get_buf_from_mv(in_what, &this_mv), in_what->stride);
           CHECK_BETTER
         }
       } else {
         for (i = 0; i < num_candidates[t]; i++) {
-          const MV this_mv = {br + candidates[t][i].row,
-                              bc + candidates[t][i].col};
-          if (!is_mv_in(x, &this_mv))
-            continue;
-          thissad = vfp->sdf(what->buf, what->stride,
-                             get_buf_from_mv(in_what, &this_mv),
-                             in_what->stride);
+          const MV this_mv = { br + candidates[t][i].row,
+                               bc + candidates[t][i].col };
+          if (!is_mv_in(x, &this_mv)) continue;
+          thissad =
+              vfp->sdf(what->buf, what->stride,
+                       get_buf_from_mv(in_what, &this_mv), in_what->stride);
           CHECK_BETTER
         }
       }
@@ -1146,22 +1049,21 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
       if (!do_init_search || s != best_init_s) {
         if (check_bounds(x, br, bc, 1 << s)) {
           for (i = 0; i < num_candidates[s]; i++) {
-            const MV this_mv = {br + candidates[s][i].row,
-                                bc + candidates[s][i].col};
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         } else {
           for (i = 0; i < num_candidates[s]; i++) {
-            const MV this_mv = {br + candidates[s][i].row,
-                                bc + candidates[s][i].col};
-            if (!is_mv_in(x, &this_mv))
-              continue;
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            if (!is_mv_in(x, &this_mv)) continue;
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         }
@@ -1184,22 +1086,25 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
 
         if (check_bounds(x, br, bc, 1 << s)) {
           for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
-            const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
-                                bc + candidates[s][next_chkpts_indices[i]].col};
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         } else {
           for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
-            const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
-                                bc + candidates[s][next_chkpts_indices[i]].col};
-            if (!is_mv_in(x, &this_mv))
-              continue;
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            if (!is_mv_in(x, &this_mv)) continue;
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         }
@@ -1218,24 +1123,21 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
       if (!do_init_search || s != best_init_s) {
         if (check_bounds(x, br, bc, 1 << s)) {
           for (i = 0; i < num_candidates[s]; i++) {
-            const MV this_mv = {br + candidates[s][i].row,
-                                bc + candidates[s][i].col};
-            cost_list[i + 1] =
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            cost_list[i + 1] = thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         } else {
           for (i = 0; i < num_candidates[s]; i++) {
-            const MV this_mv = {br + candidates[s][i].row,
-                                bc + candidates[s][i].col};
-            if (!is_mv_in(x, &this_mv))
-              continue;
-            cost_list[i + 1] =
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            if (!is_mv_in(x, &this_mv)) continue;
+            cost_list[i + 1] = thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         }
@@ -1258,26 +1160,28 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
 
         if (check_bounds(x, br, bc, 1 << s)) {
           for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
-            const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
-                                bc + candidates[s][next_chkpts_indices[i]].col};
-            cost_list[next_chkpts_indices[i] + 1] =
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            cost_list[next_chkpts_indices[i] + 1] = thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         } else {
           for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
-            const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
-                                bc + candidates[s][next_chkpts_indices[i]].col};
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
             if (!is_mv_in(x, &this_mv)) {
               cost_list[next_chkpts_indices[i] + 1] = INT_MAX;
               continue;
             }
-            cost_list[next_chkpts_indices[i] + 1] =
-            thissad = vfp->sdf(what->buf, what->stride,
-                               get_buf_from_mv(in_what, &this_mv),
-                               in_what->stride);
+            cost_list[next_chkpts_indices[i] + 1] = thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
             CHECK_BETTER
           }
         }
@@ -1298,34 +1202,31 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
   // cost_list[3]: sad at delta { 0, 1} (right)  from the best integer pel
   // cost_list[4]: sad at delta {-1, 0} (top)    from the best integer pel
   if (cost_list) {
-    static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}};
+    static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
     if (cost_list[0] == INT_MAX) {
       cost_list[0] = bestsad;
       if (check_bounds(x, br, bc, 1)) {
         for (i = 0; i < 4; i++) {
-          const MV this_mv = { br + neighbors[i].row,
-                               bc + neighbors[i].col };
-          cost_list[i + 1] = vfp->sdf(what->buf, what->stride,
-                                     get_buf_from_mv(in_what, &this_mv),
-                                     in_what->stride);
+          const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
+          cost_list[i + 1] =
+              vfp->sdf(what->buf, what->stride,
+                       get_buf_from_mv(in_what, &this_mv), in_what->stride);
         }
       } else {
         for (i = 0; i < 4; i++) {
-          const MV this_mv = {br + neighbors[i].row,
-            bc + neighbors[i].col};
+          const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
           if (!is_mv_in(x, &this_mv))
             cost_list[i + 1] = INT_MAX;
           else
-            cost_list[i + 1] = vfp->sdf(what->buf, what->stride,
-                                       get_buf_from_mv(in_what, &this_mv),
-                                       in_what->stride);
+            cost_list[i + 1] =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
         }
       }
     } else {
       if (use_mvcost) {
         for (i = 0; i < 4; i++) {
-          const MV this_mv = {br + neighbors[i].row,
-            bc + neighbors[i].col};
+          const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
           if (cost_list[i + 1] != INT_MAX) {
             cost_list[i + 1] +=
                 mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
@@ -1339,183 +1240,320 @@ static int vp10_pattern_search_sad(const MACROBLOCK *x,
   return bestsad;
 }
 
-int vp10_get_mvpred_var(const MACROBLOCK *x,
-                       const MV *best_mv, const MV *center_mv,
-                       const vpx_variance_fn_ptr_t *vfp,
-                       int use_mvcost) {
+int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+                        const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
+                        int use_mvcost) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
-  const MV mv = {best_mv->row * 8, best_mv->col * 8};
+  const MV mv = { best_mv->row * 8, best_mv->col * 8 };
   unsigned int unused;
 
-  return vfp->vf(what->buf, what->stride,
-                 get_buf_from_mv(in_what, best_mv), in_what->stride, &unused) +
-      (use_mvcost ?  mv_err_cost(&mv, center_mv, x->nmvjointcost,
-                                 x->mvcost, x->errorperbit) : 0);
+  return vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
+                 in_what->stride, &unused) +
+         (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+                                   x->errorperbit)
+                     : 0);
 }
 
-int vp10_get_mvpred_av_var(const MACROBLOCK *x,
-                          const MV *best_mv, const MV *center_mv,
-                          const uint8_t *second_pred,
-                          const vpx_variance_fn_ptr_t *vfp,
-                          int use_mvcost) {
+int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+                           const MV *center_mv, const uint8_t *second_pred,
+                           const vpx_variance_fn_ptr_t *vfp, int use_mvcost) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
-  const MV mv = {best_mv->row * 8, best_mv->col * 8};
+  const MV mv = { best_mv->row * 8, best_mv->col * 8 };
   unsigned int unused;
 
   return vfp->svaf(get_buf_from_mv(in_what, best_mv), in_what->stride, 0, 0,
                    what->buf, what->stride, &unused, second_pred) +
-      (use_mvcost ?  mv_err_cost(&mv, center_mv, x->nmvjointcost,
-                                 x->mvcost, x->errorperbit) : 0);
+         (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+                                   x->errorperbit)
+                     : 0);
 }
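+// Both helpers above receive best_mv in full-pel units and scale it by 8
+// before costing, since mv_err_cost() works in the 1/8-pel units used for
+// motion vectors elsewhere in the encoder.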
 
-int vp10_hex_search(const MACROBLOCK *x,
-                   MV *ref_mv,
-                   int search_param,
-                   int sad_per_bit,
-                   int do_init_search,
-                   int *cost_list,
-                   const vpx_variance_fn_ptr_t *vfp,
-                   int use_mvcost,
-                   const MV *center_mv, MV *best_mv) {
+int vp10_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+                    int sad_per_bit, int do_init_search, int *cost_list,
+                    const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                    const MV *center_mv, MV *best_mv) {
   // First scale has the 8 closest points; the rest have 6 points in hex shape
   // at increasing scales
-  static const int hex_num_candidates[MAX_PATTERN_SCALES] = {
-    8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6
-  };
+  static const int hex_num_candidates[MAX_PATTERN_SCALES] = { 8, 6, 6, 6, 6, 6,
+                                                              6, 6, 6, 6, 6 };
   // Note that the largest candidate step at each scale is 2^scale
   static const MV hex_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
-    {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, { 0, 1}, { -1, 1}, {-1, 0}},
-    {{-1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0}},
-    {{-2, -4}, {2, -4}, {4, 0}, {2, 4}, { -2, 4}, { -4, 0}},
-    {{-4, -8}, {4, -8}, {8, 0}, {4, 8}, { -4, 8}, { -8, 0}},
-    {{-8, -16}, {8, -16}, {16, 0}, {8, 16}, { -8, 16}, { -16, 0}},
-    {{-16, -32}, {16, -32}, {32, 0}, {16, 32}, { -16, 32}, { -32, 0}},
-    {{-32, -64}, {32, -64}, {64, 0}, {32, 64}, { -32, 64}, { -64, 0}},
-    {{-64, -128}, {64, -128}, {128, 0}, {64, 128}, { -64, 128}, { -128, 0}},
-    {{-128, -256}, {128, -256}, {256, 0}, {128, 256}, { -128, 256}, { -256, 0}},
-    {{-256, -512}, {256, -512}, {512, 0}, {256, 512}, { -256, 512}, { -512, 0}},
-    {{-512, -1024}, {512, -1024}, {1024, 0}, {512, 1024}, { -512, 1024},
-      { -1024, 0}},
+    { { -1, -1 },
+      { 0, -1 },
+      { 1, -1 },
+      { 1, 0 },
+      { 1, 1 },
+      { 0, 1 },
+      { -1, 1 },
+      { -1, 0 } },
+    { { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 } },
+    { { -2, -4 }, { 2, -4 }, { 4, 0 }, { 2, 4 }, { -2, 4 }, { -4, 0 } },
+    { { -4, -8 }, { 4, -8 }, { 8, 0 }, { 4, 8 }, { -4, 8 }, { -8, 0 } },
+    { { -8, -16 }, { 8, -16 }, { 16, 0 }, { 8, 16 }, { -8, 16 }, { -16, 0 } },
+    { { -16, -32 },
+      { 16, -32 },
+      { 32, 0 },
+      { 16, 32 },
+      { -16, 32 },
+      { -32, 0 } },
+    { { -32, -64 },
+      { 32, -64 },
+      { 64, 0 },
+      { 32, 64 },
+      { -32, 64 },
+      { -64, 0 } },
+    { { -64, -128 },
+      { 64, -128 },
+      { 128, 0 },
+      { 64, 128 },
+      { -64, 128 },
+      { -128, 0 } },
+    { { -128, -256 },
+      { 128, -256 },
+      { 256, 0 },
+      { 128, 256 },
+      { -128, 256 },
+      { -256, 0 } },
+    { { -256, -512 },
+      { 256, -512 },
+      { 512, 0 },
+      { 256, 512 },
+      { -256, 512 },
+      { -512, 0 } },
+    { { -512, -1024 },
+      { 512, -1024 },
+      { 1024, 0 },
+      { 512, 1024 },
+      { -512, 1024 },
+      { -1024, 0 } },
   };
-  return vp10_pattern_search(x, ref_mv, search_param, sad_per_bit,
-                            do_init_search, cost_list, vfp, use_mvcost,
-                            center_mv, best_mv,
-                            hex_num_candidates, hex_candidates);
+  return vp10_pattern_search(
+      x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+      use_mvcost, center_mv, best_mv, hex_num_candidates, hex_candidates);
 }
 
-int vp10_bigdia_search(const MACROBLOCK *x,
-                      MV *ref_mv,
-                      int search_param,
-                      int sad_per_bit,
-                      int do_init_search,
-                      int *cost_list,
-                      const vpx_variance_fn_ptr_t *vfp,
-                      int use_mvcost,
-                      const MV *center_mv,
-                      MV *best_mv) {
+int vp10_bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+                       int sad_per_bit, int do_init_search, int *cost_list,
+                       const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                       const MV *center_mv, MV *best_mv) {
   // First scale has the 4 closest points; the rest have 8 points in diamond
   // shape at increasing scales
   static const int bigdia_num_candidates[MAX_PATTERN_SCALES] = {
     4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   };
   // Note that the largest candidate step at each scale is 2^scale
-  static const MV bigdia_candidates[MAX_PATTERN_SCALES]
-                                   [MAX_PATTERN_CANDIDATES] = {
-    {{0, -1}, {1, 0}, { 0, 1}, {-1, 0}},
-    {{-1, -1}, {0, -2}, {1, -1}, {2, 0}, {1, 1}, {0, 2}, {-1, 1}, {-2, 0}},
-    {{-2, -2}, {0, -4}, {2, -2}, {4, 0}, {2, 2}, {0, 4}, {-2, 2}, {-4, 0}},
-    {{-4, -4}, {0, -8}, {4, -4}, {8, 0}, {4, 4}, {0, 8}, {-4, 4}, {-8, 0}},
-    {{-8, -8}, {0, -16}, {8, -8}, {16, 0}, {8, 8}, {0, 16}, {-8, 8}, {-16, 0}},
-    {{-16, -16}, {0, -32}, {16, -16}, {32, 0}, {16, 16}, {0, 32},
-      {-16, 16}, {-32, 0}},
-    {{-32, -32}, {0, -64}, {32, -32}, {64, 0}, {32, 32}, {0, 64},
-      {-32, 32}, {-64, 0}},
-    {{-64, -64}, {0, -128}, {64, -64}, {128, 0}, {64, 64}, {0, 128},
-      {-64, 64}, {-128, 0}},
-    {{-128, -128}, {0, -256}, {128, -128}, {256, 0}, {128, 128}, {0, 256},
-      {-128, 128}, {-256, 0}},
-    {{-256, -256}, {0, -512}, {256, -256}, {512, 0}, {256, 256}, {0, 512},
-      {-256, 256}, {-512, 0}},
-    {{-512, -512}, {0, -1024}, {512, -512}, {1024, 0}, {512, 512}, {0, 1024},
-      {-512, 512}, {-1024, 0}},
-  };
-  return vp10_pattern_search_sad(x, ref_mv, search_param, sad_per_bit,
-                                do_init_search, cost_list, vfp, use_mvcost,
-                                center_mv, best_mv,
-                                bigdia_num_candidates, bigdia_candidates);
+  static const MV
+      bigdia_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+        { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } },
+        { { -1, -1 },
+          { 0, -2 },
+          { 1, -1 },
+          { 2, 0 },
+          { 1, 1 },
+          { 0, 2 },
+          { -1, 1 },
+          { -2, 0 } },
+        { { -2, -2 },
+          { 0, -4 },
+          { 2, -2 },
+          { 4, 0 },
+          { 2, 2 },
+          { 0, 4 },
+          { -2, 2 },
+          { -4, 0 } },
+        { { -4, -4 },
+          { 0, -8 },
+          { 4, -4 },
+          { 8, 0 },
+          { 4, 4 },
+          { 0, 8 },
+          { -4, 4 },
+          { -8, 0 } },
+        { { -8, -8 },
+          { 0, -16 },
+          { 8, -8 },
+          { 16, 0 },
+          { 8, 8 },
+          { 0, 16 },
+          { -8, 8 },
+          { -16, 0 } },
+        { { -16, -16 },
+          { 0, -32 },
+          { 16, -16 },
+          { 32, 0 },
+          { 16, 16 },
+          { 0, 32 },
+          { -16, 16 },
+          { -32, 0 } },
+        { { -32, -32 },
+          { 0, -64 },
+          { 32, -32 },
+          { 64, 0 },
+          { 32, 32 },
+          { 0, 64 },
+          { -32, 32 },
+          { -64, 0 } },
+        { { -64, -64 },
+          { 0, -128 },
+          { 64, -64 },
+          { 128, 0 },
+          { 64, 64 },
+          { 0, 128 },
+          { -64, 64 },
+          { -128, 0 } },
+        { { -128, -128 },
+          { 0, -256 },
+          { 128, -128 },
+          { 256, 0 },
+          { 128, 128 },
+          { 0, 256 },
+          { -128, 128 },
+          { -256, 0 } },
+        { { -256, -256 },
+          { 0, -512 },
+          { 256, -256 },
+          { 512, 0 },
+          { 256, 256 },
+          { 0, 512 },
+          { -256, 256 },
+          { -512, 0 } },
+        { { -512, -512 },
+          { 0, -1024 },
+          { 512, -512 },
+          { 1024, 0 },
+          { 512, 512 },
+          { 0, 1024 },
+          { -512, 512 },
+          { -1024, 0 } },
+      };
+  return vp10_pattern_search_sad(
+      x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+      use_mvcost, center_mv, best_mv, bigdia_num_candidates, bigdia_candidates);
 }
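+// Unlike the hex and square searches, bigdia uses vp10_pattern_search_sad(),
+// the variant that also records the raw SADs of the four 1-away neighbors
+// in cost_list for later sub-pel cost estimation.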
 
-int vp10_square_search(const MACROBLOCK *x,
-                      MV *ref_mv,
-                      int search_param,
-                      int sad_per_bit,
-                      int do_init_search,
-                      int *cost_list,
-                      const vpx_variance_fn_ptr_t *vfp,
-                      int use_mvcost,
-                      const MV *center_mv,
-                      MV *best_mv) {
+int vp10_square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+                       int sad_per_bit, int do_init_search, int *cost_list,
+                       const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                       const MV *center_mv, MV *best_mv) {
   // All scales have 8 closest points in square shape
   static const int square_num_candidates[MAX_PATTERN_SCALES] = {
     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   };
   // Note that the largest candidate step at each scale is 2^scale
-  static const MV square_candidates[MAX_PATTERN_SCALES]
-                                   [MAX_PATTERN_CANDIDATES] = {
-    {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}},
-    {{-2, -2}, {0, -2}, {2, -2}, {2, 0}, {2, 2}, {0, 2}, {-2, 2}, {-2, 0}},
-    {{-4, -4}, {0, -4}, {4, -4}, {4, 0}, {4, 4}, {0, 4}, {-4, 4}, {-4, 0}},
-    {{-8, -8}, {0, -8}, {8, -8}, {8, 0}, {8, 8}, {0, 8}, {-8, 8}, {-8, 0}},
-    {{-16, -16}, {0, -16}, {16, -16}, {16, 0}, {16, 16}, {0, 16},
-      {-16, 16}, {-16, 0}},
-    {{-32, -32}, {0, -32}, {32, -32}, {32, 0}, {32, 32}, {0, 32},
-      {-32, 32}, {-32, 0}},
-    {{-64, -64}, {0, -64}, {64, -64}, {64, 0}, {64, 64}, {0, 64},
-      {-64, 64}, {-64, 0}},
-    {{-128, -128}, {0, -128}, {128, -128}, {128, 0}, {128, 128}, {0, 128},
-      {-128, 128}, {-128, 0}},
-    {{-256, -256}, {0, -256}, {256, -256}, {256, 0}, {256, 256}, {0, 256},
-      {-256, 256}, {-256, 0}},
-    {{-512, -512}, {0, -512}, {512, -512}, {512, 0}, {512, 512}, {0, 512},
-      {-512, 512}, {-512, 0}},
-    {{-1024, -1024}, {0, -1024}, {1024, -1024}, {1024, 0}, {1024, 1024},
-      {0, 1024}, {-1024, 1024}, {-1024, 0}},
-  };
-  return vp10_pattern_search(x, ref_mv, search_param, sad_per_bit,
-                            do_init_search, cost_list, vfp, use_mvcost,
-                            center_mv, best_mv,
-                            square_num_candidates, square_candidates);
+  static const MV
+      square_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+        { { -1, -1 },
+          { 0, -1 },
+          { 1, -1 },
+          { 1, 0 },
+          { 1, 1 },
+          { 0, 1 },
+          { -1, 1 },
+          { -1, 0 } },
+        { { -2, -2 },
+          { 0, -2 },
+          { 2, -2 },
+          { 2, 0 },
+          { 2, 2 },
+          { 0, 2 },
+          { -2, 2 },
+          { -2, 0 } },
+        { { -4, -4 },
+          { 0, -4 },
+          { 4, -4 },
+          { 4, 0 },
+          { 4, 4 },
+          { 0, 4 },
+          { -4, 4 },
+          { -4, 0 } },
+        { { -8, -8 },
+          { 0, -8 },
+          { 8, -8 },
+          { 8, 0 },
+          { 8, 8 },
+          { 0, 8 },
+          { -8, 8 },
+          { -8, 0 } },
+        { { -16, -16 },
+          { 0, -16 },
+          { 16, -16 },
+          { 16, 0 },
+          { 16, 16 },
+          { 0, 16 },
+          { -16, 16 },
+          { -16, 0 } },
+        { { -32, -32 },
+          { 0, -32 },
+          { 32, -32 },
+          { 32, 0 },
+          { 32, 32 },
+          { 0, 32 },
+          { -32, 32 },
+          { -32, 0 } },
+        { { -64, -64 },
+          { 0, -64 },
+          { 64, -64 },
+          { 64, 0 },
+          { 64, 64 },
+          { 0, 64 },
+          { -64, 64 },
+          { -64, 0 } },
+        { { -128, -128 },
+          { 0, -128 },
+          { 128, -128 },
+          { 128, 0 },
+          { 128, 128 },
+          { 0, 128 },
+          { -128, 128 },
+          { -128, 0 } },
+        { { -256, -256 },
+          { 0, -256 },
+          { 256, -256 },
+          { 256, 0 },
+          { 256, 256 },
+          { 0, 256 },
+          { -256, 256 },
+          { -256, 0 } },
+        { { -512, -512 },
+          { 0, -512 },
+          { 512, -512 },
+          { 512, 0 },
+          { 512, 512 },
+          { 0, 512 },
+          { -512, 512 },
+          { -512, 0 } },
+        { { -1024, -1024 },
+          { 0, -1024 },
+          { 1024, -1024 },
+          { 1024, 0 },
+          { 1024, 1024 },
+          { 0, 1024 },
+          { -1024, 1024 },
+          { -1024, 0 } },
+      };
+  return vp10_pattern_search(
+      x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+      use_mvcost, center_mv, best_mv, square_num_candidates, square_candidates);
 }
 
-int vp10_fast_hex_search(const MACROBLOCK *x,
-                        MV *ref_mv,
-                        int search_param,
-                        int sad_per_bit,
-                        int do_init_search,  // must be zero for fast_hex
-                        int *cost_list,
-                        const vpx_variance_fn_ptr_t *vfp,
-                        int use_mvcost,
-                        const MV *center_mv,
-                        MV *best_mv) {
+int vp10_fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+                         int sad_per_bit,
+                         int do_init_search,  // must be zero for fast_hex
+                         int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+                         int use_mvcost, const MV *center_mv, MV *best_mv) {
   return vp10_hex_search(
       x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
       do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
 }
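+// The VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param) clamp here (and in
+// vp10_fast_dia_search below) raises the search parameter, which maps to
+// fewer pattern-search steps via search_param_to_steps; restricting the
+// search to the smallest scales is what makes these variants "fast".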
 
-int vp10_fast_dia_search(const MACROBLOCK *x,
-                        MV *ref_mv,
-                        int search_param,
-                        int sad_per_bit,
-                        int do_init_search,
-                        int *cost_list,
-                        const vpx_variance_fn_ptr_t *vfp,
-                        int use_mvcost,
-                        const MV *center_mv,
-                        MV *best_mv) {
+int vp10_fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+                         int sad_per_bit, int do_init_search, int *cost_list,
+                         const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                         const MV *center_mv, MV *best_mv) {
   return vp10_bigdia_search(
       x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
       do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
@@ -1525,15 +1563,14 @@ int vp10_fast_dia_search(const MACROBLOCK *x,
 
 // Exhaustive motion search around a given center position with a given
 // step size.
-static int exhuastive_mesh_search(const MACROBLOCK *x,
-                                  MV *ref_mv, MV *best_mv,
+static int exhuastive_mesh_search(const MACROBLOCK *x, MV *ref_mv, MV *best_mv,
                                   int range, int step, int sad_per_bit,
                                   const vpx_variance_fn_ptr_t *fn_ptr,
                                   const MV *center_mv) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
-  MV fcenter_mv = {center_mv->row, center_mv->col};
+  MV fcenter_mv = { center_mv->row, center_mv->col };
   unsigned int best_sad = INT_MAX;
   int r, c, i;
   int start_col, end_col, start_row, end_row;
@@ -1541,12 +1578,13 @@ static int exhuastive_mesh_search(const MACROBLOCK *x,
 
   assert(step >= 1);
 
-  clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max,
-           x->mv_row_min, x->mv_row_max);
+  clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+           x->mv_row_max);
   *best_mv = fcenter_mv;
-  best_sad = fn_ptr->sdf(what->buf, what->stride,
-             get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
-             mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
+  best_sad =
+      fn_ptr->sdf(what->buf, what->stride,
+                  get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
+      mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
   start_row = VPXMAX(-range, x->mv_row_min - fcenter_mv.row);
   start_col = VPXMAX(-range, x->mv_col_min - fcenter_mv.col);
   end_row = VPXMIN(range, x->mv_row_max - fcenter_mv.row);
@@ -1556,9 +1594,10 @@ static int exhuastive_mesh_search(const MACROBLOCK *x,
     for (c = start_col; c <= end_col; c += col_step) {
       // Step > 1 means we are not checking every location in this pass.
       if (step > 1) {
-        const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c};
-        unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
-                           get_buf_from_mv(in_what, &mv), in_what->stride);
+        const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c };
+        unsigned int sad =
+            fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+                        in_what->stride);
         if (sad < best_sad) {
           sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
           if (sad < best_sad) {
@@ -1572,17 +1611,16 @@ static int exhuastive_mesh_search(const MACROBLOCK *x,
           unsigned int sads[4];
           const uint8_t *addrs[4];
           for (i = 0; i < 4; ++i) {
-            const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
+            const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
             addrs[i] = get_buf_from_mv(in_what, &mv);
           }
-          fn_ptr->sdx4df(what->buf, what->stride, addrs,
-                         in_what->stride, sads);
+          fn_ptr->sdx4df(what->buf, what->stride, addrs, in_what->stride, sads);
 
           for (i = 0; i < 4; ++i) {
             if (sads[i] < best_sad) {
-              const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
-              const unsigned int sad = sads[i] +
-                  mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
+              const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+              const unsigned int sad =
+                  sads[i] + mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
               if (sad < best_sad) {
                 best_sad = sad;
                 *best_mv = mv;
@@ -1591,9 +1629,10 @@ static int exhuastive_mesh_search(const MACROBLOCK *x,
           }
         } else {
           for (i = 0; i < end_col - c; ++i) {
-            const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
-            unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
-                get_buf_from_mv(in_what, &mv), in_what->stride);
+            const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+            unsigned int sad =
+                fn_ptr->sdf(what->buf, what->stride,
+                            get_buf_from_mv(in_what, &mv), in_what->stride);
             if (sad < best_sad) {
               sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
               if (sad < best_sad) {
@@ -1611,11 +1650,10 @@ static int exhuastive_mesh_search(const MACROBLOCK *x,
 }
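+// The mesh search above scans a clamped (2 * range + 1) x (2 * range + 1)
+// window in increments of 'step'; in the step == 1 pass it batches the SAD
+// computation with fn_ptr->sdx4df(), evaluating four adjacent columns per
+// call where at least four remain in the row.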
 
 int vp10_diamond_search_sad_c(const MACROBLOCK *x,
-                             const search_site_config *cfg,
-                             MV *ref_mv, MV *best_mv, int search_param,
-                             int sad_per_bit, int *num00,
-                             const vpx_variance_fn_ptr_t *fn_ptr,
-                             const MV *center_mv) {
+                              const search_site_config *cfg, MV *ref_mv,
+                              MV *best_mv, int search_param, int sad_per_bit,
+                              int *num00, const vpx_variance_fn_ptr_t *fn_ptr,
+                              const MV *center_mv) {
   int i, j, step;
 
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -1640,7 +1678,7 @@ int vp10_diamond_search_sad_c(const MACROBLOCK *x,
   const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step];
   const int tot_steps = (cfg->ss_count / cfg->searches_per_step) - search_param;
 
-  const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
   clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
   ref_row = ref_mv->row;
   ref_col = ref_mv->col;
@@ -1653,8 +1691,8 @@ int vp10_diamond_search_sad_c(const MACROBLOCK *x,
   best_address = in_what;
 
   // Check the starting position
-  bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride)
-                + mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
+  bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
+            mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
 
   i = 1;
 
@@ -1685,10 +1723,10 @@ int vp10_diamond_search_sad_c(const MACROBLOCK *x,
 
         for (t = 0; t < 4; t++, i++) {
           if (sad_array[t] < bestsad) {
-            const MV this_mv = {best_mv->row + ss[i].mv.row,
-                                best_mv->col + ss[i].mv.col};
-            sad_array[t] += mvsad_err_cost(x, &this_mv, &fcenter_mv,
-                                           sad_per_bit);
+            const MV this_mv = { best_mv->row + ss[i].mv.row,
+                                 best_mv->col + ss[i].mv.col };
+            sad_array[t] +=
+                mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
             if (sad_array[t] < bestsad) {
               bestsad = sad_array[t];
               best_site = i;
@@ -1699,13 +1737,13 @@ int vp10_diamond_search_sad_c(const MACROBLOCK *x,
     } else {
       for (j = 0; j < cfg->searches_per_step; j++) {
         // Trap illegal vectors
-        const MV this_mv = {best_mv->row + ss[i].mv.row,
-                            best_mv->col + ss[i].mv.col};
+        const MV this_mv = { best_mv->row + ss[i].mv.row,
+                             best_mv->col + ss[i].mv.col };
 
         if (is_mv_in(x, &this_mv)) {
           const uint8_t *const check_here = ss[i].offset + best_address;
-          unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here,
-                                             in_what_stride);
+          unsigned int thissad =
+              fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
 
           if (thissad < bestsad) {
             thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
@@ -1725,12 +1763,12 @@ int vp10_diamond_search_sad_c(const MACROBLOCK *x,
       last_site = best_site;
 #if defined(NEW_DIAMOND_SEARCH)
       while (1) {
-        const MV this_mv = {best_mv->row + ss[best_site].mv.row,
-                            best_mv->col + ss[best_site].mv.col};
+        const MV this_mv = { best_mv->row + ss[best_site].mv.row,
+                             best_mv->col + ss[best_site].mv.col };
         if (is_mv_in(x, &this_mv)) {
           const uint8_t *const check_here = ss[best_site].offset + best_address;
-          unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here,
-                                             in_what_stride);
+          unsigned int thissad =
+              fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
           if (thissad < bestsad) {
             thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
             if (thissad < bestsad) {
@@ -1770,8 +1808,7 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) {
   for (d = -8; d <= 8; d += 16) {
     int this_pos = offset + d;
     // check limit
-    if (this_pos < 0 || this_pos > bw)
-      continue;
+    if (this_pos < 0 || this_pos > bw) continue;
     this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
@@ -1783,8 +1820,7 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) {
   for (d = -4; d <= 4; d += 8) {
     int this_pos = offset + d;
     // check limit
-    if (this_pos < 0 || this_pos > bw)
-      continue;
+    if (this_pos < 0 || this_pos > bw) continue;
     this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
@@ -1796,8 +1832,7 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) {
   for (d = -2; d <= 2; d += 4) {
     int this_pos = offset + d;
     // check limit
-    if (this_pos < 0 || this_pos > bw)
-      continue;
+    if (this_pos < 0 || this_pos > bw) continue;
     this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
@@ -1809,8 +1844,7 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) {
   for (d = -1; d <= 1; d += 2) {
     int this_pos = offset + d;
     // check limit
-    if (this_pos < 0 || this_pos > bw)
-      continue;
+    if (this_pos < 0 || this_pos > bw) continue;
     this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
     if (this_sad < best_sad) {
       best_sad = this_sad;
@@ -1822,15 +1856,15 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) {
 }
 
 static const MV search_pos[4] = {
-    {-1, 0}, {0, -1}, {0, 1}, {1, 0},
+  { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
 };
 
 unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
-                                           BLOCK_SIZE bsize,
-                                           int mi_row, int mi_col) {
+                                            BLOCK_SIZE bsize, int mi_row,
+                                            int mi_col) {
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-  struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
+  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
   DECLARE_ALIGNED(16, int16_t, hbuf[128]);
   DECLARE_ALIGNED(16, int16_t, vbuf[128]);
   DECLARE_ALIGNED(16, int16_t, src_hbuf[64]);
@@ -1855,8 +1889,7 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
     // Swap out the reference frame for a version that's been scaled to
     // match the resolution of the current frame, allowing the existing
     // motion search code to be used without additional modifications.
-    for (i = 0; i < MAX_MB_PLANE; i++)
-      backup_yv12[i] = xd->plane[i].pre[0];
+    for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
     vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
   }
 
@@ -1870,8 +1903,7 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
 
     if (scaled_ref_frame) {
       int i;
-      for (i = 0; i < MAX_MB_PLANE; i++)
-        xd->plane[i].pre[0] = backup_yv12[i];
+      for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
     }
     return this_sad;
   }
@@ -1912,11 +1944,8 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
   best_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
 
   {
-    const uint8_t * const pos[4] = {
-        ref_buf - ref_stride,
-        ref_buf - 1,
-        ref_buf + 1,
-        ref_buf + ref_stride,
+    const uint8_t *const pos[4] = {
+      ref_buf - ref_stride, ref_buf - 1, ref_buf + 1, ref_buf + ref_stride,
     };
 
     cpi->fn_ptr[bsize].sdx4df(src_buf, src_stride, pos, ref_stride, this_sad);
@@ -1942,8 +1971,7 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
 
   ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col;
 
-  tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride,
-                                   ref_buf, ref_stride);
+  tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
   if (best_sad > tmp_sad) {
     *tmp_mv = this_mv;
     best_sad = tmp_sad;
@@ -1954,8 +1982,7 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
 
   if (scaled_ref_frame) {
     int i;
-    for (i = 0; i < MAX_MB_PLANE; i++)
-      xd->plane[i].pre[0] = backup_yv12[i];
+    for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
   }
 
   return best_sad;
@@ -1964,25 +1991,22 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
 /* do_refine: If the last step (1-away) of the n-step search doesn't pick the
               center point as the best match, we will do a final 1-away
               diamond refining search. */
-int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
-                           MV *mvp_full, int step_param,
-                           int sadpb, int further_steps, int do_refine,
-                           int *cost_list,
-                           const vpx_variance_fn_ptr_t *fn_ptr,
-                           const MV *ref_mv, MV *dst_mv) {
+int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
+                            int step_param, int sadpb, int further_steps,
+                            int do_refine, int *cost_list,
+                            const vpx_variance_fn_ptr_t *fn_ptr,
+                            const MV *ref_mv, MV *dst_mv) {
   MV temp_mv;
   int thissme, n, num00 = 0;
   int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
-                                        step_param, sadpb, &n,
-                                        fn_ptr, ref_mv);
+                                        step_param, sadpb, &n, fn_ptr, ref_mv);
   if (bestsme < INT_MAX)
     bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
   *dst_mv = temp_mv;
 
   // If there will be no more n-step searches, check to see if a refining
   // search is needed.
-  if (n > further_steps)
-    do_refine = 0;
+  if (n > further_steps) do_refine = 0;
 
   while (n < further_steps) {
     ++n;
@@ -1991,14 +2015,13 @@ int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
       num00--;
     } else {
       thissme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
-                                        step_param + n, sadpb, &num00,
-                                        fn_ptr, ref_mv);
+                                        step_param + n, sadpb, &num00, fn_ptr,
+                                        ref_mv);
       if (thissme < INT_MAX)
         thissme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
 
       // check to see if refining search is needed.
-      if (num00 > further_steps - n)
-        do_refine = 0;
+      if (num00 > further_steps - n) do_refine = 0;
 
       if (thissme < bestsme) {
         bestsme = thissme;
@@ -2011,8 +2034,8 @@ int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
   if (do_refine) {
     const int search_range = 8;
     MV best_mv = *dst_mv;
-    thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range,
-                                       fn_ptr, ref_mv);
+    thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
+                                       ref_mv);
     if (thissme < INT_MAX)
       thissme = vp10_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
     if (thissme < bestsme) {
@@ -2034,12 +2057,12 @@ int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
 // Runs a limited-range exhaustive mesh search using a pattern set
 // according to the encode speed profile.
 static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
-                                 MV *centre_mv_full, int sadpb,  int *cost_list,
+                                 MV *centre_mv_full, int sadpb, int *cost_list,
                                  const vpx_variance_fn_ptr_t *fn_ptr,
                                  const MV *ref_mv, MV *dst_mv) {
   const SPEED_FEATURES *const sf = &cpi->sf;
-  MV temp_mv = {centre_mv_full->row, centre_mv_full->col};
-  MV f_ref_mv = {ref_mv->row >> 3, ref_mv->col >> 3};
+  MV temp_mv = { centre_mv_full->row, centre_mv_full->col };
+  MV f_ref_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
   int bestsme;
   int i;
   int interval = sf->mesh_patterns[0].interval;
@@ -2050,8 +2073,8 @@ static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
   ++(*x->ex_search_count_ptr);
 
   // Trap illegal values for interval and range for this function.
-  if ((range < MIN_RANGE) || (range > MAX_RANGE) ||
-      (interval < MIN_INTERVAL) || (interval > range))
+  if ((range < MIN_RANGE) || (range > MAX_RANGE) || (interval < MIN_INTERVAL) ||
+      (interval > range))
     return INT_MAX;
 
   baseline_interval_divisor = range / interval;
@@ -2063,21 +2086,19 @@ static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
   interval = VPXMAX(interval, range / baseline_interval_divisor);
 
   // initial search
-  bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range,
-                                  interval, sadpb, fn_ptr, &temp_mv);
+  bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, interval,
+                                   sadpb, fn_ptr, &temp_mv);
 
   if ((interval > MIN_INTERVAL) && (range > MIN_RANGE)) {
     // Progressive searches with range and step size decreasing each time
     // till we reach a step size of 1. Then break out.
     for (i = 1; i < MAX_MESH_STEP; ++i) {
       // First pass with coarser step and longer range
-      bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv,
-                                       sf->mesh_patterns[i].range,
-                                       sf->mesh_patterns[i].interval,
-                                       sadpb, fn_ptr, &temp_mv);
+      bestsme = exhuastive_mesh_search(
+          x, &f_ref_mv, &temp_mv, sf->mesh_patterns[i].range,
+          sf->mesh_patterns[i].interval, sadpb, fn_ptr, &temp_mv);
 
-      if (sf->mesh_patterns[i].interval == 1)
-        break;
+      if (sf->mesh_patterns[i].interval == 1) break;
     }
   }
 
@@ -2093,9 +2114,9 @@ static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
 }
 
 int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
-                          int sad_per_bit, int distance,
-                          const vpx_variance_fn_ptr_t *fn_ptr,
-                          const MV *center_mv, MV *best_mv) {
+                           int sad_per_bit, int distance,
+                           const vpx_variance_fn_ptr_t *fn_ptr,
+                           const MV *center_mv, MV *best_mv) {
   int r, c;
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
@@ -2104,18 +2125,20 @@ int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
   const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
   const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
   const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
-  const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
-  int best_sad = fn_ptr->sdf(what->buf, what->stride,
-      get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  int best_sad =
+      fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                  in_what->stride) +
       mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
   *best_mv = *ref_mv;
 
   for (r = row_min; r < row_max; ++r) {
     for (c = col_min; c < col_max; ++c) {
-      const MV mv = {r, c};
-      const int sad = fn_ptr->sdf(what->buf, what->stride,
-          get_buf_from_mv(in_what, &mv), in_what->stride) +
-              mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+      const MV mv = { r, c };
+      const int sad =
+          fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+                      in_what->stride) +
+          mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
       if (sad < best_sad) {
         best_sad = sad;
         *best_mv = mv;
@@ -2126,9 +2149,9 @@ int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
 }
 
 int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
-                          int sad_per_bit, int distance,
-                          const vpx_variance_fn_ptr_t *fn_ptr,
-                          const MV *center_mv, MV *best_mv) {
+                           int sad_per_bit, int distance,
+                           const vpx_variance_fn_ptr_t *fn_ptr,
+                           const MV *center_mv, MV *best_mv) {
   int r;
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
@@ -2137,9 +2160,10 @@ int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
   const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
   const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
   const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
-  const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
-  unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride,
-      get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  unsigned int best_sad =
+      fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                  in_what->stride) +
       mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
   *best_mv = *ref_mv;
 
@@ -2158,7 +2182,7 @@ int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
         for (i = 0; i < 3; ++i) {
           unsigned int sad = sads[i];
           if (sad < best_sad) {
-            const MV mv = {r, c};
+            const MV mv = { r, c };
             sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
             if (sad < best_sad) {
               best_sad = sad;
@@ -2172,10 +2196,10 @@ int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
     }
 
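     // Columns left over after the three-at-a-time batches are checked one at
     // a time with scalar sdf calls.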
     while (c < col_max) {
-      unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
-                                     check_here, in_what->stride);
+      unsigned int sad =
+          fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride);
       if (sad < best_sad) {
-        const MV mv = {r, c};
+        const MV mv = { r, c };
         sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
         if (sad < best_sad) {
           best_sad = sad;
@@ -2191,9 +2215,9 @@ int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
 }
 
 int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
-                          int sad_per_bit, int distance,
-                          const vpx_variance_fn_ptr_t *fn_ptr,
-                          const MV *center_mv, MV *best_mv) {
+                           int sad_per_bit, int distance,
+                           const vpx_variance_fn_ptr_t *fn_ptr,
+                           const MV *center_mv, MV *best_mv) {
   int r;
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
@@ -2202,9 +2226,10 @@ int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
   const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
   const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
   const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
-  const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
-  unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride,
-      get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  unsigned int best_sad =
+      fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                  in_what->stride) +
       mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
   *best_mv = *ref_mv;
 
@@ -2223,7 +2248,7 @@ int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
         for (i = 0; i < 8; ++i) {
           unsigned int sad = sads[i];
           if (sad < best_sad) {
-            const MV mv = {r, c};
+            const MV mv = { r, c };
             sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
             if (sad < best_sad) {
               best_sad = sad;
@@ -2247,7 +2272,7 @@ int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
         for (i = 0; i < 3; ++i) {
           unsigned int sad = sads[i];
           if (sad < best_sad) {
-            const MV mv = {r, c};
+            const MV mv = { r, c };
             sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
             if (sad < best_sad) {
               best_sad = sad;
@@ -2261,10 +2286,10 @@ int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
     }
 
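     // Remaining right-edge columns after the eight- and three-wide batches
     // fall back to scalar sdf calls.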
     while (c < col_max) {
-      unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
-                                     check_here, in_what->stride);
+      unsigned int sad =
+          fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride);
       if (sad < best_sad) {
-        const MV mv = {r, c};
+        const MV mv = { r, c };
         sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
         if (sad < best_sad) {
           best_sad = sad;
@@ -2279,19 +2304,18 @@ int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
   return best_sad;
 }
 
-int vp10_refining_search_sad(const MACROBLOCK *x,
-                            MV *ref_mv, int error_per_bit,
-                            int search_range,
-                            const vpx_variance_fn_ptr_t *fn_ptr,
-                            const MV *center_mv) {
+int vp10_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
+                             int search_range,
+                             const vpx_variance_fn_ptr_t *fn_ptr,
+                             const MV *center_mv) {
   const MACROBLOCKD *const xd = &x->e_mbd;
-  const MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+  const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
-  const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
   const uint8_t *best_address = get_buf_from_mv(in_what, ref_mv);
-  unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, best_address,
-                                    in_what->stride) +
+  unsigned int best_sad =
+      fn_ptr->sdf(what->buf, what->stride, best_address, in_what->stride) +
       mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
   int i, j;
 
@@ -2304,19 +2328,16 @@ int vp10_refining_search_sad(const MACROBLOCK *x,
 
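     // When all four neighbors lie inside the search range, batch their SADs
     // in a single sdx4df call instead of four scalar sdf calls.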
     if (all_in) {
       unsigned int sads[4];
-      const uint8_t *const positions[4] = {
-        best_address - in_what->stride,
-        best_address - 1,
-        best_address + 1,
-        best_address + in_what->stride
-      };
+      const uint8_t *const positions[4] = { best_address - in_what->stride,
+                                            best_address - 1, best_address + 1,
+                                            best_address + in_what->stride };
 
       fn_ptr->sdx4df(what->buf, what->stride, positions, in_what->stride, sads);
 
       for (j = 0; j < 4; ++j) {
         if (sads[j] < best_sad) {
-          const MV mv = {ref_mv->row + neighbors[j].row,
-                         ref_mv->col + neighbors[j].col};
+          const MV mv = { ref_mv->row + neighbors[j].row,
+                          ref_mv->col + neighbors[j].col };
           sads[j] += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
           if (sads[j] < best_sad) {
             best_sad = sads[j];
@@ -2326,13 +2347,13 @@ int vp10_refining_search_sad(const MACROBLOCK *x,
       }
     } else {
       for (j = 0; j < 4; ++j) {
-        const MV mv = {ref_mv->row + neighbors[j].row,
-                       ref_mv->col + neighbors[j].col};
+        const MV mv = { ref_mv->row + neighbors[j].row,
+                        ref_mv->col + neighbors[j].col };
 
         if (is_mv_in(x, &mv)) {
-          unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
-                                         get_buf_from_mv(in_what, &mv),
-                                         in_what->stride);
+          unsigned int sad =
+              fn_ptr->sdf(what->buf, what->stride,
+                          get_buf_from_mv(in_what, &mv), in_what->stride);
           if (sad < best_sad) {
             sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
             if (sad < best_sad) {
@@ -2358,20 +2379,19 @@ int vp10_refining_search_sad(const MACROBLOCK *x,
 
 // This function is called when we do joint motion search in comp_inter_inter
 // mode.
-int vp10_refining_search_8p_c(const MACROBLOCK *x,
-                             MV *ref_mv, int error_per_bit,
-                             int search_range,
-                             const vpx_variance_fn_ptr_t *fn_ptr,
-                             const MV *center_mv,
-                             const uint8_t *second_pred) {
-  const MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0},
-                           {-1, -1}, {1, -1}, {-1, 1}, {1, 1}};
+int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
+                              int error_per_bit, int search_range,
+                              const vpx_variance_fn_ptr_t *fn_ptr,
+                              const MV *center_mv, const uint8_t *second_pred) {
+  const MV neighbors[8] = { { -1, 0 },  { 0, -1 }, { 0, 1 },  { 1, 0 },
+                            { -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } };
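+  // Probes the four cross neighbors plus the four diagonals on each pass.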
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
-  const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
-  unsigned int best_sad = fn_ptr->sdaf(what->buf, what->stride,
-      get_buf_from_mv(in_what, ref_mv), in_what->stride, second_pred) +
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  unsigned int best_sad =
+      fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                   in_what->stride, second_pred) +
       mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
   int i, j;
 
@@ -2379,12 +2399,13 @@ int vp10_refining_search_8p_c(const MACROBLOCK *x,
     int best_site = -1;
 
     for (j = 0; j < 8; ++j) {
-      const MV mv = {ref_mv->row + neighbors[j].row,
-                     ref_mv->col + neighbors[j].col};
+      const MV mv = { ref_mv->row + neighbors[j].row,
+                      ref_mv->col + neighbors[j].col };
 
       if (is_mv_in(x, &mv)) {
-        unsigned int sad = fn_ptr->sdaf(what->buf, what->stride,
-            get_buf_from_mv(in_what, &mv), in_what->stride, second_pred);
+        unsigned int sad =
+            fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+                         in_what->stride, second_pred);
         if (sad < best_sad) {
           sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
           if (sad < best_sad) {
@@ -2408,21 +2429,19 @@ int vp10_refining_search_8p_c(const MACROBLOCK *x,
 #define MIN_EX_SEARCH_LIMIT 128
 static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) {
   const SPEED_FEATURES *const sf = &cpi->sf;
-  const int max_ex = VPXMAX(MIN_EX_SEARCH_LIMIT,
-      (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
+  const int max_ex =
+      VPXMAX(MIN_EX_SEARCH_LIMIT,
+             (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
 
   return sf->allow_exhaustive_searches &&
-      (sf->exhaustive_searches_thresh < INT_MAX) &&
-      (*x->ex_search_count_ptr <= max_ex) &&
-      !cpi->rc.is_src_frame_alt_ref;
+         (sf->exhaustive_searches_thresh < INT_MAX) &&
+         (*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
 }
 
-int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x,
-                          BLOCK_SIZE bsize, MV *mvp_full,
-                          int step_param, int error_per_bit,
-                          int *cost_list,
-                          const MV *ref_mv, MV *tmp_mv,
-                          int var_max, int rd) {
+int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+                           MV *mvp_full, int step_param, int error_per_bit,
+                           int *cost_list, const MV *ref_mv, MV *tmp_mv,
+                           int var_max, int rd) {
   const SPEED_FEATURES *const sf = &cpi->sf;
   const SEARCH_METHODS method = sf->mv.search_method;
   vpx_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
@@ -2441,42 +2460,41 @@ int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x,
   switch (method) {
     case FAST_DIAMOND:
       var = vp10_fast_dia_search(x, mvp_full, step_param, error_per_bit, 0,
-                                cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+                                 cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case FAST_HEX:
       var = vp10_fast_hex_search(x, mvp_full, step_param, error_per_bit, 0,
-                                cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+                                 cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case HEX:
       var = vp10_hex_search(x, mvp_full, step_param, error_per_bit, 1,
-                           cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+                            cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case SQUARE:
       var = vp10_square_search(x, mvp_full, step_param, error_per_bit, 1,
-                              cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+                               cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case BIGDIA:
       var = vp10_bigdia_search(x, mvp_full, step_param, error_per_bit, 1,
-                              cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+                               cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case NSTEP:
       var = vp10_full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
-                                   MAX_MVSEARCH_STEPS - 1 - step_param,
-                                   1, cost_list, fn_ptr, ref_mv, tmp_mv);
+                                    MAX_MVSEARCH_STEPS - 1 - step_param, 1,
+                                    cost_list, fn_ptr, ref_mv, tmp_mv);
 
       // Should we allow a follow-on exhaustive search?
       if (is_exhaustive_allowed(cpi, x)) {
         int64_t exhuastive_thr = sf->exhaustive_searches_thresh;
-        exhuastive_thr >>= 8 - (b_width_log2_lookup[bsize] +
-                                b_height_log2_lookup[bsize]);
+        exhuastive_thr >>=
+            8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
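+        // The two log2 terms sum to 8 for a 64x64 block, so the threshold is
+        // used as-is there and is scaled down in proportion to block area for
+        // smaller partitions.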
 
         // Threshold variance for an exhaustive full search.
         if (var > exhuastive_thr) {
-            int var_ex;
+          int var_ex;
           MV tmp_mv_ex;
-          var_ex = full_pixel_exhaustive(cpi, x, tmp_mv,
-                                         error_per_bit, cost_list, fn_ptr,
-                                         ref_mv, &tmp_mv_ex);
+          var_ex = full_pixel_exhaustive(cpi, x, tmp_mv, error_per_bit,
+                                         cost_list, fn_ptr, ref_mv, &tmp_mv_ex);
 
           if (var_ex < var) {
             var = var_ex;
@@ -2487,8 +2505,7 @@ int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x,
       break;
-    default:
-      assert(0 && "Invalid search method.");
+    default: assert(0 && "Invalid search method.");
   }
 
   if (method != NSTEP && rd && var < var_max)
diff --git a/vp10/encoder/mcomp.h b/vp10/encoder/mcomp.h
index 75979819a88ffd312f37e865968e28acf361f97f..e17264bf08d470853e8aed114d2279f0e46cbd4f 100644
--- a/vp10/encoder/mcomp.h
+++ b/vp10/encoder/mcomp.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_MCOMP_H_
 #define VP10_ENCODER_MCOMP_H_
 
@@ -26,7 +25,7 @@ extern "C" {
 // Enable the use of motion vectors in the range [-1023, 1023].
 #define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1)
 // Maximum size of the first step in full pel units
-#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1))
+#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1))
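+// (With MAX_MVSEARCH_STEPS == 11, MAX_FULL_PEL_VAL evaluates to 1023 and
+// MAX_FIRST_STEP to 1024 full pels, matching the range noted above.)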
 // Allowed motion vector pixel distance outside the image border
 // for Block_16x16
 #define BORDER_MV_PIXELS_B16 (16 + VPX_INTERP_EXTEND)
@@ -44,59 +43,48 @@ typedef struct search_site_config {
 } search_site_config;
 
 void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride);
-void vp10_init3smotion_compensation(search_site_config *cfg,  int stride);
+void vp10_init3smotion_compensation(search_site_config *cfg, int stride);
 
 void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv);
-int vp10_mv_bit_cost(const MV *mv, const MV *ref,
-                    const int *mvjcost, int *mvcost[2], int weight);
+int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+                     int *mvcost[2], int weight);
 
 // Utility to compute variance + MV rate cost for a given MV
-int vp10_get_mvpred_var(const MACROBLOCK *x,
-                       const MV *best_mv, const MV *center_mv,
-                       const vpx_variance_fn_ptr_t *vfp,
-                       int use_mvcost);
-int vp10_get_mvpred_av_var(const MACROBLOCK *x,
-                          const MV *best_mv, const MV *center_mv,
-                          const uint8_t *second_pred,
-                          const vpx_variance_fn_ptr_t *vfp,
-                          int use_mvcost);
+int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+                        const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
+                        int use_mvcost);
+int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+                           const MV *center_mv, const uint8_t *second_pred,
+                           const vpx_variance_fn_ptr_t *vfp, int use_mvcost);
 
 struct VP10_COMP;
 struct SPEED_FEATURES;
 
 int vp10_init_search_range(int size);
 
-int vp10_refining_search_sad(const struct macroblock *x,
-                            struct mv *ref_mv,
-                            int sad_per_bit, int distance,
-                            const struct vpx_variance_vtable *fn_ptr,
-                            const struct mv *center_mv);
+int vp10_refining_search_sad(const struct macroblock *x, struct mv *ref_mv,
+                             int sad_per_bit, int distance,
+                             const struct vpx_variance_vtable *fn_ptr,
+                             const struct mv *center_mv);
 
 // Runs a sequence of diamond searches in smaller steps for RD.
 int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
-                           MV *mvp_full, int step_param,
-                           int sadpb, int further_steps, int do_refine,
-                           int *cost_list,
-                           const vpx_variance_fn_ptr_t *fn_ptr,
-                           const MV *ref_mv, MV *dst_mv);
+                            MV *mvp_full, int step_param, int sadpb,
+                            int further_steps, int do_refine, int *cost_list,
+                            const vpx_variance_fn_ptr_t *fn_ptr,
+                            const MV *ref_mv, MV *dst_mv);
 
 // Perform integral-projection-based motion estimation.
 unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi,
-                                           MACROBLOCK *x,
-                                           BLOCK_SIZE bsize,
-                                           int mi_row, int mi_col);
-
-typedef int (integer_mv_pattern_search_fn) (
-    const MACROBLOCK *x,
-    MV *ref_mv,
-    int search_param,
-    int error_per_bit,
-    int do_init_search,
-    int *cost_list,
-    const vpx_variance_fn_ptr_t *vf,
-    int use_mvcost,
-    const MV *center_mv,
-    MV *best_mv);
+                                            MACROBLOCK *x, BLOCK_SIZE bsize,
+                                            int mi_row, int mi_col);
+
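+// Common signature for the pattern-based integer-pel searches declared
+// below (hex, bigdia, square, and their fast variants).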
+typedef int(integer_mv_pattern_search_fn)(const MACROBLOCK *x, MV *ref_mv,
+                                          int search_param, int error_per_bit,
+                                          int do_init_search, int *cost_list,
+                                          const vpx_variance_fn_ptr_t *vf,
+                                          int use_mvcost, const MV *center_mv,
+                                          MV *best_mv);
 
 integer_mv_pattern_search_fn vp10_hex_search;
 integer_mv_pattern_search_fn vp10_bigdia_search;
@@ -104,59 +92,45 @@ integer_mv_pattern_search_fn vp10_square_search;
 integer_mv_pattern_search_fn vp10_fast_hex_search;
 integer_mv_pattern_search_fn vp10_fast_dia_search;
 
-typedef int (fractional_mv_step_fp) (
-    const MACROBLOCK *x,
-    MV *bestmv, const MV *ref_mv,
-    int allow_hp,
-    int error_per_bit,
-    const vpx_variance_fn_ptr_t *vfp,
+typedef int(fractional_mv_step_fp)(
+    const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
     int forced_stop,  // 0 - full, 1 - qtr only, 2 - half only
-    int iters_per_step,
-    int *cost_list,
-    int *mvjcost, int *mvcost[2],
-    int *distortion, unsigned int *sse1,
-    const uint8_t *second_pred,
-    int w, int h);
+    int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+    int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+    int h);
 
 extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree;
 extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned;
 extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more;
 extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore;
 
-typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x,
-                                    const MV *ref_mv, int sad_per_bit,
-                                    int distance,
-                                    const vpx_variance_fn_ptr_t *fn_ptr,
-                                    const MV *center_mv, MV *best_mv);
-
-typedef int (*vp10_refining_search_fn_t)(const MACROBLOCK *x,
-                                        MV *ref_mv, int sad_per_bit,
-                                        int distance,
-                                        const vpx_variance_fn_ptr_t *fn_ptr,
-                                        const MV *center_mv);
-
-typedef int (*vp10_diamond_search_fn_t)(const MACROBLOCK *x,
-                                       const search_site_config *cfg,
-                                       MV *ref_mv, MV *best_mv,
-                                       int search_param, int sad_per_bit,
-                                       int *num00,
-                                       const vpx_variance_fn_ptr_t *fn_ptr,
-                                       const MV *center_mv);
-
-int vp10_refining_search_8p_c(const MACROBLOCK *x,
-                             MV *ref_mv, int error_per_bit,
-                             int search_range,
-                             const vpx_variance_fn_ptr_t *fn_ptr,
-                             const MV *center_mv, const uint8_t *second_pred);
+typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
+                                     int sad_per_bit, int distance,
+                                     const vpx_variance_fn_ptr_t *fn_ptr,
+                                     const MV *center_mv, MV *best_mv);
+
+typedef int (*vp10_refining_search_fn_t)(const MACROBLOCK *x, MV *ref_mv,
+                                         int sad_per_bit, int distance,
+                                         const vpx_variance_fn_ptr_t *fn_ptr,
+                                         const MV *center_mv);
+
+typedef int (*vp10_diamond_search_fn_t)(
+    const MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv,
+    int search_param, int sad_per_bit, int *num00,
+    const vpx_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
+
+int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
+                              int error_per_bit, int search_range,
+                              const vpx_variance_fn_ptr_t *fn_ptr,
+                              const MV *center_mv, const uint8_t *second_pred);
 
 struct VP10_COMP;
 
 int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
-                          BLOCK_SIZE bsize, MV *mvp_full,
-                          int step_param, int error_per_bit,
-                          int *cost_list,
-                          const MV *ref_mv, MV *tmp_mv,
-                          int var_max, int rd);
+                           BLOCK_SIZE bsize, MV *mvp_full, int step_param,
+                           int error_per_bit, int *cost_list, const MV *ref_mv,
+                           MV *tmp_mv, int var_max, int rd);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/encoder/mips/msa/error_msa.c b/vp10/encoder/mips/msa/error_msa.c
index dacca32c05ad1abfb503ea853fa1f0e9a4c3ce7c..9a098eaea09ee10374b30783c5800d4f4bf8f2e9 100644
--- a/vp10/encoder/mips/msa/error_msa.c
+++ b/vp10/encoder/mips/msa/error_msa.c
@@ -11,100 +11,93 @@
 #include "./vp10_rtcd.h"
 #include "vpx_dsp/mips/macros_msa.h"
 
-#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize)                                   \
-static int64_t block_error_##BSize##size_msa(const int16_t *coeff_ptr,     \
-                                             const int16_t *dq_coeff_ptr,  \
-                                             int64_t *ssz) {               \
-  int64_t err = 0;                                                         \
-  uint32_t loop_cnt;                                                       \
-  v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h;                             \
-  v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w;                              \
-  v2i64 sq_coeff_r, sq_coeff_l;                                            \
-  v2i64 err0, err_dup0, err1, err_dup1;                                    \
-                                                                           \
-  coeff = LD_SH(coeff_ptr);                                                \
-  dq_coeff = LD_SH(dq_coeff_ptr);                                          \
-  UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                                \
-  ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                      \
-  HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                       \
-  DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w,                  \
-              sq_coeff_r, sq_coeff_l);                                     \
-  DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1);                 \
-                                                                           \
-  coeff = LD_SH(coeff_ptr + 8);                                            \
-  dq_coeff = LD_SH(dq_coeff_ptr + 8);                                      \
-  UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                                \
-  ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                      \
-  HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                       \
-  DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);              \
-  DPADD_SD2_SD(diff_r, diff_l, err0, err1);                                \
-                                                                           \
-  coeff_ptr += 16;                                                         \
-  dq_coeff_ptr += 16;                                                      \
-                                                                           \
-  for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) {                       \
-    coeff = LD_SH(coeff_ptr);                                              \
-    dq_coeff = LD_SH(dq_coeff_ptr);                                        \
-    UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                              \
-    ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                    \
-    HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                     \
-    DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);            \
-    DPADD_SD2_SD(diff_r, diff_l, err0, err1);                              \
-                                                                           \
-    coeff = LD_SH(coeff_ptr + 8);                                          \
-    dq_coeff = LD_SH(dq_coeff_ptr + 8);                                    \
-    UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                              \
-    ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                    \
-    HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                     \
-    DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);            \
-    DPADD_SD2_SD(diff_r, diff_l, err0, err1);                              \
-                                                                           \
-    coeff_ptr += 16;                                                       \
-    dq_coeff_ptr += 16;                                                    \
-  }                                                                        \
-                                                                           \
-  err_dup0 = __msa_splati_d(sq_coeff_r, 1);                                \
-  err_dup1 = __msa_splati_d(sq_coeff_l, 1);                                \
-  sq_coeff_r += err_dup0;                                                  \
-  sq_coeff_l += err_dup1;                                                  \
-  *ssz = __msa_copy_s_d(sq_coeff_r, 0);                                    \
-  *ssz += __msa_copy_s_d(sq_coeff_l, 0);                                   \
-                                                                           \
-  err_dup0 = __msa_splati_d(err0, 1);                                      \
-  err_dup1 = __msa_splati_d(err1, 1);                                      \
-  err0 += err_dup0;                                                        \
-  err1 += err_dup1;                                                        \
-  err = __msa_copy_s_d(err0, 0);                                           \
-  err += __msa_copy_s_d(err1, 0);                                          \
-                                                                           \
-  return err;                                                              \
-}
+#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize)                                     \
+  static int64_t block_error_##BSize##size_msa(                              \
+      const int16_t *coeff_ptr, const int16_t *dq_coeff_ptr, int64_t *ssz) { \
+    int64_t err = 0;                                                         \
+    uint32_t loop_cnt;                                                       \
+    v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h;                             \
+    v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w;                              \
+    v2i64 sq_coeff_r, sq_coeff_l;                                            \
+    v2i64 err0, err_dup0, err1, err_dup1;                                    \
+                                                                             \
+    coeff = LD_SH(coeff_ptr);                                                \
+    dq_coeff = LD_SH(dq_coeff_ptr);                                          \
+    UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                                \
+    ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                      \
+    HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                       \
+    DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, sq_coeff_r,      \
+                sq_coeff_l);                                                 \
+    DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1);                 \
+                                                                             \
+    coeff = LD_SH(coeff_ptr + 8);                                            \
+    dq_coeff = LD_SH(dq_coeff_ptr + 8);                                      \
+    UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                                \
+    ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                      \
+    HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                       \
+    DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);              \
+    DPADD_SD2_SD(diff_r, diff_l, err0, err1);                                \
+                                                                             \
+    coeff_ptr += 16;                                                         \
+    dq_coeff_ptr += 16;                                                      \
+                                                                             \
+    for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) {                       \
+      coeff = LD_SH(coeff_ptr);                                              \
+      dq_coeff = LD_SH(dq_coeff_ptr);                                        \
+      UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                              \
+      ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                    \
+      HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                     \
+      DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);            \
+      DPADD_SD2_SD(diff_r, diff_l, err0, err1);                              \
+                                                                             \
+      coeff = LD_SH(coeff_ptr + 8);                                          \
+      dq_coeff = LD_SH(dq_coeff_ptr + 8);                                    \
+      UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                              \
+      ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                    \
+      HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                     \
+      DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);            \
+      DPADD_SD2_SD(diff_r, diff_l, err0, err1);                              \
+                                                                             \
+      coeff_ptr += 16;                                                       \
+      dq_coeff_ptr += 16;                                                    \
+    }                                                                        \
+                                                                             \
+    err_dup0 = __msa_splati_d(sq_coeff_r, 1);                                \
+    err_dup1 = __msa_splati_d(sq_coeff_l, 1);                                \
+    sq_coeff_r += err_dup0;                                                  \
+    sq_coeff_l += err_dup1;                                                  \
+    *ssz = __msa_copy_s_d(sq_coeff_r, 0);                                    \
+    *ssz += __msa_copy_s_d(sq_coeff_l, 0);                                   \
+                                                                             \
+    err_dup0 = __msa_splati_d(err0, 1);                                      \
+    err_dup1 = __msa_splati_d(err1, 1);                                      \
+    err0 += err_dup0;                                                        \
+    err1 += err_dup1;                                                        \
+    err = __msa_copy_s_d(err0, 0);                                           \
+    err += __msa_copy_s_d(err1, 0);                                          \
+                                                                             \
+    return err;                                                              \
+  }
 
-BLOCK_ERROR_BLOCKSIZE_MSA(16);
-BLOCK_ERROR_BLOCKSIZE_MSA(64);
-BLOCK_ERROR_BLOCKSIZE_MSA(256);
-BLOCK_ERROR_BLOCKSIZE_MSA(1024);
+/* clang-format off */
+BLOCK_ERROR_BLOCKSIZE_MSA(16)
+BLOCK_ERROR_BLOCKSIZE_MSA(64)
+BLOCK_ERROR_BLOCKSIZE_MSA(256)
+BLOCK_ERROR_BLOCKSIZE_MSA(1024)
+/* clang-format on */
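+/* Each invocation above instantiates block_error_<N>size_msa for one of the
+ * supported coefficient counts (4x4 through 32x32 transforms). */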
 
 int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr,
-                            const tran_low_t *dq_coeff_ptr,
-                            intptr_t blk_size, int64_t *ssz) {
+                             const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
+                             int64_t *ssz) {
   int64_t err;
   const int16_t *coeff = (const int16_t *)coeff_ptr;
   const int16_t *dq_coeff = (const int16_t *)dq_coeff_ptr;
 
   switch (blk_size) {
-    case 16:
-      err = block_error_16size_msa(coeff, dq_coeff, ssz);
-      break;
-    case 64:
-      err = block_error_64size_msa(coeff, dq_coeff, ssz);
-      break;
-    case 256:
-      err = block_error_256size_msa(coeff, dq_coeff, ssz);
-      break;
-    case 1024:
-      err = block_error_1024size_msa(coeff, dq_coeff, ssz);
-      break;
+    case 16: err = block_error_16size_msa(coeff, dq_coeff, ssz); break;
+    case 64: err = block_error_64size_msa(coeff, dq_coeff, ssz); break;
+    case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break;
+    case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break;
     default:
       err = vp10_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
       break;
diff --git a/vp10/encoder/mips/msa/fdct16x16_msa.c b/vp10/encoder/mips/msa/fdct16x16_msa.c
index d78fc6473e766ca32b8957324c5fd73726d51259..c39a292f8a0e38cecfc0f53a858b96b0d5d5c227 100644
--- a/vp10/encoder/mips/msa/fdct16x16_msa.c
+++ b/vp10/encoder/mips/msa/fdct16x16_msa.c
@@ -159,8 +159,8 @@ static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) {
 
   /* load input data */
   LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
-  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
-                     r0, r1, r2, r3, r4, r5, r6, r7);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+                     r7);
   FDCT_POSTPROC_2V_NEG_H(r0, r1);
   FDCT_POSTPROC_2V_NEG_H(r2, r3);
   FDCT_POSTPROC_2V_NEG_H(r4, r5);
@@ -169,8 +169,8 @@ static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) {
   out += 64;
 
   LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
-  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
-                     r8, r9, r10, r11, r12, r13, r14, r15);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+                     r12, r13, r14, r15);
   FDCT_POSTPROC_2V_NEG_H(r8, r9);
   FDCT_POSTPROC_2V_NEG_H(r10, r11);
   FDCT_POSTPROC_2V_NEG_H(r12, r13);
@@ -181,8 +181,8 @@ static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) {
   /* load input data */
   input += 128;
   LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
-  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
-                     r0, r1, r2, r3, r4, r5, r6, r7);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+                     r7);
   FDCT_POSTPROC_2V_NEG_H(r0, r1);
   FDCT_POSTPROC_2V_NEG_H(r2, r3);
   FDCT_POSTPROC_2V_NEG_H(r4, r5);
@@ -191,8 +191,8 @@ static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) {
   out += 64;
 
   LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
-  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
-                     r8, r9, r10, r11, r12, r13, r14, r15);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+                     r12, r13, r14, r15);
   FDCT_POSTPROC_2V_NEG_H(r8, r9);
   FDCT_POSTPROC_2V_NEG_H(r10, r11);
   FDCT_POSTPROC_2V_NEG_H(r12, r13);
@@ -339,24 +339,24 @@ static void fadst16_transpose_msa(int16_t *input, int16_t *out) {
   v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
 
   /* load input data */
-  LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11,
-          l4, l12, l5, l13, l6, l14, l7, l15);
-  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
-                     r0, r1, r2, r3, r4, r5, r6, r7);
-  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
-                     r8, r9, r10, r11, r12, r13, r14, r15);
+  LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+          l7, l15);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+                     r7);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+                     r12, r13, r14, r15);
   ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
   ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
   out += 16 * 8;
 
   /* load input data */
   input += 128;
-  LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11,
-          l4, l12, l5, l13, l6, l14, l7, l15);
-  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
-                     r0, r1, r2, r3, r4, r5, r6, r7);
-  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
-                     r8, r9, r10, r11, r12, r13, r14, r15);
+  LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+          l7, l15);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+                     r7);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+                     r12, r13, r14, r15);
   ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
   ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
 }
@@ -371,10 +371,10 @@ static void postproc_fdct16x8_1d_row(int16_t *intermediate, int16_t *output) {
   LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7);
   temp = intermediate + 8;
   LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
-                     in8, in9, in10, in11, in12, in13, in14, in15);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+                     in10, in11, in12, in13, in14, in15);
   FDCT_POSTPROC_2V_NEG_H(in0, in1);
   FDCT_POSTPROC_2V_NEG_H(in2, in3);
   FDCT_POSTPROC_2V_NEG_H(in4, in5);
@@ -383,29 +383,28 @@ static void postproc_fdct16x8_1d_row(int16_t *intermediate, int16_t *output) {
   FDCT_POSTPROC_2V_NEG_H(in10, in11);
   FDCT_POSTPROC_2V_NEG_H(in12, in13);
   FDCT_POSTPROC_2V_NEG_H(in14, in15);
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+               in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
+               tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
   temp = intermediate;
   ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);
-  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
-                tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
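     // A single sdx4df call evaluates the SAD at all four neighboring
     // positions at once.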
+                tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
   temp = intermediate;
   LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
-  FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15,
-               in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3,
-                     tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3);
+  FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3,
+               in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0,
+                     tmp1, in1, tmp2, in2, tmp3, in3);
   ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16);
-  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7,
-                     tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7);
+  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4,
+                     tmp5, in5, tmp6, in6, tmp7, in7);
   out = output + 8;
   ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
 }
 
-void vp10_fht16x16_msa(const int16_t *input, int16_t *output,
-                      int32_t stride, int32_t tx_type) {
+void vp10_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
+                       int32_t tx_type) {
   DECLARE_ALIGNED(32, int16_t, tmp[256]);
   DECLARE_ALIGNED(32, int16_t, trans_buf[256]);
   DECLARE_ALIGNED(32, int16_t, tmp_buf[128]);
@@ -413,35 +412,31 @@ void vp10_fht16x16_msa(const int16_t *input, int16_t *output,
   int16_t *ptmpbuf = &tmp_buf[0];
   int16_t *trans = &trans_buf[0];
   const int32_t const_arr[29 * 4] = {
-    52707308, 52707308, 52707308, 52707308,
-    -1072430300, -1072430300, -1072430300, -1072430300,
-    795618043, 795618043, 795618043, 795618043,
-    -721080468, -721080468, -721080468, -721080468,
-    459094491, 459094491, 459094491, 459094491,
-    -970646691, -970646691, -970646691, -970646691,
-    1010963856, 1010963856, 1010963856, 1010963856,
-    -361743294, -361743294, -361743294, -361743294,
-    209469125, 209469125, 209469125, 209469125,
-    -1053094788, -1053094788, -1053094788, -1053094788,
-    1053160324, 1053160324, 1053160324, 1053160324,
-    639644520, 639644520, 639644520, 639644520,
-    -862444000, -862444000, -862444000, -862444000,
-    1062144356, 1062144356, 1062144356, 1062144356,
-    -157532337, -157532337, -157532337, -157532337,
-    260914709, 260914709, 260914709, 260914709,
-    -1041559667, -1041559667, -1041559667, -1041559667,
-    920985831, 920985831, 920985831, 920985831,
-    -551995675, -551995675, -551995675, -551995675,
-    596522295, 596522295, 596522295, 596522295,
-    892853362, 892853362, 892853362, 892853362,
-    -892787826, -892787826, -892787826, -892787826,
-    410925857, 410925857, 410925857, 410925857,
-    -992012162, -992012162, -992012162, -992012162,
-    992077698, 992077698, 992077698, 992077698,
-    759246145, 759246145, 759246145, 759246145,
-    -759180609, -759180609, -759180609, -759180609,
-    -759222975, -759222975, -759222975, -759222975,
-    759288511, 759288511, 759288511, 759288511 };
+    52707308,    52707308,    52707308,    52707308,    -1072430300,
+    -1072430300, -1072430300, -1072430300, 795618043,   795618043,
+    795618043,   795618043,   -721080468,  -721080468,  -721080468,
+    -721080468,  459094491,   459094491,   459094491,   459094491,
+    -970646691,  -970646691,  -970646691,  -970646691,  1010963856,
+    1010963856,  1010963856,  1010963856,  -361743294,  -361743294,
+    -361743294,  -361743294,  209469125,   209469125,   209469125,
+    209469125,   -1053094788, -1053094788, -1053094788, -1053094788,
+    1053160324,  1053160324,  1053160324,  1053160324,  639644520,
+    639644520,   639644520,   639644520,   -862444000,  -862444000,
+    -862444000,  -862444000,  1062144356,  1062144356,  1062144356,
+    1062144356,  -157532337,  -157532337,  -157532337,  -157532337,
+    260914709,   260914709,   260914709,   260914709,   -1041559667,
+    -1041559667, -1041559667, -1041559667, 920985831,   920985831,
+    920985831,   920985831,   -551995675,  -551995675,  -551995675,
+    -551995675,  596522295,   596522295,   596522295,   596522295,
+    892853362,   892853362,   892853362,   892853362,   -892787826,
+    -892787826,  -892787826,  -892787826,  410925857,   410925857,
+    410925857,   410925857,   -992012162,  -992012162,  -992012162,
+    -992012162,  992077698,   992077698,   992077698,   992077698,
+    759246145,   759246145,   759246145,   759246145,   -759180609,
+    -759180609,  -759180609,  -759180609,  -759222975,  -759222975,
+    -759222975,  -759222975,  759288511,   759288511,   759288511,
+    759288511
+  };
 
   switch (tx_type) {
     case DCT_DCT:
@@ -500,8 +495,6 @@ void vp10_fht16x16_msa(const int16_t *input, int16_t *output,
 
       fadst16_transpose_msa(tmp, output);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
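A note on the const_arr table reformatted above: each 32-bit entry appears to
pack two 16-bit cosine constants from the vpx_dsp cospi tables into one word,
and every word is repeated four times so a full v4i32 of coefficients can be
loaded straight from memory instead of being rebuilt with fill instructions.
A small stand-alone check of that reading (pack_pair is a hypothetical helper,
not part of the tree):

#include <assert.h>
#include <stdint.h>

/* Pack two 16-bit transform constants into the 32-bit lane layout used
 * by dot-product style MSA instructions (illustration only). */
static int32_t pack_pair(int16_t hi, int16_t lo) {
  return (int32_t)(((uint32_t)(uint16_t)hi << 16) | (uint16_t)lo);
}

int main(void) {
  /* cospi_31_64 = 804 and cospi_1_64 = 16364 in vpx_dsp/txfm_common.h. */
  assert(pack_pair(804, 16364) == 52707308);     /* first table entry */
  assert(pack_pair(-16364, 804) == -1072430300); /* second table entry */
  return 0;
}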
diff --git a/vp10/encoder/mips/msa/fdct4x4_msa.c b/vp10/encoder/mips/msa/fdct4x4_msa.c
index f9028f096a1134f8a11f98cab57a5325aa45a7fc..def9b45f9fe29bd8b60e73212495f9fad0012f1a 100644
--- a/vp10/encoder/mips/msa/fdct4x4_msa.c
+++ b/vp10/encoder/mips/msa/fdct4x4_msa.c
@@ -14,7 +14,7 @@
 #include "vp10/encoder/mips/msa/fdct_msa.h"
 
 void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
-                     int32_t src_stride) {
+                      int32_t src_stride) {
   v8i16 in0, in1, in2, in3, in4;
 
   LD_SH4(input, src_stride, in0, in1, in2, in3);
@@ -46,7 +46,7 @@ void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
 }
 
 void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
-                    int32_t tx_type) {
+                     int32_t tx_type) {
   v8i16 in0, in1, in2, in3;
 
   LD_SH4(input, stride, in0, in1, in2, in3);
@@ -86,9 +86,7 @@ void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
       TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
       VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
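The tx_type switch in vp10_fht4x4_msa above (and in the 8x8 and 16x16 variants
elsewhere in this patch) always has the same shape: the first half of the name
selects the vertical pass, the second half the horizontal pass, with a
transpose between them. A scalar sketch of that selection, with hypothetical
names:

#include <stdint.h>

typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST } TX_TYPE;
typedef void (*tx1d_fn)(const int16_t *in, int16_t *out);

/* Hypothetical dispatch: the first word of the tx_type names the column
 * (vertical) transform, the second word the row (horizontal) one. */
static void pick_tx(TX_TYPE t, tx1d_fn dct, tx1d_fn adst, tx1d_fn *col,
                    tx1d_fn *row) {
  *col = (t == ADST_DCT || t == ADST_ADST) ? adst : dct;
  *row = (t == DCT_ADST || t == ADST_ADST) ? adst : dct;
}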
diff --git a/vp10/encoder/mips/msa/fdct8x8_msa.c b/vp10/encoder/mips/msa/fdct8x8_msa.c
index 8e1e111b3684cea7bfb4358524b06f5acf100c91..7843a624c4ac197c2e20bb6b73950307ddce6dda 100644
--- a/vp10/encoder/mips/msa/fdct8x8_msa.c
+++ b/vp10/encoder/mips/msa/fdct8x8_msa.c
@@ -14,7 +14,7 @@
 #include "vp10/encoder/mips/msa/fdct_msa.h"
 
 void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
-                    int32_t tx_type) {
+                     int32_t tx_type) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
   LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);
@@ -23,44 +23,42 @@ void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
 
   switch (tx_type) {
     case DCT_DCT:
-      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
-      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                         in0, in1, in2, in3, in4, in5, in6, in7);
-      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
+      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
       break;
     case ADST_DCT:
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
-      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                         in0, in1, in2, in3, in4, in5, in6, in7);
-      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
       break;
     case DCT_ADST:
-      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
-      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                         in0, in1, in2, in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
+      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
       break;
     case ADST_ADST:
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
-      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                         in0, in1, in2, in3, in4, in5, in6, in7);
-      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
-                in0, in1, in2, in3, in4, in5, in6, in7);
-      break;
-    default:
-      assert(0);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
       break;
+    default: assert(0); break;
   }
 
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
   SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
   ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
 }
diff --git a/vp10/encoder/mips/msa/fdct_msa.h b/vp10/encoder/mips/msa/fdct_msa.h
index bf56251eef78ff476ba6fd0bf5424f46da60365e..9a14625971529a4e58f858175e5aab1c39c26c1c 100644
--- a/vp10/encoder/mips/msa/fdct_msa.h
+++ b/vp10/encoder/mips/msa/fdct_msa.h
@@ -15,103 +15,102 @@
 #include "vpx_dsp/mips/txfm_macros_msa.h"
 #include "vpx_ports/mem.h"
 
-#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,                   \
-                  out0, out1, out2, out3, out4, out5, out6, out7) {         \
-  v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                        \
-  v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m;                         \
-  v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64,      \
-                     cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 };  \
-  v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64,    \
-                     cospi_24_64, -cospi_24_64, 0, 0 };                     \
-                                                                            \
-  SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m);                           \
-  cnst2_m = -cnst0_m;                                                       \
-  ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);        \
-  SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m);                           \
-  cnst4_m = -cnst2_m;                                                       \
-  ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);        \
-                                                                            \
-  ILVRL_H2_SH(in0, in7, vec1_m, vec0_m);                                    \
-  ILVRL_H2_SH(in4, in3, vec3_m, vec2_m);                                    \
-  DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,            \
-                        cnst1_m, cnst2_m, cnst3_m, in7, in0,                \
-                        in4, in3);                                          \
-                                                                            \
-  SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m);                           \
-  cnst2_m = -cnst0_m;                                                       \
-  ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);        \
-  SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m);                           \
-  cnst4_m = -cnst2_m;                                                       \
-  ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);        \
+#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
+                  out3, out4, out5, out6, out7)                              \
+  {                                                                          \
+    v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                       \
+    v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m;                        \
+    v8i16 coeff0_m = { cospi_2_64,  cospi_6_64,  cospi_10_64, cospi_14_64,   \
+                       cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
+    v8i16 coeff1_m = { cospi_8_64,  -cospi_8_64,  cospi_16_64, -cospi_16_64, \
+                       cospi_24_64, -cospi_24_64, 0,           0 };          \
+                                                                             \
+    SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m);                          \
+    cnst2_m = -cnst0_m;                                                      \
+    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);       \
+    SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m);                          \
+    cnst4_m = -cnst2_m;                                                      \
+    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);       \
+                                                                             \
+    ILVRL_H2_SH(in0, in7, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in4, in3, vec3_m, vec2_m);                                   \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m,  \
+                          cnst2_m, cnst3_m, in7, in0, in4, in3);             \
+                                                                             \
+    SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m);                          \
+    cnst2_m = -cnst0_m;                                                      \
+    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);       \
+    SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m);                          \
+    cnst4_m = -cnst2_m;                                                      \
+    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);       \
+                                                                             \
+    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                   \
+                                                                             \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m,  \
+                          cnst2_m, cnst3_m, in5, in2, in6, in1);             \
+    BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                   \
+    out7 = -s0_m;                                                            \
+    out0 = s1_m;                                                             \
+                                                                             \
+    SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m);  \
+                                                                             \
+    ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m);       \
+    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                               \
+    cnst1_m = cnst0_m;                                                       \
+                                                                             \
+    ILVRL_H2_SH(in4, in3, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                   \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst2_m,  \
+                          cnst3_m, cnst1_m, out1, out6, s0_m, s1_m);         \
+                                                                             \
+    SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                          \
+    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                               \
+                                                                             \
+    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m);                                 \
+    out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                   \
+    out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);                   \
+    out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);                   \
+    out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);                   \
+                                                                             \
+    out1 = -out1;                                                            \
+    out3 = -out3;                                                            \
+    out5 = -out5;                                                            \
+  }
+
+#define VPX_FADST4(in0, in1, in2, in3, out0, out1, out2, out3)              \
+  {                                                                         \
+    v4i32 s0_m, s1_m, s2_m, s3_m, constant_m;                               \
+    v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m;                               \
                                                                             \
-  ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                    \
-  ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                    \
+    UNPCK_R_SH_SW(in0, in0_r_m);                                            \
+    UNPCK_R_SH_SW(in1, in1_r_m);                                            \
+    UNPCK_R_SH_SW(in2, in2_r_m);                                            \
+    UNPCK_R_SH_SW(in3, in3_r_m);                                            \
                                                                             \
-  DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,            \
-                        cnst1_m, cnst2_m, cnst3_m, in5, in2,                \
-                        in6, in1);                                          \
-  BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                    \
-  out7 = -s0_m;                                                             \
-  out0 = s1_m;                                                              \
+    constant_m = __msa_fill_w(sinpi_4_9);                                   \
+    MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m);             \
                                                                             \
-  SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m);   \
+    constant_m = __msa_fill_w(sinpi_1_9);                                   \
+    s0_m += in0_r_m * constant_m;                                           \
+    s1_m -= in1_r_m * constant_m;                                           \
                                                                             \
-  ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m);        \
-  cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                \
-  cnst1_m = cnst0_m;                                                        \
+    constant_m = __msa_fill_w(sinpi_2_9);                                   \
+    s0_m += in1_r_m * constant_m;                                           \
+    s1_m += in3_r_m * constant_m;                                           \
                                                                             \
-  ILVRL_H2_SH(in4, in3, vec1_m, vec0_m);                                    \
-  ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                    \
-  DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,            \
-                        cnst2_m, cnst3_m, cnst1_m, out1, out6,              \
-                        s0_m, s1_m);                                        \
+    s2_m = in0_r_m + in1_r_m - in3_r_m;                                     \
                                                                             \
-  SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                           \
-  cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                \
+    constant_m = __msa_fill_w(sinpi_3_9);                                   \
+    MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m);             \
                                                                             \
-  ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                    \
-  ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m);                                  \
-  out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                    \
-  out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);                    \
-  out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);                    \
-  out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);                    \
+    in0_r_m = s0_m + s3_m;                                                  \
+    s2_m = s1_m - s3_m;                                                     \
+    s3_m = s1_m - s0_m + s3_m;                                              \
                                                                             \
-  out1 = -out1;                                                             \
-  out3 = -out3;                                                             \
-  out5 = -out5;                                                             \
-}
-
-#define VPX_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
-  v4i32 s0_m, s1_m, s2_m, s3_m, constant_m;                       \
-  v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m;                       \
-                                                                  \
-  UNPCK_R_SH_SW(in0, in0_r_m);                                    \
-  UNPCK_R_SH_SW(in1, in1_r_m);                                    \
-  UNPCK_R_SH_SW(in2, in2_r_m);                                    \
-  UNPCK_R_SH_SW(in3, in3_r_m);                                    \
-                                                                  \
-  constant_m = __msa_fill_w(sinpi_4_9);                           \
-  MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m);     \
-                                                                  \
-  constant_m = __msa_fill_w(sinpi_1_9);                           \
-  s0_m += in0_r_m * constant_m;                                   \
-  s1_m -= in1_r_m * constant_m;                                   \
-                                                                  \
-  constant_m = __msa_fill_w(sinpi_2_9);                           \
-  s0_m += in1_r_m * constant_m;                                   \
-  s1_m += in3_r_m * constant_m;                                   \
-                                                                  \
-  s2_m = in0_r_m + in1_r_m - in3_r_m;                             \
-                                                                  \
-  constant_m = __msa_fill_w(sinpi_3_9);                           \
-  MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m);     \
-                                                                  \
-  in0_r_m = s0_m + s3_m;                                          \
-  s2_m = s1_m - s3_m;                                             \
-  s3_m = s1_m - s0_m + s3_m;                                      \
-                                                                  \
-  SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS);      \
-  PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m,     \
-              s3_m, s3_m, out0, out1, out2, out3);                \
-}
-#endif   // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+    SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS);              \
+    PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
+                out0, out1, out2, out3);                                    \
+  }
+#endif  // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
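Read scalar-wise, VPX_FADST4 above is a 4-point ADST: four dot products with
the sinpi_*_9 constants followed by a DCT_CONST_BITS rounding shift. A sketch
derived directly from the macro body (round_shift stands in for SRARI_W4_SW,
the final PCKEV_H4_SH packing is glossed over, and the constants are assumed
to come from vpx_dsp/txfm_common.h):

#include <stdint.h>
#include "vpx_dsp/txfm_common.h" /* sinpi_*_9, DCT_CONST_BITS */

static int16_t round_shift(int64_t x) {
  return (int16_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

static void fadst4_scalar(const int16_t in[4], int16_t out[4]) {
  /* Same accumulation order as the MUL2/fill_w steps in the macro. */
  const int64_t s0 =
      sinpi_4_9 * in[3] + sinpi_1_9 * in[0] + sinpi_2_9 * in[1];
  const int64_t s1 =
      sinpi_4_9 * in[0] - sinpi_1_9 * in[1] + sinpi_2_9 * in[3];
  const int64_t s3 = sinpi_3_9 * in[2];
  out[0] = round_shift(s0 + s3);
  out[1] = round_shift((int64_t)sinpi_3_9 * (in[0] + in[1] - in[3]));
  out[2] = round_shift(s1 - s3);
  out[3] = round_shift(s1 - s0 + s3);
}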
diff --git a/vp10/encoder/mips/msa/temporal_filter_msa.c b/vp10/encoder/mips/msa/temporal_filter_msa.c
index 5d4558b94cb68e691e846c256fad095abf656999..5feeab3e73ef05b812251333f40fddc20f72874d 100644
--- a/vp10/encoder/mips/msa/temporal_filter_msa.c
+++ b/vp10/encoder/mips/msa/temporal_filter_msa.c
@@ -11,12 +11,9 @@
 #include "./vp10_rtcd.h"
 #include "vpx_dsp/mips/macros_msa.h"
 
-static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
-                                            uint32_t stride,
-                                            uint8_t *frm2_ptr,
-                                            int32_t filt_sth,
-                                            int32_t filt_wgt,
-                                            uint32_t *acc,
+static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, uint32_t stride,
+                                            uint8_t *frm2_ptr, int32_t filt_sth,
+                                            int32_t filt_wgt, uint32_t *acc,
                                             uint16_t *cnt) {
   uint32_t row;
   uint64_t f0, f1, f2, f3;
@@ -54,10 +51,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
     HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
     UNPCK_SH_SW(diff0, diff0_r, diff0_l);
     UNPCK_SH_SW(diff1, diff1_r, diff1_l);
-    MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l,
-         diff1_l, mod0_w, mod1_w, mod2_w, mod3_w);
-    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
+    MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
          mod0_w, mod1_w, mod2_w, mod3_w);
+    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+         mod1_w, mod2_w, mod3_w);
     SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
 
     diff0_r = (mod0_w < cnst16);
@@ -65,8 +62,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
     diff1_r = (mod2_w < cnst16);
     diff1_l = (mod3_w < cnst16);
 
-    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+         mod1_w, mod2_w, mod3_w);
 
     mod0_w = diff0_r & mod0_w;
     mod1_w = diff0_l & mod1_w;
@@ -85,8 +82,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
     UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
     MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
          mod0_w, mod1_w, mod2_w, mod3_w);
-    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+         mod2_w, mod3_w);
 
     ST_SW2(mod0_w, mod1_w, acc, 4);
     acc += 8;
@@ -101,10 +98,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
     HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
     UNPCK_SH_SW(diff0, diff0_r, diff0_l);
     UNPCK_SH_SW(diff1, diff1_r, diff1_l);
-    MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l,
-         diff1_l, mod0_w, mod1_w, mod2_w, mod3_w);
-    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
+    MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
          mod0_w, mod1_w, mod2_w, mod3_w);
+    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+         mod1_w, mod2_w, mod3_w);
     SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
 
     diff0_r = (mod0_w < cnst16);
@@ -112,8 +109,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
     diff1_r = (mod2_w < cnst16);
     diff1_l = (mod3_w < cnst16);
 
-    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+         mod1_w, mod2_w, mod3_w);
 
     mod0_w = diff0_r & mod0_w;
     mod1_w = diff0_l & mod1_w;
@@ -131,8 +128,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
     UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
     MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
          mod0_w, mod1_w, mod2_w, mod3_w);
-    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+         mod2_w, mod3_w);
 
     ST_SW2(mod0_w, mod1_w, acc, 4);
     acc += 8;
@@ -141,13 +138,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
   }
 }
 
-static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
-                                             uint32_t stride,
+static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, uint32_t stride,
                                              uint8_t *frm2_ptr,
-                                             int32_t filt_sth,
-                                             int32_t filt_wgt,
-                                             uint32_t *acc,
-                                             uint16_t *cnt) {
+                                             int32_t filt_sth, int32_t filt_wgt,
+                                             uint32_t *acc, uint16_t *cnt) {
   uint32_t row;
   v16i8 frm1, frm2, frm3, frm4;
   v16u8 frm_r, frm_l;
@@ -183,8 +177,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
     UNPCK_SH_SW(diff1, diff1_r, diff1_l);
     MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
          mod0_w, mod1_w, mod2_w, mod3_w);
-    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+         mod1_w, mod2_w, mod3_w);
     SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
 
     diff0_r = (mod0_w < cnst16);
@@ -192,8 +186,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
     diff1_r = (mod2_w < cnst16);
     diff1_l = (mod3_w < cnst16);
 
-    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+         mod1_w, mod2_w, mod3_w);
 
     mod0_w = diff0_r & mod0_w;
     mod1_w = diff0_l & mod1_w;
@@ -212,8 +206,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
     UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
     MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
          mod0_w, mod1_w, mod2_w, mod3_w);
-    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+         mod2_w, mod3_w);
 
     ST_SW2(mod0_w, mod1_w, acc, 4);
     acc += 8;
@@ -230,8 +224,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
     UNPCK_SH_SW(diff1, diff1_r, diff1_l);
     MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
          mod0_w, mod1_w, mod2_w, mod3_w);
-    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+         mod1_w, mod2_w, mod3_w);
     SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
 
     diff0_r = (mod0_w < cnst16);
@@ -239,8 +233,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
     diff1_r = (mod2_w < cnst16);
     diff1_l = (mod3_w < cnst16);
 
-    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+         mod1_w, mod2_w, mod3_w);
 
     mod0_w = diff0_r & mod0_w;
     mod1_w = diff0_l & mod1_w;
@@ -259,8 +253,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
     UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
     MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
          mod0_w, mod1_w, mod2_w, mod3_w);
-    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
-         mod0_w, mod1_w, mod2_w, mod3_w);
+    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+         mod2_w, mod3_w);
     ST_SW2(mod0_w, mod1_w, acc, 4);
     acc += 8;
     ST_SW2(mod2_w, mod3_w, acc, 4);
@@ -272,18 +266,18 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
 }
 
 void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
-                                   uint8_t *frame2_ptr, uint32_t blk_w,
-                                   uint32_t blk_h, int32_t strength,
-                                   int32_t filt_wgt, uint32_t *accu,
-                                   uint16_t *cnt) {
+                                    uint8_t *frame2_ptr, uint32_t blk_w,
+                                    uint32_t blk_h, int32_t strength,
+                                    int32_t filt_wgt, uint32_t *accu,
+                                    uint16_t *cnt) {
   if (8 == (blk_w * blk_h)) {
-    temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr,
-                                    strength, filt_wgt, accu, cnt);
+    temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, strength,
+                                    filt_wgt, accu, cnt);
   } else if (16 == (blk_w * blk_h)) {
-    temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr,
-                                     strength, filt_wgt, accu, cnt);
+    temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
+                                     filt_wgt, accu, cnt);
   } else {
     vp10_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
-                                strength, filt_wgt, accu, cnt);
+                                 strength, filt_wgt, accu, cnt);
   }
 }
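For reference, the per-pixel arithmetic that both MSA loops above vectorize;
this mirrors the vp10_temporal_filter_apply_c fallback that the wrapper
dispatches to for other block sizes, with the rounding inside SRAR_W4_SW
simplified:

#include <stdint.h>

/* One pixel of the temporal filter: map the squared frame difference to
 * a 0..16 weight, scale by the filter weight, and accumulate. */
static void filter_pixel(uint8_t f1, uint8_t f2, int strength, int filt_wgt,
                         uint32_t *acc, uint16_t *cnt) {
  const int diff = f1 - f2;
  int mod = diff * diff * 3; /* the MUL4 ... cnst3 steps */
  mod >>= strength;          /* SRAR_W4_SW */
  if (mod > 16) mod = 16;    /* the (mod < cnst16) masks plus SUB4 */
  mod = 16 - mod;
  mod *= filt_wgt;
  *cnt += (uint16_t)mod;
  *acc += (uint32_t)mod * f2; /* MUL4/ADD4 into the accumulator */
}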
diff --git a/vp10/encoder/picklpf.c b/vp10/encoder/picklpf.c
index b8e692ea5fcb0a63154ccf91c32bbbdc14cf411c..0550df85f36bef3e7d8255d6dfff1881f4a1cf6f 100644
--- a/vp10/encoder/picklpf.c
+++ b/vp10/encoder/picklpf.c
@@ -34,20 +34,19 @@ static int get_max_filter_level(const VP10_COMP *cpi) {
   }
 }
 
-
 static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
-                                VP10_COMP *const cpi,
-                                int filt_level, int partial_frame) {
+                                VP10_COMP *const cpi, int filt_level,
+                                int partial_frame) {
   VP10_COMMON *const cm = &cpi->common;
   int64_t filt_err;
 
   if (cpi->num_workers > 1)
     vp10_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
-                             filt_level, 1, partial_frame,
-                             cpi->workers, cpi->num_workers, &cpi->lf_row_sync);
+                              filt_level, 1, partial_frame, cpi->workers,
+                              cpi->num_workers, &cpi->lf_row_sync);
   else
     vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
-                          1, partial_frame);
+                           1, partial_frame);
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (cm->use_highbitdepth) {
@@ -103,8 +102,7 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
       bias = (bias * cpi->twopass.section_intra_rating) / 20;
 
     // yx, bias less for large block size
-    if (cm->tx_mode != ONLY_4X4)
-      bias >>= 1;
+    if (cm->tx_mode != ONLY_4X4) bias >>= 1;
 
     if (filt_direction <= 0 && filt_low != filt_mid) {
       // Get Low filter error score
@@ -115,8 +113,7 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
       // filter value.
       if ((ss_err[filt_low] - bias) < best_err) {
         // Was it actually better than the previous best?
-        if (ss_err[filt_low] < best_err)
-          best_err = ss_err[filt_low];
+        if (ss_err[filt_low] < best_err) best_err = ss_err[filt_low];
 
         filt_best = filt_low;
       }
@@ -148,21 +145,20 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
 }
 
 void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
-                           LPF_PICK_METHOD method) {
+                            LPF_PICK_METHOD method) {
   VP10_COMMON *const cm = &cpi->common;
   struct loopfilter *const lf = &cm->lf;
 
-  lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0
-                                                    : cpi->oxcf.sharpness;
+  lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
 
   if (method == LPF_PICK_MINIMAL_LPF && lf->filter_level) {
-      lf->filter_level = 0;
+    lf->filter_level = 0;
   } else if (method >= LPF_PICK_FROM_Q) {
     const int min_filter_level = 0;
     const int max_filter_level = get_max_filter_level(cpi);
     const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
-    // These values were determined by linear fitting the result of the
-    // searched level, filt_guess = q * 0.316206 + 3.87252
+// These values were determined by linear fitting the result of the
+// searched level, filt_guess = q * 0.316206 + 3.87252
 #if CONFIG_VPX_HIGHBITDEPTH
     int filt_guess;
     switch (cm->bit_depth) {
@@ -176,18 +172,18 @@ void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
         filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
         break;
       default:
-        assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
-                    "or VPX_BITS_12");
+        assert(0 &&
+               "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
+               "or VPX_BITS_12");
         return;
     }
 #else
     int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
-    if (cm->frame_type == KEY_FRAME)
-      filt_guess -= 4;
+    if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
     lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
   } else {
-    lf->filter_level = search_filter_level(sd, cpi,
-                                           method == LPF_PICK_FROM_SUBIMAGE);
+    lf->filter_level =
+        search_filter_level(sd, cpi, method == LPF_PICK_FROM_SUBIMAGE);
   }
 }
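A sanity check on the LPF_PICK_FROM_Q constants above, assuming the standard
vpx ROUND_POWER_OF_TWO macro: the 8-bit branch computes
(q * 20723 + 1015158 + (1 << 17)) >> 18, and since 1015158 / 2^18 is about
3.87252 while 4 * 20723 / 2^18 is about 0.316206, this matches the quoted
linear fit with q taken on the "real Q" scale (the ac quantizer divided by 4,
as in the 8-bit case of vp10_convert_qindex_to_q). Sketch:

/* Standard definition from vpx_dsp_common.h. */
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

/* 8-bit branch of the guess, i.e. filt_guess ~= (q/4) * 0.316206 + 3.87252 */
static int filt_guess_8bit(int q) {
  return ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
}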
diff --git a/vp10/encoder/picklpf.h b/vp10/encoder/picklpf.h
index 21a8758ef414adc6946cd92cbe959818b701654f..7d5f167fded52e231c908867a32f7253f8d42501 100644
--- a/vp10/encoder/picklpf.h
+++ b/vp10/encoder/picklpf.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_PICKLPF_H_
 #define VP10_ENCODER_PICKLPF_H_
 
@@ -22,7 +21,7 @@ struct yv12_buffer_config;
 struct VP10_COMP;
 
 void vp10_pick_filter_level(const struct yv12_buffer_config *sd,
-                           struct VP10_COMP *cpi, LPF_PICK_METHOD method);
+                            struct VP10_COMP *cpi, LPF_PICK_METHOD method);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/vp10/encoder/quantize.c b/vp10/encoder/quantize.c
index 66a8e5f8c47d7cdf480d834c530a9c70f574fd7e..0688a69ca78bb549409718556c2a9385926eef00 100644
--- a/vp10/encoder/quantize.c
+++ b/vp10/encoder/quantize.c
@@ -21,13 +21,12 @@
 #include "vp10/encoder/rd.h"
 
 void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                       int skip_block,
-                       const int16_t *zbin_ptr, const int16_t *round_ptr,
-                       const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
-                       tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                       const int16_t *dequant_ptr,
-                       uint16_t *eob_ptr,
-                       const int16_t *scan, const int16_t *iscan) {
+                        int skip_block, const int16_t *zbin_ptr,
+                        const int16_t *round_ptr, const int16_t *quant_ptr,
+                        const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+                        tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                        uint16_t *eob_ptr, const int16_t *scan,
+                        const int16_t *iscan) {
   int i, eob = -1;
   // TODO(jingning) Decide the need of these arguments after the
   // quantization process is completed.
@@ -53,27 +52,21 @@ void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
       qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
       dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
 
-      if (tmp)
-        eob = i;
+      if (tmp) eob = i;
     }
   }
   *eob_ptr = eob + 1;
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr,
-                              intptr_t count,
-                              int skip_block,
-                              const int16_t *zbin_ptr,
-                              const int16_t *round_ptr,
-                              const int16_t *quant_ptr,
-                              const int16_t *quant_shift_ptr,
-                              tran_low_t *qcoeff_ptr,
-                              tran_low_t *dqcoeff_ptr,
-                              const int16_t *dequant_ptr,
-                              uint16_t *eob_ptr,
-                              const int16_t *scan,
-                              const int16_t *iscan) {
+void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
+                               int skip_block, const int16_t *zbin_ptr,
+                               const int16_t *round_ptr,
+                               const int16_t *quant_ptr,
+                               const int16_t *quant_shift_ptr,
+                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                               const int16_t *scan, const int16_t *iscan) {
   int i;
   int eob = -1;
   // TODO(jingning) Decide the need of these arguments after the
@@ -97,8 +90,7 @@ void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr,
       const uint32_t abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> 16);
       qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
       dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
-      if (abs_qcoeff)
-        eob = i;
+      if (abs_qcoeff) eob = i;
     }
   }
   *eob_ptr = eob + 1;
@@ -108,14 +100,13 @@ void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr,
 // TODO(jingning) Refactor this file and combine functions with similar
 // operations.
 void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                             int skip_block,
-                             const int16_t *zbin_ptr, const int16_t *round_ptr,
-                             const int16_t *quant_ptr,
-                             const int16_t *quant_shift_ptr,
-                             tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                             const int16_t *dequant_ptr,
-                             uint16_t *eob_ptr,
-                             const int16_t *scan, const int16_t *iscan) {
+                              int skip_block, const int16_t *zbin_ptr,
+                              const int16_t *round_ptr,
+                              const int16_t *quant_ptr,
+                              const int16_t *quant_shift_ptr,
+                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                              const int16_t *scan, const int16_t *iscan) {
   int i, eob = -1;
   (void)zbin_ptr;
   (void)quant_shift_ptr;
@@ -140,25 +131,19 @@ void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
         dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
       }
 
-      if (tmp)
-        eob = i;
+      if (tmp) eob = i;
     }
   }
   *eob_ptr = eob + 1;
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr,
-                                    intptr_t n_coeffs, int skip_block,
-                                    const int16_t *zbin_ptr,
-                                    const int16_t *round_ptr,
-                                    const int16_t *quant_ptr,
-                                    const int16_t *quant_shift_ptr,
-                                    tran_low_t *qcoeff_ptr,
-                                    tran_low_t *dqcoeff_ptr,
-                                    const int16_t *dequant_ptr,
-                                    uint16_t *eob_ptr,
-                                    const int16_t *scan, const int16_t *iscan) {
+void vp10_highbd_quantize_fp_32x32_c(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+    const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan) {
   int i, eob = -1;
   (void)zbin_ptr;
   (void)quant_shift_ptr;
@@ -176,15 +161,14 @@ void vp10_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr,
       const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
 
       if (abs_coeff >= (dequant_ptr[rc != 0] >> 2)) {
-        const int64_t tmp = abs_coeff
-                           + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
-        abs_qcoeff = (uint32_t) ((tmp * quant_ptr[rc != 0]) >> 15);
+        const int64_t tmp =
+            abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
+        abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> 15);
         qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
         dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
       }
 
-      if (abs_qcoeff)
-        eob = i;
+      if (abs_qcoeff) eob = i;
     }
   }
   *eob_ptr = eob + 1;
@@ -192,37 +176,33 @@ void vp10_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr,
 #endif
 
 void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
-                                const int16_t *scan, const int16_t *iscan) {
+                                 const int16_t *scan, const int16_t *iscan) {
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblock_plane *p = &x->plane[plane];
   struct macroblockd_plane *pd = &xd->plane[plane];
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block),
-                          16, x->skip_block,
+    vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block,
                           p->zbin, p->round, p->quant, p->quant_shift,
                           BLOCK_OFFSET(p->qcoeff, block),
-                          BLOCK_OFFSET(pd->dqcoeff, block),
-                          pd->dequant, &p->eobs[block],
-                          scan, iscan);
+                          BLOCK_OFFSET(pd->dqcoeff, block), pd->dequant,
+                          &p->eobs[block], scan, iscan);
     return;
   }
 #endif
-  vpx_quantize_b(BLOCK_OFFSET(p->coeff, block),
-                 16, x->skip_block,
-                 p->zbin, p->round, p->quant, p->quant_shift,
+  vpx_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block, p->zbin,
+                 p->round, p->quant, p->quant_shift,
                  BLOCK_OFFSET(p->qcoeff, block),
-                 BLOCK_OFFSET(pd->dqcoeff, block),
-                 pd->dequant, &p->eobs[block], scan, iscan);
+                 BLOCK_OFFSET(pd->dqcoeff, block), pd->dequant, &p->eobs[block],
+                 scan, iscan);
 }
 
 static void invert_quant(int16_t *quant, int16_t *shift, int d) {
   unsigned t;
   int l;
   t = d;
-  for (l = 0; t > 1; l++)
-    t >>= 1;
+  for (l = 0; t > 1; l++) t >>= 1;
   t = 1 + (1 << (16 + l)) / d;
   *quant = (int16_t)(t - (1 << 16));
   *shift = 1 << (16 - l);
@@ -232,18 +212,15 @@ static int get_qzbin_factor(int q, vpx_bit_depth_t bit_depth) {
   const int quant = vp10_dc_quant(q, 0, bit_depth);
 #if CONFIG_VPX_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8:
-      return q == 0 ? 64 : (quant < 148 ? 84 : 80);
-    case VPX_BITS_10:
-      return q == 0 ? 64 : (quant < 592 ? 84 : 80);
-    case VPX_BITS_12:
-      return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
+    case VPX_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80);
+    case VPX_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80);
+    case VPX_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1;
   }
 #else
-  (void) bit_depth;
+  (void)bit_depth;
   return q == 0 ? 64 : (quant < 148 ? 84 : 80);
 #endif
 }
@@ -259,8 +236,7 @@ void vp10_init_quantizer(VP10_COMP *cpi) {
 
     for (i = 0; i < 2; ++i) {
       int qrounding_factor_fp = i == 0 ? 48 : 42;
-      if (q == 0)
-        qrounding_factor_fp = 64;
+      if (q == 0) qrounding_factor_fp = 64;
 
       // y
       quant = i == 0 ? vp10_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
@@ -275,8 +251,8 @@ void vp10_init_quantizer(VP10_COMP *cpi) {
       // uv
       quant = i == 0 ? vp10_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
                      : vp10_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
-      invert_quant(&quants->uv_quant[q][i],
-                   &quants->uv_quant_shift[q][i], quant);
+      invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i],
+                   quant);
       quants->uv_quant_fp[q][i] = (1 << 16) / quant;
       quants->uv_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
       quants->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
@@ -364,14 +340,11 @@ void vp10_set_quantizer(VP10_COMMON *cm, int q) {
 // Table that converts 0-63 Q-range values passed in outside to the Qindex
 // range used internally.
 static const int quantizer_to_qindex[] = {
-  0,    4,   8,  12,  16,  20,  24,  28,
-  32,   36,  40,  44,  48,  52,  56,  60,
-  64,   68,  72,  76,  80,  84,  88,  92,
-  96,  100, 104, 108, 112, 116, 120, 124,
-  128, 132, 136, 140, 144, 148, 152, 156,
-  160, 164, 168, 172, 176, 180, 184, 188,
-  192, 196, 200, 204, 208, 212, 216, 220,
-  224, 228, 232, 236, 240, 244, 249, 255,
+  0,   4,   8,   12,  16,  20,  24,  28,  32,  36,  40,  44,  48,
+  52,  56,  60,  64,  68,  72,  76,  80,  84,  88,  92,  96,  100,
+  104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152,
+  156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204,
+  208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
 };
 
 int vp10_quantizer_to_qindex(int quantizer) {
@@ -382,8 +355,7 @@ int vp10_qindex_to_quantizer(int qindex) {
   int quantizer;
 
   for (quantizer = 0; quantizer < 64; ++quantizer)
-    if (quantizer_to_qindex[quantizer] >= qindex)
-      return quantizer;
+    if (quantizer_to_qindex[quantizer] >= qindex) return quantizer;
 
   return 63;
 }
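The invert_quant helper above is a reciprocal setup: with l = floor(log2(d))
and t = 1 + 2^(16+l) / d, the stored pair (quant, shift) = (t - 2^16,
2^(16-l)) fits in int16_t and lets the quantizer divide by d with 16-bit
multiplies, because ((x * quant) >> 16) + x == (x * t) >> 16. A hypothetical
stand-alone harness showing the round trip (arithmetic right shift assumed):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int d = 100; /* example dequant step */
  unsigned t = d;
  int l;
  for (l = 0; t > 1; l++) t >>= 1; /* l = floor(log2(d)) = 6 here */
  t = 1 + (1u << (16 + l)) / d;    /* reciprocal scaled by 2^(16+l) */
  {
    const int16_t quant = (int16_t)(t - (1 << 16));
    const int16_t shift = (int16_t)(1 << (16 - l));
    const int x = 5000;
    /* vpx_quantize_b-style use: two multiplies emulate x / d. */
    const int q1 = ((x * quant) >> 16) + x; /* == (x * t) >> 16 */
    const int q = (q1 * shift) >> 16;       /* ~= x / d */
    printf("%d ~= %d\n", q, x / d);         /* prints "50 ~= 50" */
  }
  return 0;
}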
diff --git a/vp10/encoder/quantize.h b/vp10/encoder/quantize.h
index b44088ecc679a558f29a40c25b4b2ae470cb5159..29f044c666528047d83e2695de6ea083e20cc115 100644
--- a/vp10/encoder/quantize.h
+++ b/vp10/encoder/quantize.h
@@ -38,7 +38,7 @@ typedef struct {
 } QUANTS;
 
 void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
-                                const int16_t *scan, const int16_t *iscan);
+                                 const int16_t *scan, const int16_t *iscan);
 
 struct VP10_COMP;
 struct VP10Common;
diff --git a/vp10/encoder/ratectrl.c b/vp10/encoder/ratectrl.c
index 1666b2ea6ba3193b64d209a13ad3ffccb6cde394..73593cb29fac2a08f9ec259ea16afaa91a4f1a6b 100644
--- a/vp10/encoder/ratectrl.c
+++ b/vp10/encoder/ratectrl.c
@@ -46,29 +46,24 @@
 #define FRAME_OVERHEAD_BITS 200
 
 #if CONFIG_VPX_HIGHBITDEPTH
-#define ASSIGN_MINQ_TABLE(bit_depth, name) \
-  do { \
-    switch (bit_depth) { \
-      case VPX_BITS_8: \
-        name = name##_8; \
-        break; \
-      case VPX_BITS_10: \
-        name = name##_10; \
-        break; \
-      case VPX_BITS_12: \
-        name = name##_12; \
-        break; \
-      default: \
-        assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
-                    " or VPX_BITS_12"); \
-        name = NULL; \
-    } \
+#define ASSIGN_MINQ_TABLE(bit_depth, name)                   \
+  do {                                                       \
+    switch (bit_depth) {                                     \
+      case VPX_BITS_8: name = name##_8; break;               \
+      case VPX_BITS_10: name = name##_10; break;             \
+      case VPX_BITS_12: name = name##_12; break;             \
+      default:                                               \
+        assert(0 &&                                          \
+               "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
+               " or VPX_BITS_12");                           \
+        name = NULL;                                         \
+    }                                                        \
   } while (0)
 #else
 #define ASSIGN_MINQ_TABLE(bit_depth, name) \
-  do { \
-    (void) bit_depth; \
-    name = name##_8; \
+  do {                                     \
+    (void) bit_depth;                      \
+    name = name##_8;                       \
   } while (0)
 #endif
 
@@ -111,20 +106,18 @@ static int get_minq_index(double maxq, double x3, double x2, double x1,
 
   // Special case handling to deal with the step from q2.0
   // down to lossless mode represented by q 1.0.
-  if (minqtarget <= 2.0)
-    return 0;
+  if (minqtarget <= 2.0) return 0;
 
   for (i = 0; i < QINDEX_RANGE; i++) {
-    if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth))
-      return i;
+    if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth)) return i;
   }
 
   return QINDEX_RANGE - 1;
 }
 
-static void init_minq_luts(int *kf_low_m, int *kf_high_m,
-                           int *arfgf_low, int *arfgf_high,
-                           int *inter, int *rtc, vpx_bit_depth_t bit_depth) {
+static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low,
+                           int *arfgf_high, int *inter, int *rtc,
+                           vpx_bit_depth_t bit_depth) {
   int i;
   for (i = 0; i < QINDEX_RANGE; i++) {
     const double maxq = vp10_convert_qindex_to_q(i, bit_depth);
@@ -155,15 +148,12 @@ void vp10_rc_init_minq_luts(void) {
 // quantizer tables easier. If necessary they can be replaced by lookup
 // tables if and when things settle down in the experimental bitstream
 double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) {
-  // Convert the index to a real Q value (scaled down to match old Q values)
+// Convert the index to a real Q value (scaled down to match old Q values)
 #if CONFIG_VPX_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8:
-      return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
-    case VPX_BITS_10:
-      return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
-    case VPX_BITS_12:
-      return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
+    case VPX_BITS_8: return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
+    case VPX_BITS_10: return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
+    case VPX_BITS_12: return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1.0;
@@ -174,8 +164,7 @@ double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) {
 }
 
 int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
-                       double correction_factor,
-                       vpx_bit_depth_t bit_depth) {
+                        double correction_factor, vpx_bit_depth_t bit_depth) {
   const double q = vp10_convert_qindex_to_q(qindex, bit_depth);
   int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
 
@@ -188,10 +177,10 @@ int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
 }
 
 int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
-                           double correction_factor,
-                           vpx_bit_depth_t bit_depth) {
-  const int bpm = (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor,
-                                           bit_depth));
+                            double correction_factor,
+                            vpx_bit_depth_t bit_depth) {
+  const int bpm =
+      (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
   return VPXMAX(FRAME_OVERHEAD_BITS,
                 (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS);
 }
@@ -199,10 +188,9 @@ int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
 int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
   const RATE_CONTROL *rc = &cpi->rc;
   const VP10EncoderConfig *oxcf = &cpi->oxcf;
-  const int min_frame_target = VPXMAX(rc->min_frame_bandwidth,
-                                      rc->avg_frame_bandwidth >> 5);
-  if (target < min_frame_target)
-    target = min_frame_target;
+  const int min_frame_target =
+      VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
+  if (target < min_frame_target) target = min_frame_target;
   if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) {
     // If there is an active ARF at this location use the minimum
     // bits on this frame even if it is a constructed arf.
@@ -211,11 +199,10 @@ int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
     target = min_frame_target;
   }
   // Clip the frame target to the maximum allowed value.
-  if (target > rc->max_frame_bandwidth)
-    target = rc->max_frame_bandwidth;
+  if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
   if (oxcf->rc_max_inter_bitrate_pct) {
-    const int max_rate = rc->avg_frame_bandwidth *
-                         oxcf->rc_max_inter_bitrate_pct / 100;
+    const int max_rate =
+        rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
     target = VPXMIN(target, max_rate);
   }
   return target;
@@ -225,12 +212,11 @@ int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) {
   const RATE_CONTROL *rc = &cpi->rc;
   const VP10EncoderConfig *oxcf = &cpi->oxcf;
   if (oxcf->rc_max_intra_bitrate_pct) {
-    const int max_rate = rc->avg_frame_bandwidth *
-                             oxcf->rc_max_intra_bitrate_pct / 100;
+    const int max_rate =
+        rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100;
     target = VPXMIN(target, max_rate);
   }
-  if (target > rc->max_frame_bandwidth)
-    target = rc->max_frame_bandwidth;
+  if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
   return target;
 }
 
@@ -251,8 +237,8 @@ static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) {
   rc->buffer_level = rc->bits_off_target;
 }
 
-int vp10_rc_get_default_min_gf_interval(
-    int width, int height, double framerate) {
+int vp10_rc_get_default_min_gf_interval(int width, int height,
+                                        double framerate) {
   // Assume we do not need any constraint lower than 4K 20 fps
   static const double factor_safe = 3840 * 2160 * 20.0;
   const double factor = width * height * framerate;
@@ -283,20 +269,20 @@ void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
     rc->avg_frame_qindex[KEY_FRAME] = oxcf->worst_allowed_q;
     rc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q;
   } else {
-    rc->avg_frame_qindex[KEY_FRAME] = (oxcf->worst_allowed_q +
-                                       oxcf->best_allowed_q) / 2;
-    rc->avg_frame_qindex[INTER_FRAME] = (oxcf->worst_allowed_q +
-                                         oxcf->best_allowed_q) / 2;
+    rc->avg_frame_qindex[KEY_FRAME] =
+        (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
+    rc->avg_frame_qindex[INTER_FRAME] =
+        (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
   }
 
   rc->last_q[KEY_FRAME] = oxcf->best_allowed_q;
   rc->last_q[INTER_FRAME] = oxcf->worst_allowed_q;
 
-  rc->buffer_level =    rc->starting_buffer_level;
+  rc->buffer_level = rc->starting_buffer_level;
   rc->bits_off_target = rc->starting_buffer_level;
 
-  rc->rolling_target_bits      = rc->avg_frame_bandwidth;
-  rc->rolling_actual_bits      = rc->avg_frame_bandwidth;
+  rc->rolling_target_bits = rc->avg_frame_bandwidth;
+  rc->rolling_actual_bits = rc->avg_frame_bandwidth;
   rc->long_rolling_target_bits = rc->avg_frame_bandwidth;
   rc->long_rolling_actual_bits = rc->avg_frame_bandwidth;
 
@@ -346,13 +332,11 @@ int vp10_rc_drop_frame(VP10_COMP *cpi) {
     } else {
       // If buffer is below drop_mark, for now just drop every other frame
       // (starting with the next frame) until it increases back over drop_mark.
-      int drop_mark = (int)(oxcf->drop_frames_water_mark *
-          rc->optimal_buffer_level / 100);
-      if ((rc->buffer_level > drop_mark) &&
-          (rc->decimation_factor > 0)) {
+      int drop_mark =
+          (int)(oxcf->drop_frames_water_mark * rc->optimal_buffer_level / 100);
+      if ((rc->buffer_level > drop_mark) && (rc->decimation_factor > 0)) {
         --rc->decimation_factor;
-      } else if (rc->buffer_level <= drop_mark &&
-          rc->decimation_factor == 0) {
+      } else if (rc->buffer_level <= drop_mark && rc->decimation_factor == 0) {
         rc->decimation_factor = 1;
       }
       if (rc->decimation_factor > 0) {
@@ -379,7 +363,7 @@ static double get_rate_correction_factor(const VP10_COMP *cpi) {
     rcf = rc->rate_correction_factors[KF_STD];
   } else if (cpi->oxcf.pass == 2) {
     RATE_FACTOR_LEVEL rf_lvl =
-      cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
+        cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
     rcf = rc->rate_correction_factors[rf_lvl];
   } else {
     if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
@@ -405,7 +389,7 @@ static void set_rate_correction_factor(VP10_COMP *cpi, double factor) {
     rc->rate_correction_factors[KF_STD] = factor;
   } else if (cpi->oxcf.pass == 2) {
     RATE_FACTOR_LEVEL rf_lvl =
-      cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
+        cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
     rc->rate_correction_factors[rf_lvl] = factor;
   } else {
     if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
@@ -426,8 +410,7 @@ void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
   int projected_size_based_on_q = 0;
 
   // Do not update the rate factors for arf overlay frames.
-  if (cpi->rc.is_src_frame_alt_ref)
-    return;
+  if (cpi->rc.is_src_frame_alt_ref) return;
 
   // Clear down mmx registers to allow floating point in what follows
   vpx_clear_system_state();
@@ -439,21 +422,19 @@ void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
     projected_size_based_on_q =
         vp10_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
   } else {
-    projected_size_based_on_q = vp10_estimate_bits_at_q(cpi->common.frame_type,
-                                                       cm->base_qindex,
-                                                       cm->MBs,
-                                                       rate_correction_factor,
-                                                       cm->bit_depth);
+    projected_size_based_on_q =
+        vp10_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
+                                cm->MBs, rate_correction_factor, cm->bit_depth);
   }
   // Work out a size correction factor.
   if (projected_size_based_on_q > FRAME_OVERHEAD_BITS)
     correction_factor = (int)((100 * (int64_t)cpi->rc.projected_frame_size) /
-                        projected_size_based_on_q);
+                              projected_size_based_on_q);
 
   // More heavily damped adjustment used if we have been oscillating either side
   // of target.
-  adjustment_limit = 0.25 +
-      0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
+  adjustment_limit =
+      0.25 + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
 
   cpi->rc.q_2_frame = cpi->rc.q_1_frame;
   cpi->rc.q_1_frame = cm->base_qindex;
@@ -467,16 +448,16 @@ void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
 
   if (correction_factor > 102) {
     // We are not already at the worst allowable quality
-    correction_factor = (int)(100 + ((correction_factor - 100) *
-                                  adjustment_limit));
+    correction_factor =
+        (int)(100 + ((correction_factor - 100) * adjustment_limit));
     rate_correction_factor = (rate_correction_factor * correction_factor) / 100;
     // Keep rate_correction_factor within limits
     if (rate_correction_factor > MAX_BPB_FACTOR)
       rate_correction_factor = MAX_BPB_FACTOR;
   } else if (correction_factor < 99) {
     // We are not already at the best allowable quality
-    correction_factor = (int)(100 - ((100 - correction_factor) *
-                                  adjustment_limit));
+    correction_factor =
+        (int)(100 - ((100 - correction_factor) * adjustment_limit));
     rate_correction_factor = (rate_correction_factor * correction_factor) / 100;
 
     // Keep rate_correction_factor within limits
@@ -487,9 +468,8 @@ void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
   set_rate_correction_factor(cpi, rate_correction_factor);
 }
 
-
 int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
-                      int active_best_quality, int active_worst_quality) {
+                       int active_best_quality, int active_worst_quality) {
   const VP10_COMMON *const cm = &cpi->common;
   int q = active_worst_quality;
   int last_error = INT_MAX;
@@ -508,9 +488,8 @@ int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
       bits_per_mb_at_this_q =
           (int)vp10_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
     } else {
-      bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(cm->frame_type, i,
-                                                      correction_factor,
-                                                      cm->bit_depth);
+      bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(
+          cm->frame_type, i, correction_factor, cm->bit_depth);
     }
 
     if (bits_per_mb_at_this_q <= target_bits_per_mb) {
@@ -577,13 +556,13 @@ static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) {
   int active_worst_quality;
 
   if (cpi->common.frame_type == KEY_FRAME) {
-    active_worst_quality = curr_frame == 0 ? rc->worst_quality
-                                           : rc->last_q[KEY_FRAME] * 2;
+    active_worst_quality =
+        curr_frame == 0 ? rc->worst_quality : rc->last_q[KEY_FRAME] * 2;
   } else {
     if (!rc->is_src_frame_alt_ref &&
         (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
-      active_worst_quality =  curr_frame == 1 ? rc->last_q[KEY_FRAME] * 5 / 4
-                                              : rc->last_q[INTER_FRAME];
+      active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 5 / 4
+                                             : rc->last_q[INTER_FRAME];
     } else {
       active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 2
                                              : rc->last_q[INTER_FRAME] * 2;
@@ -607,28 +586,27 @@ static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) {
   int adjustment = 0;
   int active_worst_quality;
   int ambient_qp;
-  if (cm->frame_type == KEY_FRAME)
-    return rc->worst_quality;
+  if (cm->frame_type == KEY_FRAME) return rc->worst_quality;
   // For ambient_qp we use the minimum of
   // avg_frame_qindex[KEY_FRAME/INTER_FRAME] for the first few frames
   // following a key frame. These are both initialized to worst_quality and
   // updated with a (3/4, 1/4) average in postencode_update. So for the first
   // few frames following a key frame, the qp of that key frame is weighted
   // into the active_worst_quality setting.
-  ambient_qp = (cm->current_video_frame < 5) ?
-                   VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
-                          rc->avg_frame_qindex[KEY_FRAME]) :
-                   rc->avg_frame_qindex[INTER_FRAME];
+  ambient_qp = (cm->current_video_frame < 5)
+                   ? VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
+                            rc->avg_frame_qindex[KEY_FRAME])
+                   : rc->avg_frame_qindex[INTER_FRAME];
   active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4);
   if (rc->buffer_level > rc->optimal_buffer_level) {
     // Adjust down.
     // Maximum limit for down adjustment, ~30%.
     int max_adjustment_down = active_worst_quality / 3;
     if (max_adjustment_down) {
-      buff_lvl_step = ((rc->maximum_buffer_size -
-                        rc->optimal_buffer_level) / max_adjustment_down);
+      buff_lvl_step = ((rc->maximum_buffer_size - rc->optimal_buffer_level) /
+                       max_adjustment_down);
       if (buff_lvl_step)
         adjustment = (int)((rc->buffer_level - rc->optimal_buffer_level) /
-                            buff_lvl_step);
+                           buff_lvl_step);
       active_worst_quality -= adjustment;
     }
   } else if (rc->buffer_level > critical_level) {
@@ -668,18 +646,16 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
     if (rc->this_key_frame_forced) {
       int qindex = rc->last_boosted_qindex;
       double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(rc, last_boosted_q,
-                                            (last_boosted_q * 0.75),
-                                            cm->bit_depth);
+      int delta_qindex = vp10_compute_qdelta(
+          rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
     } else if (cm->current_video_frame > 0) {
       // not first frame of one pass and kf_boost is set
       double q_adj_factor = 1.0;
       double q_val;
 
-      active_best_quality =
-          get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME],
-                                cm->bit_depth);
+      active_best_quality = get_kf_active_quality(
+          rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth);
 
       // Allow somewhat lower kf minq with small image formats.
       if ((cm->width * cm->height) <= (352 * 288)) {
@@ -689,9 +665,8 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
       q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
-      active_best_quality += vp10_compute_qdelta(rc, q_val,
-                                                q_val * q_adj_factor,
-                                                cm->bit_depth);
+      active_best_quality +=
+          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -721,24 +696,22 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
   }
 
   // Clip the active best and worst quality values to limits
-  active_best_quality = clamp(active_best_quality,
-                              rc->best_quality, rc->worst_quality);
-  active_worst_quality = clamp(active_worst_quality,
-                               active_best_quality, rc->worst_quality);
+  active_best_quality =
+      clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+  active_worst_quality =
+      clamp(active_worst_quality, active_best_quality, rc->worst_quality);
 
   *top_index = active_worst_quality;
   *bottom_index = active_best_quality;
 
 #if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
   // Limit Q range for the adaptive loop.
-  if (cm->frame_type == KEY_FRAME &&
-      !rc->this_key_frame_forced  &&
+  if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
       !(cm->current_video_frame == 0)) {
     int qdelta = 0;
     vpx_clear_system_state();
-    qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
-                                        active_worst_quality, 2.0,
-                                        cm->bit_depth);
+    qdelta = vp10_compute_qdelta_by_rate(
+        &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
     *top_index = active_worst_quality + qdelta;
     *top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index;
   }
@@ -748,8 +721,8 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
   if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) {
     q = rc->last_boosted_qindex;
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
-                          active_best_quality, active_worst_quality);
+    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                           active_worst_quality);
     if (q > *top_index) {
       // Special case when we are targeting the max allowed rate
       if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -758,8 +731,7 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
         q = *top_index;
     }
   }
-  assert(*top_index <= rc->worst_quality &&
-         *top_index >= rc->best_quality);
+  assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
   assert(*bottom_index <= rc->worst_quality &&
          *bottom_index >= rc->best_quality);
   assert(q <= rc->worst_quality && q >= rc->best_quality);
@@ -770,8 +742,7 @@ static int get_active_cq_level(const RATE_CONTROL *rc,
                                const VP10EncoderConfig *const oxcf) {
   static const double cq_adjust_threshold = 0.1;
   int active_cq_level = oxcf->cq_level;
-  if (oxcf->rc_mode == VPX_CQ &&
-      rc->total_target_bits > 0) {
+  if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) {
     const double x = (double)rc->total_actual_bits / rc->total_target_bits;
     if (x < cq_adjust_threshold) {
       active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold);
@@ -797,24 +768,21 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
     if (oxcf->rc_mode == VPX_Q) {
       int qindex = cq_level;
       double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25,
-                                             cm->bit_depth);
+      int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
     } else if (rc->this_key_frame_forced) {
       int qindex = rc->last_boosted_qindex;
       double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(rc, last_boosted_q,
-                                             last_boosted_q * 0.75,
-                                             cm->bit_depth);
+      int delta_qindex = vp10_compute_qdelta(
+          rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
     } else {
       // not first frame of one pass and kf_boost is set
       double q_adj_factor = 1.0;
       double q_val;
 
-      active_best_quality =
-          get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME],
-                                cm->bit_depth);
+      active_best_quality = get_kf_active_quality(
+          rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth);
 
       // Allow somewhat lower kf minq with small image formats.
       if ((cm->width * cm->height) <= (352 * 288)) {
@@ -824,9 +792,8 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
       q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
-      active_best_quality += vp10_compute_qdelta(rc, q_val,
-                                                 q_val * q_adj_factor,
-                                                 cm->bit_depth);
+      active_best_quality +=
+          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -841,8 +808,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
     }
     // For constrained quality don't allow Q less than the cq level
     if (oxcf->rc_mode == VPX_CQ) {
-      if (q < cq_level)
-        q = cq_level;
+      if (q < cq_level) q = cq_level;
 
       active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
 
@@ -865,12 +831,11 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
     if (oxcf->rc_mode == VPX_Q) {
       int qindex = cq_level;
       double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      double delta_rate[FIXED_GF_INTERVAL] =
-          {0.50, 1.0, 0.85, 1.0, 0.70, 1.0, 0.85, 1.0};
-      int delta_qindex =
-          vp10_compute_qdelta(rc, q,
-                              q * delta_rate[cm->current_video_frame %
-                              FIXED_GF_INTERVAL], cm->bit_depth);
+      double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0,
+                                               0.70, 1.0, 0.85, 1.0 };
+      int delta_qindex = vp10_compute_qdelta(
+          rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL],
+          cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
     } else {
       // Use the lower of active_worst_quality and recent/average Q.
@@ -880,18 +845,17 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
         active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]];
       // For the constrained quality mode we don't want
       // q to fall below the cq level.
-      if ((oxcf->rc_mode == VPX_CQ) &&
-          (active_best_quality < cq_level)) {
+      if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
         active_best_quality = cq_level;
       }
     }
   }
 
   // Clip the active best and worst quality values to limits
-  active_best_quality = clamp(active_best_quality,
-                              rc->best_quality, rc->worst_quality);
-  active_worst_quality = clamp(active_worst_quality,
-                               active_best_quality, rc->worst_quality);
+  active_best_quality =
+      clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+  active_worst_quality =
+      clamp(active_worst_quality, active_best_quality, rc->worst_quality);
 
   *top_index = active_worst_quality;
   *bottom_index = active_best_quality;
@@ -902,17 +866,14 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
     vpx_clear_system_state();
 
     // Limit Q range for the adaptive loop.
-    if (cm->frame_type == KEY_FRAME &&
-        !rc->this_key_frame_forced &&
+    if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
         !(cm->current_video_frame == 0)) {
-      qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
-                                          active_worst_quality, 2.0,
-                                          cm->bit_depth);
+      qdelta = vp10_compute_qdelta_by_rate(
+          &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
     } else if (!rc->is_src_frame_alt_ref &&
                (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
-      qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
-                                          active_worst_quality, 1.75,
-                                          cm->bit_depth);
+      qdelta = vp10_compute_qdelta_by_rate(
+          &cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth);
     }
     *top_index = active_worst_quality + qdelta;
     *top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index;
@@ -921,12 +882,12 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
 
   if (oxcf->rc_mode == VPX_Q) {
     q = active_best_quality;
-  // Special case code to try and match quality with forced key frames
+    // Special case code to try and match quality with forced key frames
   } else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
     q = rc->last_boosted_qindex;
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
-                          active_best_quality, active_worst_quality);
+    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                           active_worst_quality);
     if (q > *top_index) {
       // Special case when we are targeting the max allowed rate
       if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -936,8 +897,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
     }
   }
 
-  assert(*top_index <= rc->worst_quality &&
-         *top_index >= rc->best_quality);
+  assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
   assert(*bottom_index <= rc->worst_quality &&
          *bottom_index >= rc->best_quality);
   assert(q <= rc->worst_quality && q >= rc->best_quality);
@@ -952,19 +912,19 @@ int vp10_frame_type_qdelta(const VP10_COMP *cpi, int rf_level, int q) {
     1.75,  // GF_ARF_STD
     2.00,  // KF_STD
   };
-  static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] =
-      {INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME};
+  static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] = {
+    INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME
+  };
   const VP10_COMMON *const cm = &cpi->common;
-  int qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level],
-                                          q, rate_factor_deltas[rf_level],
-                                          cm->bit_depth);
+  int qdelta =
+      vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
+                                  rate_factor_deltas[rf_level], cm->bit_depth);
   return qdelta;
 }
 
 #define STATIC_MOTION_THRESH 95
 static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
-                                         int *bottom_index,
-                                         int *top_index) {
+                                         int *bottom_index, int *top_index) {
   const VP10_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
   const VP10EncoderConfig *const oxcf = &cpi->oxcf;
@@ -989,17 +949,15 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
         qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
         active_best_quality = qindex;
         last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-        delta_qindex = vp10_compute_qdelta(rc, last_boosted_q,
-                                              last_boosted_q * 1.25,
-                                              cm->bit_depth);
+        delta_qindex = vp10_compute_qdelta(
+            rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth);
         active_worst_quality =
             VPXMIN(qindex + delta_qindex, active_worst_quality);
       } else {
         qindex = rc->last_boosted_qindex;
         last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-        delta_qindex = vp10_compute_qdelta(rc, last_boosted_q,
-                                              last_boosted_q * 0.75,
-                                              cm->bit_depth);
+        delta_qindex = vp10_compute_qdelta(
+            rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
         active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
       }
     } else {
@@ -1007,8 +965,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
       double q_adj_factor = 1.0;
       double q_val;
       // Baseline value derived from cpi->active_worst_quality and kf boost.
-      active_best_quality = get_kf_active_quality(rc, active_worst_quality,
-                                                  cm->bit_depth);
+      active_best_quality =
+          get_kf_active_quality(rc, active_worst_quality, cm->bit_depth);
 
       // Allow somewhat lower kf minq with small image formats.
       if ((cm->width * cm->height) <= (352 * 288)) {
@@ -1021,9 +979,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
       q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
-      active_best_quality += vp10_compute_qdelta(rc, q_val,
-                                                q_val * q_adj_factor,
-                                                cm->bit_depth);
+      active_best_quality +=
+          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -1038,8 +995,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
     }
     // For constrained quality don't allow Q less than the cq level
     if (oxcf->rc_mode == VPX_CQ) {
-      if (q < cq_level)
-        q = cq_level;
+      if (q < cq_level) q = cq_level;
 
       active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
 
@@ -1050,7 +1006,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
       if (!cpi->refresh_alt_ref_frame) {
         active_best_quality = cq_level;
       } else {
-       active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
+        active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
 
         // Modify best quality for second level arfs. For mode VPX_Q this
         // becomes the baseline frame q.
@@ -1068,8 +1024,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
 
       // For the constrained quality mode we don't want
       // q to fall below the cq level.
-      if ((oxcf->rc_mode == VPX_CQ) &&
-          (active_best_quality < cq_level)) {
+      if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
         active_best_quality = cq_level;
       }
     }
@@ -1083,11 +1038,11 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
         (!rc->is_src_frame_alt_ref &&
          (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
       active_best_quality -=
-        (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
+          (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
       active_worst_quality += (cpi->twopass.extend_maxq / 2);
     } else {
       active_best_quality -=
-        (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
+          (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
       active_worst_quality += cpi->twopass.extend_maxq;
     }
   }
@@ -1095,33 +1050,31 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
 #if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
   vpx_clear_system_state();
   // Static forced key frames Q restrictions dealt with elsewhere.
-  if (!(frame_is_intra_only(cm)) ||
-      !rc->this_key_frame_forced ||
+  if (!(frame_is_intra_only(cm)) || !rc->this_key_frame_forced ||
       (cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
-    int qdelta = vp10_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index],
-                                       active_worst_quality);
-    active_worst_quality = VPXMAX(active_worst_quality + qdelta,
-                                  active_best_quality);
+    int qdelta = vp10_frame_type_qdelta(
+        cpi, gf_group->rf_level[gf_group->index], active_worst_quality);
+    active_worst_quality =
+        VPXMAX(active_worst_quality + qdelta, active_best_quality);
   }
 #endif
 
   // Modify active_best_quality for downscaled normal frames.
   if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) {
-    int qdelta = vp10_compute_qdelta_by_rate(rc, cm->frame_type,
-                                            active_best_quality, 2.0,
-                                            cm->bit_depth);
+    int qdelta = vp10_compute_qdelta_by_rate(
+        rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth);
     active_best_quality =
         VPXMAX(active_best_quality + qdelta, rc->best_quality);
   }
 
-  active_best_quality = clamp(active_best_quality,
-                              rc->best_quality, rc->worst_quality);
-  active_worst_quality = clamp(active_worst_quality,
-                               active_best_quality, rc->worst_quality);
+  active_best_quality =
+      clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+  active_worst_quality =
+      clamp(active_worst_quality, active_best_quality, rc->worst_quality);
 
   if (oxcf->rc_mode == VPX_Q) {
     q = active_best_quality;
-  // Special case code to try and match quality with forced key frames.
+    // Special case code to try and match quality with forced key frames.
   } else if (frame_is_intra_only(cm) && rc->this_key_frame_forced) {
     // If static since last kf use better of last boosted and last kf q.
     if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
@@ -1130,8 +1083,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
       q = rc->last_boosted_qindex;
     }
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
-                          active_best_quality, active_worst_quality);
+    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                           active_worst_quality);
     if (q > active_worst_quality) {
       // Special case when we are targeting the max allowed rate.
       if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -1145,16 +1098,15 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
   *top_index = active_worst_quality;
   *bottom_index = active_best_quality;
 
-  assert(*top_index <= rc->worst_quality &&
-         *top_index >= rc->best_quality);
+  assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
   assert(*bottom_index <= rc->worst_quality &&
          *bottom_index >= rc->best_quality);
   assert(q <= rc->worst_quality && q >= rc->best_quality);
   return q;
 }
 
-int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi,
-                             int *bottom_index, int *top_index) {
+int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, int *bottom_index,
+                              int *top_index) {
   int q;
   if (cpi->oxcf.pass == 0) {
     if (cpi->oxcf.rc_mode == VPX_CBR)
@@ -1168,20 +1120,19 @@ int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi,
   return q;
 }
 
-void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi,
-                                      int frame_target,
-                                      int *frame_under_shoot_limit,
-                                      int *frame_over_shoot_limit) {
+void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, int frame_target,
+                                       int *frame_under_shoot_limit,
+                                       int *frame_over_shoot_limit) {
   if (cpi->oxcf.rc_mode == VPX_Q) {
     *frame_under_shoot_limit = 0;
-    *frame_over_shoot_limit  = INT_MAX;
+    *frame_over_shoot_limit = INT_MAX;
   } else {
     // For very small rate targets where the fractional adjustment
     // may be tiny, make sure there is at least a minimum range.
     const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100;
     *frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0);
-    *frame_over_shoot_limit = VPXMIN(frame_target + tolerance + 200,
-                                     cpi->rc.max_frame_bandwidth);
+    *frame_over_shoot_limit =
+        VPXMIN(frame_target + tolerance + 200, cpi->rc.max_frame_bandwidth);
   }
 }
 
@@ -1194,12 +1145,12 @@ void vp10_rc_set_frame_target(VP10_COMP *cpi, int target) {
   // Modify frame size target when down-scaling.
   if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
       rc->frame_size_selector != UNSCALED)
-    rc->this_frame_target = (int)(rc->this_frame_target
-        * rate_thresh_mult[rc->frame_size_selector]);
+    rc->this_frame_target = (int)(rc->this_frame_target *
+                                  rate_thresh_mult[rc->frame_size_selector]);
 
   // Target rate per SB64 (including partial SB64s).
-  rc->sb64_target_rate = ((int64_t)rc->this_frame_target * 64 * 64) /
-                             (cm->width * cm->height);
+  rc->sb64_target_rate =
+      ((int64_t)rc->this_frame_target * 64 * 64) / (cm->width * cm->height);
 }
 
 static void update_alt_ref_frame_stats(VP10_COMP *cpi) {
@@ -1233,13 +1184,11 @@ static void update_golden_frame_stats(VP10_COMP *cpi) {
     }
 
     // Decrement count down till next gf
-    if (rc->frames_till_gf_update_due > 0)
-      rc->frames_till_gf_update_due--;
+    if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--;
 
   } else if (!cpi->refresh_alt_ref_frame) {
     // Decrement count down till next gf
-    if (rc->frames_till_gf_update_due > 0)
-      rc->frames_till_gf_update_due--;
+    if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--;
 
     rc->frames_since_golden++;
   }
@@ -1271,7 +1220,7 @@ void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
         !(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
       rc->last_q[INTER_FRAME] = qindex;
       rc->avg_frame_qindex[INTER_FRAME] =
-        ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
+          ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
       rc->ni_frames++;
       rc->tot_q += vp10_convert_qindex_to_q(qindex, cm->bit_depth);
       rc->avg_q = rc->tot_q / rc->ni_frames;
@@ -1287,15 +1236,13 @@ void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
   // If all mbs in this group are skipped, only update if the Q value is
   // better than that already stored.
   // This is used to help set quality in forced key frames to reduce popping.
-  if ((qindex < rc->last_boosted_qindex) ||
-      (cm->frame_type == KEY_FRAME) ||
+  if ((qindex < rc->last_boosted_qindex) || (cm->frame_type == KEY_FRAME) ||
       (!rc->constrained_gf_group &&
        (cpi->refresh_alt_ref_frame ||
         (cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) {
     rc->last_boosted_qindex = qindex;
   }
-  if (cm->frame_type == KEY_FRAME)
-    rc->last_kf_qindex = qindex;
+  if (cm->frame_type == KEY_FRAME) rc->last_kf_qindex = qindex;
 
   update_buffer_level(cpi, rc->projected_frame_size);
 
@@ -1326,8 +1273,7 @@ void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
     // Update the Golden frame stats as appropriate.
     update_golden_frame_stats(cpi);
 
-  if (cm->frame_type == KEY_FRAME)
-    rc->frames_since_key = 0;
+  if (cm->frame_type == KEY_FRAME) rc->frames_since_key = 0;
   if (cm->show_frame) {
     rc->frames_since_key++;
     rc->frames_to_key--;
@@ -1351,19 +1297,20 @@ void vp10_rc_postencode_update_drop_frame(VP10_COMP *cpi) {
 }
 
 // Use this macro to turn on/off use of alt-refs in one-pass mode.
-#define USE_ALTREF_FOR_ONE_PASS   1
+#define USE_ALTREF_FOR_ONE_PASS 1
 
 static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
   static const int af_ratio = 10;
   const RATE_CONTROL *const rc = &cpi->rc;
   int target;
 #if USE_ALTREF_FOR_ONE_PASS
-  target = (!rc->is_src_frame_alt_ref &&
-            (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) ?
-      (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) /
-      (rc->baseline_gf_interval + af_ratio - 1) :
-      (rc->avg_frame_bandwidth * rc->baseline_gf_interval) /
-      (rc->baseline_gf_interval + af_ratio - 1);
+  target =
+      (!rc->is_src_frame_alt_ref &&
+       (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))
+          ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) /
+                (rc->baseline_gf_interval + af_ratio - 1)
+          : (rc->avg_frame_bandwidth * rc->baseline_gf_interval) /
+                (rc->baseline_gf_interval + af_ratio - 1);
 #else
   target = rc->avg_frame_bandwidth;
 #endif
@@ -1383,13 +1330,11 @@ void vp10_rc_get_one_pass_vbr_params(VP10_COMP *cpi) {
   int target;
   // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
   if (!cpi->refresh_alt_ref_frame &&
-      (cm->current_video_frame == 0 ||
-       (cpi->frame_flags & FRAMEFLAGS_KEY) ||
-       rc->frames_to_key == 0 ||
-       (cpi->oxcf.auto_key && 0))) {
+      (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+       rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
     cm->frame_type = KEY_FRAME;
-    rc->this_key_frame_forced = cm->current_video_frame != 0 &&
-                                rc->frames_to_key == 0;
+    rc->this_key_frame_forced =
+        cm->current_video_frame != 0 && rc->frames_to_key == 0;
     rc->frames_to_key = cpi->oxcf.key_freq;
     rc->kf_boost = DEFAULT_KF_BOOST;
     rc->source_alt_ref_active = 0;
@@ -1428,11 +1373,12 @@ static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
 
   if (oxcf->gf_cbr_boost_pct) {
     const int af_ratio_pct = oxcf->gf_cbr_boost_pct + 100;
-    target =  cpi->refresh_golden_frame ?
-      (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio_pct) /
-      (rc->baseline_gf_interval * 100 + af_ratio_pct - 100) :
-      (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) /
-      (rc->baseline_gf_interval * 100 + af_ratio_pct - 100);
+    target = cpi->refresh_golden_frame
+                 ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval *
+                    af_ratio_pct) /
+                       (rc->baseline_gf_interval * 100 + af_ratio_pct - 100)
+                 : (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) /
+                       (rc->baseline_gf_interval * 100 + af_ratio_pct - 100);
   } else {
     target = rc->avg_frame_bandwidth;
   }
@@ -1448,8 +1394,8 @@ static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
     target += (target * pct_high) / 200;
   }
   if (oxcf->rc_max_inter_bitrate_pct) {
-    const int max_rate = rc->avg_frame_bandwidth *
-                         oxcf->rc_max_inter_bitrate_pct / 100;
+    const int max_rate =
+        rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
     target = VPXMIN(target, max_rate);
   }
   return VPXMAX(min_frame_target, target);
@@ -1460,15 +1406,15 @@ static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
   int target;
   if (cpi->common.current_video_frame == 0) {
     target = ((rc->starting_buffer_level / 2) > INT_MAX)
-      ? INT_MAX : (int)(rc->starting_buffer_level / 2);
+                 ? INT_MAX
+                 : (int)(rc->starting_buffer_level / 2);
   } else {
     int kf_boost = 32;
     double framerate = cpi->framerate;
 
     kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16));
-    if (rc->frames_since_key <  framerate / 2) {
-      kf_boost = (int)(kf_boost * rc->frames_since_key /
-                       (framerate / 2));
+    if (rc->frames_since_key < framerate / 2) {
+      kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2));
     }
     target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4;
   }
@@ -1480,13 +1426,11 @@ void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
   int target;
   // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
-  if ((cm->current_video_frame == 0 ||
-      (cpi->frame_flags & FRAMEFLAGS_KEY) ||
-      rc->frames_to_key == 0 ||
-      (cpi->oxcf.auto_key && 0))) {
+  if ((cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+       rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
     cm->frame_type = KEY_FRAME;
-    rc->this_key_frame_forced = cm->current_video_frame != 0 &&
-                                rc->frames_to_key == 0;
+    rc->this_key_frame_forced =
+        cm->current_video_frame != 0 && rc->frames_to_key == 0;
     rc->frames_to_key = cpi->oxcf.key_freq;
     rc->kf_boost = DEFAULT_KF_BOOST;
     rc->source_alt_ref_active = 0;
@@ -1525,7 +1469,7 @@ void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
 }
 
 int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
-                       vpx_bit_depth_t bit_depth) {
+                        vpx_bit_depth_t bit_depth) {
   int start_index = rc->worst_quality;
   int target_index = rc->worst_quality;
   int i;
@@ -1533,29 +1477,27 @@ int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
   // Convert the average q value to an index.
   for (i = rc->best_quality; i < rc->worst_quality; ++i) {
     start_index = i;
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart)
-      break;
+    if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart) break;
   }
 
   // Convert the q target to an index
   for (i = rc->best_quality; i < rc->worst_quality; ++i) {
     target_index = i;
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget)
-      break;
+    if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
   }
 
   return target_index - start_index;
 }
 
 int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
-                               int qindex, double rate_target_ratio,
-                               vpx_bit_depth_t bit_depth) {
+                                int qindex, double rate_target_ratio,
+                                vpx_bit_depth_t bit_depth) {
   int target_index = rc->worst_quality;
   int i;
 
   // Look up the current projected bits per block for the base index
-  const int base_bits_per_mb = vp10_rc_bits_per_mb(frame_type, qindex, 1.0,
-                                                  bit_depth);
+  const int base_bits_per_mb =
+      vp10_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
 
   // Find the target bits per mb based on the base value and given ratio.
   const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb);
@@ -1572,7 +1514,7 @@ int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
 }
 
 void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
-                                  RATE_CONTROL *const rc) {
+                                   RATE_CONTROL *const rc) {
   const VP10EncoderConfig *const oxcf = &cpi->oxcf;
 
   // Special case code for 1 pass fixed Q mode tests
@@ -1614,8 +1556,8 @@ void vp10_rc_update_framerate(VP10_COMP *cpi) {
   int vbr_max_bits;
 
   rc->avg_frame_bandwidth = (int)(oxcf->target_bandwidth / cpi->framerate);
-  rc->min_frame_bandwidth = (int)(rc->avg_frame_bandwidth *
-                                oxcf->two_pass_vbrmin_section / 100);
+  rc->min_frame_bandwidth =
+      (int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100);
 
   rc->min_frame_bandwidth =
       VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
@@ -1627,8 +1569,9 @@ void vp10_rc_update_framerate(VP10_COMP *cpi) {
   // a very high rate is given on the command line or the rate cannot
   // be achieved because of a user-specified max q (e.g. when the user
   // specifies lossless encode).
-  vbr_max_bits = (int)(((int64_t)rc->avg_frame_bandwidth *
-                     oxcf->two_pass_vbrmax_section) / 100);
+  vbr_max_bits =
+      (int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) /
+            100);
   rc->max_frame_bandwidth =
       VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
 
@@ -1655,13 +1598,13 @@ static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) {
 
   // vbr_bits_off_target > 0 means we have extra bits to spend
   if (vbr_bits_off_target > 0) {
-    *this_frame_target +=
-      (vbr_bits_off_target > max_delta) ? max_delta
-                                        : (int)vbr_bits_off_target;
+    *this_frame_target += (vbr_bits_off_target > max_delta)
+                              ? max_delta
+                              : (int)vbr_bits_off_target;
   } else {
-    *this_frame_target -=
-      (vbr_bits_off_target < -max_delta) ? max_delta
-                                         : (int)-vbr_bits_off_target;
+    *this_frame_target -= (vbr_bits_off_target < -max_delta)
+                              ? max_delta
+                              : (int)-vbr_bits_off_target;
   }
 
   // Fast redistribution of bits arising from massive local undershoot.
@@ -1744,7 +1687,7 @@ int vp10_resize_one_pass_cbr(VP10_COMP *cpi) {
     cpi->resize_scale_num = 1;
     cpi->resize_scale_den = 2;
     tot_scale_change = (cpi->resize_scale_den * cpi->resize_scale_den) /
-        (cpi->resize_scale_num * cpi->resize_scale_num);
+                       (cpi->resize_scale_num * cpi->resize_scale_num);
     // Reset buffer level to optimal, update target size.
     rc->buffer_level = rc->optimal_buffer_level;
     rc->bits_off_target = rc->optimal_buffer_level;
@@ -1754,26 +1697,22 @@ int vp10_resize_one_pass_cbr(VP10_COMP *cpi) {
       vp10_cyclic_refresh_reset_resize(cpi);
     // Get the projected qindex, based on the scaled target frame size (scaled
     // so target_bits_per_mb in vp10_rc_regulate_q will be the correct target).
-    target_bits_per_frame = (resize_now == 1) ?
-        rc->this_frame_target * tot_scale_change :
-        rc->this_frame_target / tot_scale_change;
+    target_bits_per_frame = (resize_now == 1)
+                                ? rc->this_frame_target * tot_scale_change
+                                : rc->this_frame_target / tot_scale_change;
     active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
-    qindex = vp10_rc_regulate_q(cpi,
-                               target_bits_per_frame,
-                               rc->best_quality,
-                               active_worst_quality);
+    qindex = vp10_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
+                                active_worst_quality);
     // If resize is down, check if projected q index is close to worst_quality,
     // and if so, reduce the rate correction factor (since likely can afford
     // lower q for resized frame).
-    if (resize_now == 1 &&
-        qindex > 90 * cpi->rc.worst_quality / 100) {
+    if (resize_now == 1 && qindex > 90 * cpi->rc.worst_quality / 100) {
       rc->rate_correction_factors[INTER_NORMAL] *= 0.85;
     }
     // If resize is back up, check if projected q index is too much above the
     // current base_qindex, and if so, reduce the rate correction factor
     // (since prefer to keep q for resized frame at least close to previous q).
-    if (resize_now == -1 &&
-       qindex > 130 * cm->base_qindex / 100) {
+    if (resize_now == -1 && qindex > 130 * cm->base_qindex / 100) {
       rc->rate_correction_factors[INTER_NORMAL] *= 0.9;
     }
   }
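Before moving on to the header, one arithmetic note on the damping rule
reflowed in vp10_rc_update_rate_correction_factors() above: the adjustment
limit is 0.25 plus up to 0.5 from a log term, so large size misses are still
corrected only gradually. A self-contained worked example, assuming a frame
that came out 50% over target (correction_factor == 150):

#include <math.h>
#include <stdio.h>

int main(void) {
  int correction_factor = 150; /* assumed: projected size 150% of target */
  /* Same shape as the encoder's formula (fmin stands in for VPXMIN). */
  const double adjustment_limit =
      0.25 + 0.5 * fmin(1.0, fabs(log10(0.01 * correction_factor)));
  /* Damped move toward 100, mirroring the correction_factor > 102 branch. */
  correction_factor =
      (int)(100 + ((correction_factor - 100) * adjustment_limit));
  printf("limit = %.3f, damped factor = %d\n", adjustment_limit,
         correction_factor); /* prints limit = 0.338, damped factor = 116 */
  return 0;
}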
diff --git a/vp10/encoder/ratectrl.h b/vp10/encoder/ratectrl.h
index 0b9fd456df27a97429f353c798d32335f5797e03..f84df832de4709d0aa1b54bcf92e84b826fb72dc 100644
--- a/vp10/encoder/ratectrl.h
+++ b/vp10/encoder/ratectrl.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_RATECTRL_H_
 #define VP10_ENCODER_RATECTRL_H_
 
@@ -22,11 +21,11 @@ extern "C" {
 #endif
 
 // Bits Per MB at different Q (Multiplied by 512)
-#define BPER_MB_NORMBITS    9
+#define BPER_MB_NORMBITS 9
 
-#define MIN_GF_INTERVAL     4
-#define MAX_GF_INTERVAL     16
-#define FIXED_GF_INTERVAL   8    // Used in some testing modes only
+#define MIN_GF_INTERVAL 4
+#define MAX_GF_INTERVAL 16
+#define FIXED_GF_INTERVAL 8  // Used in some testing modes only
 
 typedef enum {
   INTER_NORMAL = 0,
@@ -49,25 +48,25 @@ typedef enum {
 // e.g. 24 => 16/24 = 2/3 of native size. The restriction to 1/16th is
 // intended to match the capabilities of the normative scaling filters,
 // giving precedence to the up-scaling accuracy.
-static const int frame_scale_factor[FRAME_SCALE_STEPS] = {16, 24};
+static const int frame_scale_factor[FRAME_SCALE_STEPS] = { 16, 24 };
 
 // Multiplier of the target rate to be used as threshold for triggering scaling.
-static const double rate_thresh_mult[FRAME_SCALE_STEPS] = {1.0, 2.0};
+static const double rate_thresh_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 };
 
 // Scale dependent Rate Correction Factor multipliers. Compensates for the
 // greater number of bits per pixel generated in down-scaled frames.
-static const double rcf_mult[FRAME_SCALE_STEPS] = {1.0, 2.0};
+static const double rcf_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 };
 
 typedef struct {
   // Rate targeting variables
-  int base_frame_target;           // A baseline frame target before adjustment
-                                   // for previous under or over shoot.
-  int this_frame_target;           // Actual frame target after rc adjustment.
+  int base_frame_target;  // A baseline frame target before adjustment
+                          // for previous under or over shoot.
+  int this_frame_target;  // Actual frame target after rc adjustment.
   int projected_frame_size;
   int sb64_target_rate;
-  int last_q[FRAME_TYPES];         // Separate values for Intra/Inter
-  int last_boosted_qindex;         // Last boosted GF/KF/ARF q
-  int last_kf_qindex;              // Q index of the last key frame coded.
+  int last_q[FRAME_TYPES];  // Separate values for Intra/Inter
+  int last_boosted_qindex;  // Last boosted GF/KF/ARF q
+  int last_kf_qindex;       // Q index of the last key frame coded.
 
   int gfu_boost;
   int last_boost;
@@ -149,17 +148,18 @@ struct VP10_COMP;
 struct VP10EncoderConfig;
 
 void vp10_rc_init(const struct VP10EncoderConfig *oxcf, int pass,
-                 RATE_CONTROL *rc);
+                  RATE_CONTROL *rc);
 
 int vp10_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
-                           double correction_factor,
-                           vpx_bit_depth_t bit_depth);
+                            double correction_factor,
+                            vpx_bit_depth_t bit_depth);
 
 double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth);
 
 void vp10_rc_init_minq_luts(void);
 
-int vp10_rc_get_default_min_gf_interval(int width, int height, double framerate);
+int vp10_rc_get_default_min_gf_interval(int width, int height,
+                                        double framerate);
 // Note vp10_rc_get_default_max_gf_interval() requires the min_gf_interval to
 // be passed in to ensure that the max_gf_interval returned is at least as big
 // as that.
@@ -207,28 +207,27 @@ int vp10_rc_drop_frame(struct VP10_COMP *cpi);
 
 // Computes frame size bounds.
 void vp10_rc_compute_frame_size_bounds(const struct VP10_COMP *cpi,
-                                      int this_frame_target,
-                                      int *frame_under_shoot_limit,
-                                      int *frame_over_shoot_limit);
+                                       int this_frame_target,
+                                       int *frame_under_shoot_limit,
+                                       int *frame_over_shoot_limit);
 
 // Picks q and q bounds given the target for bits
-int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi,
-                             int *bottom_index,
-                             int *top_index);
+int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi, int *bottom_index,
+                              int *top_index);
 
 // Estimates q to achieve a target bits per frame
 int vp10_rc_regulate_q(const struct VP10_COMP *cpi, int target_bits_per_frame,
-                      int active_best_quality, int active_worst_quality);
+                       int active_best_quality, int active_worst_quality);
 
 // Estimates bits per mb for a given qindex and correction factor.
 int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
-                       double correction_factor, vpx_bit_depth_t bit_depth);
+                        double correction_factor, vpx_bit_depth_t bit_depth);
 
 // Clamping utilities for bitrate targets for iframes and pframes.
 int vp10_rc_clamp_iframe_target_size(const struct VP10_COMP *const cpi,
-                                    int target);
+                                     int target);
 int vp10_rc_clamp_pframe_target_size(const struct VP10_COMP *const cpi,
-                                    int target);
+                                     int target);
 // Utility to set frame_target into the RATE_CONTROL structure
 // This function is called only from the vp10_rc_get_..._params() functions.
 void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target);
@@ -236,20 +235,20 @@ void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target);
 // Computes a q delta (in "q index" terms) to get from a starting q value
 // to a target q value
 int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
-                       vpx_bit_depth_t bit_depth);
+                        vpx_bit_depth_t bit_depth);
 
 // Computes a q delta (in "q index" terms) to get from a starting q value
 // to a value that should equate to the given rate ratio.
 int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
-                               int qindex, double rate_target_ratio,
-                               vpx_bit_depth_t bit_depth);
+                                int qindex, double rate_target_ratio,
+                                vpx_bit_depth_t bit_depth);
 
 int vp10_frame_type_qdelta(const struct VP10_COMP *cpi, int rf_level, int q);
 
 void vp10_rc_update_framerate(struct VP10_COMP *cpi);
 
 void vp10_rc_set_gf_interval_range(const struct VP10_COMP *const cpi,
-                                  RATE_CONTROL *const rc);
+                                   RATE_CONTROL *const rc);
 
 void vp10_set_target_rate(struct VP10_COMP *cpi);
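
The q-delta helpers above compose naturally: convert a qindex to its real q,
scale it, and ask for the index delta that reaches the scaled value. A hedged
usage sketch (the 0.75 ratio is illustrative, not taken from the encoder):

    /* Sketch: qindex adjustment that lowers q by roughly 25%. */
    static int lower_q_by_quarter(const RATE_CONTROL *rc, int qindex,
                                  vpx_bit_depth_t bit_depth) {
      const double qstart = vp10_convert_qindex_to_q(qindex, bit_depth);
      const double qtarget = 0.75 * qstart; /* illustrative target */
      return vp10_compute_qdelta(rc, qstart, qtarget, bit_depth);
    }
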
 
diff --git a/vp10/encoder/rd.c b/vp10/encoder/rd.c
index acc58e212fe997b1ee12cd38a52b8d1c2521841b..f72ebdb332551af7187289ca9c66166b6fa12efb 100644
--- a/vp10/encoder/rd.c
+++ b/vp10/encoder/rd.c
@@ -40,8 +40,8 @@
 #include "vp10/encoder/rd.h"
 #include "vp10/encoder/tokenize.h"
 
-#define RD_THRESH_POW      1.25
-#define RD_MULT_EPB_RATIO  64
+#define RD_THRESH_POW 1.25
+#define RD_MULT_EPB_RATIO 64
 
 // Factor to weigh the rate for switchable interp filters.
 #define SWITCHABLE_INTERP_RATE_FACTOR 1
@@ -73,26 +73,25 @@ static void fill_mode_costs(VP10_COMP *cpi) {
   for (i = 0; i < INTRA_MODES; ++i)
     for (j = 0; j < INTRA_MODES; ++j)
       vp10_cost_tokens(cpi->y_mode_costs[i][j], vp10_kf_y_mode_prob[i][j],
-                      vp10_intra_mode_tree);
+                       vp10_intra_mode_tree);
 
   vp10_cost_tokens(cpi->mbmode_cost, fc->y_mode_prob[1], vp10_intra_mode_tree);
   for (i = 0; i < INTRA_MODES; ++i)
-    vp10_cost_tokens(cpi->intra_uv_mode_cost[i],
-                     fc->uv_mode_prob[i], vp10_intra_mode_tree);
+    vp10_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
+                     vp10_intra_mode_tree);
 
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
     vp10_cost_tokens(cpi->switchable_interp_costs[i],
-                    fc->switchable_interp_prob[i], vp10_switchable_interp_tree);
+                     fc->switchable_interp_prob[i],
+                     vp10_switchable_interp_tree);
 
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     for (j = 0; j < TX_TYPES; ++j)
       vp10_cost_tokens(cpi->intra_tx_type_costs[i][j],
-                       fc->intra_ext_tx_prob[i][j],
-                       vp10_ext_tx_tree);
+                       fc->intra_ext_tx_prob[i][j], vp10_ext_tx_tree);
   }
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-    vp10_cost_tokens(cpi->inter_tx_type_costs[i],
-                     fc->inter_ext_tx_prob[i],
+    vp10_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
                      vp10_ext_tx_tree);
   }
 }
@@ -108,10 +107,9 @@ static void fill_token_costs(vp10_coeff_cost *c,
           for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
             vpx_prob probs[ENTROPY_NODES];
             vp10_model_to_full_probs(p[t][i][j][k][l], probs);
-            vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs,
-                            vp10_coef_tree);
+            vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp10_coef_tree);
             vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
-                                 vp10_coef_tree);
+                                  vp10_coef_tree);
             assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
                    c[t][i][j][k][1][l][EOB_TOKEN]);
           }
@@ -152,28 +150,19 @@ void vp10_init_me_luts(void) {
 #endif
 }
 
-static const int rd_boost_factor[16] = {
-  64, 32, 32, 32, 24, 16, 12, 12,
-  8, 8, 4, 4, 2, 2, 1, 0
-};
-static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = {
-  128, 144, 128, 128, 144
-};
+static const int rd_boost_factor[16] = { 64, 32, 32, 32, 24, 16, 12, 12,
+                                         8,  8,  4,  4,  2,  2,  1,  0 };
+static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { 128, 144, 128,
+                                                              128, 144 };
 
 int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
   const int64_t q = vp10_dc_quant(qindex, 0, cpi->common.bit_depth);
 #if CONFIG_VPX_HIGHBITDEPTH
   int64_t rdmult = 0;
   switch (cpi->common.bit_depth) {
-    case VPX_BITS_8:
-      rdmult = 88 * q * q / 24;
-      break;
-    case VPX_BITS_10:
-      rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4);
-      break;
-    case VPX_BITS_12:
-      rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8);
-      break;
+    case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
+    case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
+    case VPX_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break;
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1;
@@ -189,8 +178,7 @@ int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
     rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
     rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
   }
-  if (rdmult < 1)
-    rdmult = 1;
+  if (rdmult < 1) rdmult = 1;
   return (int)rdmult;
 }
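
In equation form, and assuming the quantizer tables grow by 2^(bd-8) per step up
in bit depth, the switch above computes

    \mathrm{rdmult} = \mathrm{ROUND}\!\left(\frac{88\,q^2}{24} \cdot 2^{-2(bd-8)}\right),
    \qquad q = \mathrm{vp10\_dc\_quant}(\mathrm{qindex}, 0, bd)

so the 10- and 12-bit branches shift by 4 and 8 to cancel the 16x and 256x
growth of q^2. For example, at 8 bits q = 24 gives rdmult = 88 * 24 = 2112.
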
 
@@ -198,21 +186,15 @@ static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) {
   double q;
 #if CONFIG_VPX_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8:
-      q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
-      break;
-    case VPX_BITS_10:
-      q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0;
-      break;
-    case VPX_BITS_12:
-      q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0;
-      break;
+    case VPX_BITS_8: q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
+    case VPX_BITS_10: q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
+    case VPX_BITS_12: q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1;
   }
 #else
-  (void) bit_depth;
+  (void)bit_depth;
   q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   // TODO(debargha): Adjust the function below.
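
All three high-bit-depth cases above are one normalization, under the same
2^(bd-8) quantizer-scaling assumption:

    q = \frac{\mathrm{vp10\_dc\_quant}(\mathrm{qindex}, 0, bd)}{4 \cdot 2^{\,bd-8}}

which puts q on the 8-bit scale (divisors 4.0, 16.0, 64.0 for bd = 8, 10, 12)
before the threshold factor is derived.
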
@@ -250,7 +232,8 @@ static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) {
   for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
     const int qindex =
         clamp(vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
-              cm->y_dc_delta_q, 0, MAXQ);
+                  cm->y_dc_delta_q,
+              0, MAXQ);
     const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
 
     for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) {
@@ -261,10 +244,9 @@ static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) {
 
       if (bsize >= BLOCK_8X8) {
         for (i = 0; i < MAX_MODES; ++i)
-          rd->threshes[segment_id][bsize][i] =
-              rd->thresh_mult[i] < thresh_max
-                  ? rd->thresh_mult[i] * t / 4
-                  : INT_MAX;
+          rd->threshes[segment_id][bsize][i] = rd->thresh_mult[i] < thresh_max
+                                                   ? rd->thresh_mult[i] * t / 4
+                                                   : INT_MAX;
       } else {
         for (i = 0; i < MAX_REFS; ++i)
           rd->threshes[segment_id][bsize][i] =
@@ -291,7 +273,9 @@ void vp10_initialize_rd_consts(VP10_COMP *cpi) {
   x->errorperbit += (x->errorperbit == 0);
 
   x->select_tx_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
-                       cm->frame_type != KEY_FRAME) ? 0 : 1;
+                       cm->frame_type != KEY_FRAME)
+                          ? 0
+                          : 1;
 
   set_block_thresholds(cm, rd);
 
@@ -301,20 +285,20 @@ void vp10_initialize_rd_consts(VP10_COMP *cpi) {
       cm->frame_type == KEY_FRAME) {
     for (i = 0; i < PARTITION_CONTEXTS; ++i)
       vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
-                      vp10_partition_tree);
+                       vp10_partition_tree);
   }
 
   fill_mode_costs(cpi);
 
   if (!frame_is_intra_only(cm)) {
-    vp10_build_nmv_cost_table(x->nmvjointcost,
-                             cm->allow_high_precision_mv ? x->nmvcost_hp
-                                                         : x->nmvcost,
-                             &cm->fc->nmvc, cm->allow_high_precision_mv);
+    vp10_build_nmv_cost_table(
+        x->nmvjointcost,
+        cm->allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost, &cm->fc->nmvc,
+        cm->allow_high_precision_mv);
 
     for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
       vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
-                      cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
+                       cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
   }
 }
 
@@ -332,19 +316,15 @@ static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) {
   // where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance),
   // and H(x) is the binary entropy function.
   static const int rate_tab_q10[] = {
-    65536,  6086,  5574,  5275,  5063,  4899,  4764,  4651,
-     4553,  4389,  4255,  4142,  4044,  3958,  3881,  3811,
-     3748,  3635,  3538,  3453,  3376,  3307,  3244,  3186,
-     3133,  3037,  2952,  2877,  2809,  2747,  2690,  2638,
-     2589,  2501,  2423,  2353,  2290,  2232,  2179,  2130,
-     2084,  2001,  1928,  1862,  1802,  1748,  1698,  1651,
-     1608,  1530,  1460,  1398,  1342,  1290,  1243,  1199,
-     1159,  1086,  1021,   963,   911,   864,   821,   781,
-      745,   680,   623,   574,   530,   490,   455,   424,
-      395,   345,   304,   269,   239,   213,   190,   171,
-      154,   126,   104,    87,    73,    61,    52,    44,
-       38,    28,    21,    16,    12,    10,     8,     6,
-        5,     3,     2,     1,     1,     1,     0,     0,
+    65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651, 4553, 4389, 4255, 4142,
+    4044,  3958, 3881, 3811, 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186,
+    3133,  3037, 2952, 2877, 2809, 2747, 2690, 2638, 2589, 2501, 2423, 2353,
+    2290,  2232, 2179, 2130, 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651,
+    1608,  1530, 1460, 1398, 1342, 1290, 1243, 1199, 1159, 1086, 1021, 963,
+    911,   864,  821,  781,  745,  680,  623,  574,  530,  490,  455,  424,
+    395,   345,  304,  269,  239,  213,  190,  171,  154,  126,  104,  87,
+    73,    61,   52,   44,   38,   28,   21,   16,   12,   10,   8,    6,
+    5,     3,    2,    1,    1,    1,    0,    0,
   };
   // Normalized distortion:
   // This table models the normalized distortion for a Laplacian source
@@ -354,34 +334,29 @@ static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) {
   // where x = qpstep / sqrt(variance).
   // Note the actual distortion is Dn * variance.
   static const int dist_tab_q10[] = {
-       0,     0,     1,     1,     1,     2,     2,     2,
-       3,     3,     4,     5,     5,     6,     7,     7,
-       8,     9,    11,    12,    13,    15,    16,    17,
-      18,    21,    24,    26,    29,    31,    34,    36,
-      39,    44,    49,    54,    59,    64,    69,    73,
-      78,    88,    97,   106,   115,   124,   133,   142,
-     151,   167,   184,   200,   215,   231,   245,   260,
-     274,   301,   327,   351,   375,   397,   418,   439,
-     458,   495,   528,   559,   587,   613,   637,   659,
-     680,   717,   749,   777,   801,   823,   842,   859,
-     874,   899,   919,   936,   949,   960,   969,   977,
-     983,   994,  1001,  1006,  1010,  1013,  1015,  1017,
-    1018,  1020,  1022,  1022,  1023,  1023,  1023,  1024,
+    0,    0,    1,    1,    1,    2,    2,    2,    3,    3,    4,    5,
+    5,    6,    7,    7,    8,    9,    11,   12,   13,   15,   16,   17,
+    18,   21,   24,   26,   29,   31,   34,   36,   39,   44,   49,   54,
+    59,   64,   69,   73,   78,   88,   97,   106,  115,  124,  133,  142,
+    151,  167,  184,  200,  215,  231,  245,  260,  274,  301,  327,  351,
+    375,  397,  418,  439,  458,  495,  528,  559,  587,  613,  637,  659,
+    680,  717,  749,  777,  801,  823,  842,  859,  874,  899,  919,  936,
+    949,  960,  969,  977,  983,  994,  1001, 1006, 1010, 1013, 1015, 1017,
+    1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024,
   };
   static const int xsq_iq_q10[] = {
-         0,      4,      8,     12,     16,     20,     24,     28,
-        32,     40,     48,     56,     64,     72,     80,     88,
-        96,    112,    128,    144,    160,    176,    192,    208,
-       224,    256,    288,    320,    352,    384,    416,    448,
-       480,    544,    608,    672,    736,    800,    864,    928,
-       992,   1120,   1248,   1376,   1504,   1632,   1760,   1888,
-      2016,   2272,   2528,   2784,   3040,   3296,   3552,   3808,
-      4064,   4576,   5088,   5600,   6112,   6624,   7136,   7648,
-      8160,   9184,  10208,  11232,  12256,  13280,  14304,  15328,
-     16352,  18400,  20448,  22496,  24544,  26592,  28640,  30688,
-     32736,  36832,  40928,  45024,  49120,  53216,  57312,  61408,
-     65504,  73696,  81888,  90080,  98272, 106464, 114656, 122848,
-    131040, 147424, 163808, 180192, 196576, 212960, 229344, 245728,
+    0,      4,      8,      12,     16,     20,     24,     28,     32,
+    40,     48,     56,     64,     72,     80,     88,     96,     112,
+    128,    144,    160,    176,    192,    208,    224,    256,    288,
+    320,    352,    384,    416,    448,    480,    544,    608,    672,
+    736,    800,    864,    928,    992,    1120,   1248,   1376,   1504,
+    1632,   1760,   1888,   2016,   2272,   2528,   2784,   3040,   3296,
+    3552,   3808,   4064,   4576,   5088,   5600,   6112,   6624,   7136,
+    7648,   8160,   9184,   10208,  11232,  12256,  13280,  14304,  15328,
+    16352,  18400,  20448,  22496,  24544,  26592,  28640,  30688,  32736,
+    36832,  40928,  45024,  49120,  53216,  57312,  61408,  65504,  73696,
+    81888,  90080,  98272,  106464, 114656, 122848, 131040, 147424, 163808,
+    180192, 196576, 212960, 229344, 245728,
   };
   const int tmp = (xsq_q10 >> 2) + 8;
   const int k = get_msb(tmp) - 3;
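
The xsq_iq_q10 grid above is piecewise uniform: eight entries per octave, with
spacing doubling from 4 to 8 to 16 and so on, which is what makes the get_msb
lookup work. A sketch of the index recovery, reconstructed from the visible grid
(the interpolation that follows in the source falls outside this hunk):

    /* Sketch: table index for xsq_q10 on a grid whose step is 4 << k in
     * octave k, eight entries per octave. */
    static int xsq_index_sketch(int xsq_q10) {
      const int tmp = (xsq_q10 >> 2) + 8;   /* tmp in [8 << k, 16 << k) */
      const int k = get_msb(tmp) - 3;       /* which octave */
      return (k << 3) + ((tmp >> k) & 0x7); /* offset within the octave */
    }
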
@@ -394,8 +369,8 @@ static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) {
 }
 
 void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
-                                  unsigned int qstep, int *rate,
-                                  int64_t *dist) {
+                                   unsigned int qstep, int *rate,
+                                   int64_t *dist) {
   // This function models the rate and distortion for a Laplacian
   // source with given variance when quantized with a uniform quantizer
   // with given stepsize. The closed form expressions are in:
@@ -418,9 +393,9 @@ void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
 }
 
 void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
-                              const struct macroblockd_plane *pd,
-                              ENTROPY_CONTEXT t_above[16],
-                              ENTROPY_CONTEXT t_left[16]) {
+                               const struct macroblockd_plane *pd,
+                               ENTROPY_CONTEXT t_above[16],
+                               ENTROPY_CONTEXT t_left[16]) {
   const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
   const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
   const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
@@ -451,15 +426,12 @@ void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
       for (i = 0; i < num_4x4_h; i += 8)
         t_left[i] = !!*(const uint64_t *)&left[i];
       break;
-    default:
-      assert(0 && "Invalid transform size.");
-      break;
+    default: assert(0 && "Invalid transform size."); break;
   }
 }
 
-void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x,
-                 uint8_t *ref_y_buffer, int ref_y_stride,
-                 int ref_frame, BLOCK_SIZE block_size) {
+void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+                  int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
   int i;
   int zero_seen = 0;
   int best_index = 0;
@@ -469,9 +441,9 @@ void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x,
   int near_same_nearest;
   uint8_t *src_y_ptr = x->plane[0].src.buf;
   uint8_t *ref_y_ptr;
-  const int num_mv_refs = MAX_MV_REF_CANDIDATES +
-                    (cpi->sf.adaptive_motion_search &&
-                     block_size < x->max_partition_size);
+  const int num_mv_refs =
+      MAX_MV_REF_CANDIDATES +
+      (cpi->sf.adaptive_motion_search && block_size < x->max_partition_size);
 
   MV pred_mv[3];
   pred_mv[0] = x->mbmi_ext->ref_mvs[ref_frame][0].as_mv;
@@ -479,25 +451,22 @@ void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x,
   pred_mv[2] = x->pred_mv[ref_frame];
   assert(num_mv_refs <= (int)(sizeof(pred_mv) / sizeof(pred_mv[0])));
 
-  near_same_nearest =
-      x->mbmi_ext->ref_mvs[ref_frame][0].as_int ==
-          x->mbmi_ext->ref_mvs[ref_frame][1].as_int;
+  near_same_nearest = x->mbmi_ext->ref_mvs[ref_frame][0].as_int ==
+                      x->mbmi_ext->ref_mvs[ref_frame][1].as_int;
   // Get the sad for each candidate reference mv.
   for (i = 0; i < num_mv_refs; ++i) {
     const MV *this_mv = &pred_mv[i];
     int fp_row, fp_col;
 
-    if (i == 1 && near_same_nearest)
-      continue;
+    if (i == 1 && near_same_nearest) continue;
     fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3;
     fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3;
     max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
 
-    if (fp_row ==0 && fp_col == 0 && zero_seen)
-      continue;
-    zero_seen |= (fp_row ==0 && fp_col == 0);
+    if (fp_row == 0 && fp_col == 0 && zero_seen) continue;
+    zero_seen |= (fp_row == 0 && fp_col == 0);
 
-    ref_y_ptr =&ref_y_buffer[ref_y_stride * fp_row + fp_col];
+    ref_y_ptr = &ref_y_buffer[ref_y_stride * fp_row + fp_col];
     // Find sad for current vector.
     this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride,
                                            ref_y_ptr, ref_y_stride);
@@ -515,11 +484,10 @@ void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x,
 }
 
 void vp10_setup_pred_block(const MACROBLOCKD *xd,
-                          struct buf_2d dst[MAX_MB_PLANE],
-                          const YV12_BUFFER_CONFIG *src,
-                          int mi_row, int mi_col,
-                          const struct scale_factors *scale,
-                          const struct scale_factors *scale_uv) {
+                           struct buf_2d dst[MAX_MB_PLANE],
+                           const YV12_BUFFER_CONFIG *src, int mi_row,
+                           int mi_col, const struct scale_factors *scale,
+                           const struct scale_factors *scale_uv) {
   int i;
 
   dst[0].buf = src->y_buffer;
@@ -530,33 +498,33 @@ void vp10_setup_pred_block(const MACROBLOCKD *xd,
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
     setup_pred_plane(dst + i, dst[i].buf, dst[i].stride, mi_row, mi_col,
-                     i ? scale_uv : scale,
-                     xd->plane[i].subsampling_x, xd->plane[i].subsampling_y);
+                     i ? scale_uv : scale, xd->plane[i].subsampling_x,
+                     xd->plane[i].subsampling_y);
   }
 }
 
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize,
-                            int raster_block, int stride) {
+int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+                             int stride) {
   const int bw = b_width_log2_lookup[plane_bsize];
   const int y = 4 * (raster_block >> bw);
   const int x = 4 * (raster_block & ((1 << bw) - 1));
   return y * stride + x;
 }
 
-int16_t* vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
-                                       int raster_block, int16_t *base) {
+int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+                                        int raster_block, int16_t *base) {
   const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
   return base + vp10_raster_block_offset(plane_bsize, raster_block, stride);
 }
 
 YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi,
-                                             int ref_frame) {
+                                              int ref_frame) {
   const VP10_COMMON *const cm = &cpi->common;
   const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
   const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
-  return
-      (scaled_idx != ref_idx && scaled_idx != INVALID_IDX) ?
-          &cm->buffer_pool->frame_bufs[scaled_idx].buf : NULL;
+  return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
+             ? &cm->buffer_pool->frame_bufs[scaled_idx].buf
+             : NULL;
 }
 
 int vp10_get_switchable_rate(const VP10_COMP *cpi,
@@ -564,7 +532,7 @@ int vp10_get_switchable_rate(const VP10_COMP *cpi,
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const int ctx = vp10_get_pred_context_switchable_interp(xd);
   return SWITCHABLE_INTERP_RATE_FACTOR *
-             cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
+         cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
 }
 
 void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
@@ -613,7 +581,7 @@ void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
 
   rd->thresh_mult[THR_H_PRED] += 2000;
   rd->thresh_mult[THR_V_PRED] += 2000;
-  rd->thresh_mult[THR_D45_PRED ] += 2500;
+  rd->thresh_mult[THR_D45_PRED] += 2500;
   rd->thresh_mult[THR_D135_PRED] += 2500;
   rd->thresh_mult[THR_D117_PRED] += 2500;
   rd->thresh_mult[THR_D153_PRED] += 2500;
@@ -622,16 +590,17 @@ void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
 }
 
 void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {
-  static const int thresh_mult[2][MAX_REFS] =
-      {{2500, 2500, 2500, 4500, 4500, 2500},
-       {2000, 2000, 2000, 4000, 4000, 2000}};
+  static const int thresh_mult[2][MAX_REFS] = {
+    { 2500, 2500, 2500, 4500, 4500, 2500 },
+    { 2000, 2000, 2000, 4000, 4000, 2000 }
+  };
   RD_OPT *const rd = &cpi->rd;
   const int idx = cpi->oxcf.mode == BEST;
   memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
 }
 
 void vp10_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
-                               int bsize, int best_mode_index) {
+                                int bsize, int best_mode_index) {
   if (rd_thresh > 0) {
     const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES;
     int mode;
@@ -652,16 +621,13 @@ void vp10_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
 }
 
 int vp10_get_intra_cost_penalty(int qindex, int qdelta,
-                               vpx_bit_depth_t bit_depth) {
+                                vpx_bit_depth_t bit_depth) {
   const int q = vp10_dc_quant(qindex, qdelta, bit_depth);
 #if CONFIG_VPX_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8:
-      return 20 * q;
-    case VPX_BITS_10:
-      return 5 * q;
-    case VPX_BITS_12:
-      return ROUND_POWER_OF_TWO(5 * q, 2);
+    case VPX_BITS_8: return 20 * q;
+    case VPX_BITS_10: return 5 * q;
+    case VPX_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2);
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1;
@@ -670,4 +636,3 @@ int vp10_get_intra_cost_penalty(int qindex, int qdelta,
   return 20 * q;
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 }
-
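
Factoring out the same 2^(bd-8) quantizer scaling, the three returns in
vp10_get_intra_cost_penalty agree to within rounding:

    \mathrm{penalty} \approx \frac{20\,q}{2^{\,bd-8}}

that is, 20q, 5q, and 5q/4 (the last computed as ROUND_POWER_OF_TWO(5 * q, 2))
for 8, 10, and 12 bits.
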
diff --git a/vp10/encoder/rd.h b/vp10/encoder/rd.h
index cd58bf84f2c91ea2f58a02210e478c260895c2ee..f9f811cdd98a3f30e5734ed2529cfd30a3a1828b 100644
--- a/vp10/encoder/rd.h
+++ b/vp10/encoder/rd.h
@@ -22,22 +22,21 @@
 extern "C" {
 #endif
 
-#define RDDIV_BITS          7
+#define RDDIV_BITS 7
 
-#define RDCOST(RM, DM, R, D) \
-  (((128 + ((int64_t)R) * (RM)) >> 8) + (D << DM))
-#define QIDX_SKIP_THRESH     115
+#define RDCOST(RM, DM, R, D) (((128 + ((int64_t)R) * (RM)) >> 8) + (D << DM))
+#define QIDX_SKIP_THRESH 115
 
-#define MV_COST_WEIGHT      108
-#define MV_COST_WEIGHT_SUB  120
+#define MV_COST_WEIGHT 108
+#define MV_COST_WEIGHT_SUB 120
 
 #define INVALID_MV 0x80008000
 
 #define MAX_MODES 30
-#define MAX_REFS  6
+#define MAX_REFS 6
 
 #define RD_THRESH_MAX_FACT 64
-#define RD_THRESH_INC      1
+#define RD_THRESH_INC 1
 
 // This enumerator type needs to be kept aligned with the mode order in
 // const MODE_DEFINITION vp10_mode_order[MAX_MODES] used in the rd code.
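
Expanded, the one-line RDCOST above is the usual Lagrangian cost with the rate
term in Q8 fixed point, rounded, and the distortion term scaled up by the DM
shift:

    \mathrm{RDCOST}(RM, DM, R, D) = \left\lfloor \frac{R \cdot RM + 128}{256} \right\rfloor + D \cdot 2^{DM}

where, per the call sites in rdopt.c, RM is x->rdmult (the multiplier computed
in rd.c) and DM is the caller's x->rddiv shift.
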
@@ -130,57 +129,55 @@ int vp10_compute_rd_mult(const struct VP10_COMP *cpi, int qindex);
 
 void vp10_initialize_rd_consts(struct VP10_COMP *cpi);
 
-void vp10_initialize_me_consts(struct VP10_COMP *cpi,
-                               MACROBLOCK *x, int qindex);
+void vp10_initialize_me_consts(struct VP10_COMP *cpi, MACROBLOCK *x,
+                               int qindex);
 
 void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n,
-                                  unsigned int qstep, int *rate,
-                                  int64_t *dist);
+                                   unsigned int qstep, int *rate,
+                                   int64_t *dist);
 
 int vp10_get_switchable_rate(const struct VP10_COMP *cpi,
-                            const MACROBLOCKD *const xd);
+                             const MACROBLOCKD *const xd);
 
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize,
-                            int raster_block, int stride);
+int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+                             int stride);
 
-int16_t* vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
-                                       int raster_block, int16_t *base);
+int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+                                        int raster_block, int16_t *base);
 
 YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const struct VP10_COMP *cpi,
-                                             int ref_frame);
+                                              int ref_frame);
 
 void vp10_init_me_luts(void);
 
 void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
-                              const struct macroblockd_plane *pd,
-                              ENTROPY_CONTEXT t_above[16],
-                              ENTROPY_CONTEXT t_left[16]);
+                               const struct macroblockd_plane *pd,
+                               ENTROPY_CONTEXT t_above[16],
+                               ENTROPY_CONTEXT t_left[16]);
 
 void vp10_set_rd_speed_thresholds(struct VP10_COMP *cpi);
 
 void vp10_set_rd_speed_thresholds_sub8x8(struct VP10_COMP *cpi);
 
 void vp10_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh,
-                               int bsize, int best_mode_index);
+                                int bsize, int best_mode_index);
 
 static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
                                       int thresh_fact) {
-    return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
+  return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
 }
 
-void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x,
-                 uint8_t *ref_y_buffer, int ref_y_stride,
-                 int ref_frame, BLOCK_SIZE block_size);
+void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+                  int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
 
 void vp10_setup_pred_block(const MACROBLOCKD *xd,
-                          struct buf_2d dst[MAX_MB_PLANE],
-                          const YV12_BUFFER_CONFIG *src,
-                          int mi_row, int mi_col,
-                          const struct scale_factors *scale,
-                          const struct scale_factors *scale_uv);
+                           struct buf_2d dst[MAX_MB_PLANE],
+                           const YV12_BUFFER_CONFIG *src, int mi_row,
+                           int mi_col, const struct scale_factors *scale,
+                           const struct scale_factors *scale_uv);
 
 int vp10_get_intra_cost_penalty(int qindex, int qdelta,
-                               vpx_bit_depth_t bit_depth);
+                                vpx_bit_depth_t bit_depth);
 
 #ifdef __cplusplus
 }  // extern "C"
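
One reading of rd_less_than_thresh above worth recording: the >> 5 makes
thresh_fact a Q5 fixed-point multiplier, so 32 is neutral and RD_THRESH_MAX_FACT
(64) caps the effective threshold at double its base value:

    \mathrm{best\_rd} < \mathrm{thresh} \cdot \frac{\mathrm{thresh\_fact}}{32}
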
diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c
index 6b591618a2f0f23fddfe6a1ef199a3847bcde78e..ca71429e7daa336c037dd00dcb31f302690079f7 100644
--- a/vp10/encoder/rdopt.c
+++ b/vp10/encoder/rdopt.c
@@ -42,17 +42,17 @@
 #include "vp10/encoder/rdopt.h"
 #include "vp10/encoder/aq_variance.h"
 
-#define LAST_FRAME_MODE_MASK    ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \
-                                 (1 << INTRA_FRAME))
-#define GOLDEN_FRAME_MODE_MASK  ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \
-                                 (1 << INTRA_FRAME))
-#define ALT_REF_MODE_MASK       ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \
-                                 (1 << INTRA_FRAME))
+#define LAST_FRAME_MODE_MASK \
+  ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
+#define GOLDEN_FRAME_MODE_MASK \
+  ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
+#define ALT_REF_MODE_MASK \
+  ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME))
 
-#define SECOND_REF_FRAME_MASK   ((1 << ALTREF_FRAME) | 0x01)
+#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
 
-#define MIN_EARLY_TERM_INDEX    3
-#define NEW_MV_DISCOUNT_FACTOR  8
+#define MIN_EARLY_TERM_INDEX 3
+#define NEW_MV_DISCOUNT_FACTOR 8
 
 const double ext_tx_th = 0.99;
 
@@ -61,9 +61,7 @@ typedef struct {
   MV_REFERENCE_FRAME ref_frame[2];
 } MODE_DEFINITION;
 
-typedef struct {
-  MV_REFERENCE_FRAME ref_frame[2];
-} REF_DEFINITION;
+typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;
 
 struct rdcost_block_args {
   MACROBLOCK *x;
@@ -82,95 +80,91 @@ struct rdcost_block_args {
 
 #define LAST_NEW_MV_INDEX 6
 static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
-  {NEARESTMV, {LAST_FRAME,   NONE}},
-  {NEARESTMV, {ALTREF_FRAME, NONE}},
-  {NEARESTMV, {GOLDEN_FRAME, NONE}},
+  { NEARESTMV, { LAST_FRAME, NONE } },
+  { NEARESTMV, { ALTREF_FRAME, NONE } },
+  { NEARESTMV, { GOLDEN_FRAME, NONE } },
 
-  {DC_PRED,   {INTRA_FRAME,  NONE}},
+  { DC_PRED, { INTRA_FRAME, NONE } },
 
-  {NEWMV,     {LAST_FRAME,   NONE}},
-  {NEWMV,     {ALTREF_FRAME, NONE}},
-  {NEWMV,     {GOLDEN_FRAME, NONE}},
+  { NEWMV, { LAST_FRAME, NONE } },
+  { NEWMV, { ALTREF_FRAME, NONE } },
+  { NEWMV, { GOLDEN_FRAME, NONE } },
 
-  {NEARMV,    {LAST_FRAME,   NONE}},
-  {NEARMV,    {ALTREF_FRAME, NONE}},
-  {NEARMV,    {GOLDEN_FRAME, NONE}},
+  { NEARMV, { LAST_FRAME, NONE } },
+  { NEARMV, { ALTREF_FRAME, NONE } },
+  { NEARMV, { GOLDEN_FRAME, NONE } },
 
-  {ZEROMV,    {LAST_FRAME,   NONE}},
-  {ZEROMV,    {GOLDEN_FRAME, NONE}},
-  {ZEROMV,    {ALTREF_FRAME, NONE}},
+  { ZEROMV, { LAST_FRAME, NONE } },
+  { ZEROMV, { GOLDEN_FRAME, NONE } },
+  { ZEROMV, { ALTREF_FRAME, NONE } },
 
-  {NEARESTMV, {LAST_FRAME,   ALTREF_FRAME}},
-  {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}},
+  { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
 
-  {TM_PRED,   {INTRA_FRAME,  NONE}},
+  { TM_PRED, { INTRA_FRAME, NONE } },
 
-  {NEARMV,    {LAST_FRAME,   ALTREF_FRAME}},
-  {NEWMV,     {LAST_FRAME,   ALTREF_FRAME}},
-  {NEARMV,    {GOLDEN_FRAME, ALTREF_FRAME}},
-  {NEWMV,     {GOLDEN_FRAME, ALTREF_FRAME}},
+  { NEARMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEWMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
 
-  {ZEROMV,    {LAST_FRAME,   ALTREF_FRAME}},
-  {ZEROMV,    {GOLDEN_FRAME, ALTREF_FRAME}},
+  { ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
+  { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
 
-  {H_PRED,    {INTRA_FRAME,  NONE}},
-  {V_PRED,    {INTRA_FRAME,  NONE}},
-  {D135_PRED, {INTRA_FRAME,  NONE}},
-  {D207_PRED, {INTRA_FRAME,  NONE}},
-  {D153_PRED, {INTRA_FRAME,  NONE}},
-  {D63_PRED,  {INTRA_FRAME,  NONE}},
-  {D117_PRED, {INTRA_FRAME,  NONE}},
-  {D45_PRED,  {INTRA_FRAME,  NONE}},
+  { H_PRED, { INTRA_FRAME, NONE } },
+  { V_PRED, { INTRA_FRAME, NONE } },
+  { D135_PRED, { INTRA_FRAME, NONE } },
+  { D207_PRED, { INTRA_FRAME, NONE } },
+  { D153_PRED, { INTRA_FRAME, NONE } },
+  { D63_PRED, { INTRA_FRAME, NONE } },
+  { D117_PRED, { INTRA_FRAME, NONE } },
+  { D45_PRED, { INTRA_FRAME, NONE } },
 };
 
 static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
-  {{LAST_FRAME,   NONE}},
-  {{GOLDEN_FRAME, NONE}},
-  {{ALTREF_FRAME, NONE}},
-  {{LAST_FRAME,   ALTREF_FRAME}},
-  {{GOLDEN_FRAME, ALTREF_FRAME}},
-  {{INTRA_FRAME,  NONE}},
+  { { LAST_FRAME, NONE } },           { { GOLDEN_FRAME, NONE } },
+  { { ALTREF_FRAME, NONE } },         { { LAST_FRAME, ALTREF_FRAME } },
+  { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
 };
 
 static INLINE int write_uniform_cost(int n, int v) {
   int l = get_unsigned_bits(n), m = (1 << l) - n;
-  if (l == 0)
-    return 0;
+  if (l == 0) return 0;
   if (v < m)
     return (l - 1) * vp10_cost_bit(128, 0);
   else
     return l * vp10_cost_bit(128, 0);
 }
 
-static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
-                           int m, int n, int min_plane, int max_plane) {
+static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n,
+                           int min_plane, int max_plane) {
   int i;
 
   for (i = min_plane; i < max_plane; ++i) {
     struct macroblock_plane *const p = &x->plane[i];
     struct macroblockd_plane *const pd = &x->e_mbd.plane[i];
 
-    p->coeff    = ctx->coeff_pbuf[i][m];
-    p->qcoeff   = ctx->qcoeff_pbuf[i][m];
+    p->coeff = ctx->coeff_pbuf[i][m];
+    p->qcoeff = ctx->qcoeff_pbuf[i][m];
     pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
-    p->eobs     = ctx->eobs_pbuf[i][m];
+    p->eobs = ctx->eobs_pbuf[i][m];
 
-    ctx->coeff_pbuf[i][m]   = ctx->coeff_pbuf[i][n];
-    ctx->qcoeff_pbuf[i][m]  = ctx->qcoeff_pbuf[i][n];
+    ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
+    ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
     ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
-    ctx->eobs_pbuf[i][m]    = ctx->eobs_pbuf[i][n];
+    ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
 
-    ctx->coeff_pbuf[i][n]   = p->coeff;
-    ctx->qcoeff_pbuf[i][n]  = p->qcoeff;
+    ctx->coeff_pbuf[i][n] = p->coeff;
+    ctx->qcoeff_pbuf[i][n] = p->qcoeff;
     ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
-    ctx->eobs_pbuf[i][n]    = p->eobs;
+    ctx->eobs_pbuf[i][n] = p->eobs;
   }
 }
 
-static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
-                            MACROBLOCK *x, MACROBLOCKD *xd,
-                            int *out_rate_sum, int64_t *out_dist_sum,
-                            int *skip_txfm_sb, int64_t *skip_sse_sb) {
+static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+                            MACROBLOCKD *xd, int *out_rate_sum,
+                            int64_t *out_dist_sum, int *skip_txfm_sb,
+                            int64_t *skip_sse_sb) {
   // Note our transform coeffs are 8 times an orthogonal transform.
   // Hence quantizer step is also 8 times. To get effective quantizer
   // we need to divide by 8 before sending to modeling function.
@@ -188,10 +182,9 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
   int64_t dist;
   const int dequant_shift =
 #if CONFIG_VPX_HIGHBITDEPTH
-      (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
-          xd->bd - 5 :
+      (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
 #endif  // CONFIG_VPX_HIGHBITDEPTH
-          3;
+                                                    3;
 
   x->pred_sse[ref] = 0;
 
@@ -222,8 +215,8 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
         int block_idx = (idy << 1) + idx;
         int low_err_skip = 0;
 
-        var = cpi->fn_ptr[unit_size].vf(src, p->src.stride,
-                                        dst, pd->dst.stride, &sse);
+        var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, dst, pd->dst.stride,
+                                        &sse);
         x->bsse[(i << 2) + block_idx] = sse;
         sum_sse += sse;
 
@@ -243,11 +236,9 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
           }
         }
 
-        if (skip_flag && !low_err_skip)
-          skip_flag = 0;
+        if (skip_flag && !low_err_skip) skip_flag = 0;
 
-        if (i == 0)
-          x->pred_sse[ref] += sse;
+        if (i == 0) x->pred_sse[ref] += sse;
       }
     }
 
@@ -268,8 +259,8 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
       dist_sum += dist;
     } else {
       vp10_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
-                                   pd->dequant[1] >> dequant_shift,
-                                   &rate, &dist);
+                                    pd->dequant[1] >> dequant_shift, &rate,
+                                    &dist);
       rate_sum += rate;
       dist_sum += dist;
     }
@@ -282,13 +273,13 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
 }
 
 int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
-                          intptr_t block_size, int64_t *ssz) {
+                           intptr_t block_size, int64_t *ssz) {
   int i;
   int64_t error = 0, sqcoeff = 0;
 
   for (i = 0; i < block_size; i++) {
     const int diff = coeff[i] - dqcoeff[i];
-    error +=  diff * diff;
+    error += diff * diff;
     sqcoeff += coeff[i] * coeff[i];
   }
 
@@ -297,13 +288,13 @@ int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
 }
 
 int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
-                             int block_size) {
+                              int block_size) {
   int i;
   int64_t error = 0;
 
   for (i = 0; i < block_size; i++) {
     const int diff = coeff[i] - dqcoeff[i];
-    error +=  diff * diff;
+    error += diff * diff;
   }
 
   return error;
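
Both error kernels compute the same sum of squared coefficient differences; the
tran_low_t variant additionally reports the source energy through ssz:

    \mathrm{error} = \sum_i (c_i - \hat{c}_i)^2, \qquad \mathrm{ssz} = \sum_i c_i^2
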
@@ -311,9 +302,8 @@ int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
 
 #if CONFIG_VPX_HIGHBITDEPTH
 int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
-                                 const tran_low_t *dqcoeff,
-                                 intptr_t block_size,
-                                 int64_t *ssz, int bd) {
+                                  const tran_low_t *dqcoeff,
+                                  intptr_t block_size, int64_t *ssz, int bd) {
   int i;
   int64_t error = 0, sqcoeff = 0;
   int shift = 2 * (bd - 8);
@@ -321,7 +311,7 @@ int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
 
   for (i = 0; i < block_size; i++) {
     const int64_t diff = coeff[i] - dqcoeff[i];
-    error +=  diff * diff;
+    error += diff * diff;
     sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
   }
   assert(error >= 0 && sqcoeff >= 0);
@@ -339,17 +329,14 @@ int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
  * were non-zero). */
 static const int16_t band_counts[TX_SIZES][8] = {
-  { 1, 2, 3, 4,  3,   16 - 13, 0 },
-  { 1, 2, 3, 4, 11,   64 - 21, 0 },
-  { 1, 2, 3, 4, 11,  256 - 21, 0 },
+  { 1, 2, 3, 4, 3, 16 - 13, 0 },
+  { 1, 2, 3, 4, 11, 64 - 21, 0 },
+  { 1, 2, 3, 4, 11, 256 - 21, 0 },
   { 1, 2, 3, 4, 11, 1024 - 21, 0 },
 };
-static int cost_coeffs(MACROBLOCK *x,
-                       int plane, int block,
-                       ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
-                       TX_SIZE tx_size,
-                       const int16_t *scan, const int16_t *nb,
-                       int use_fast_coef_costing) {
+static int cost_coeffs(MACROBLOCK *x, int plane, int block, ENTROPY_CONTEXT *A,
+                       ENTROPY_CONTEXT *L, TX_SIZE tx_size, const int16_t *scan,
+                       const int16_t *nb, int use_fast_coef_costing) {
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   const struct macroblock_plane *p = &x->plane[plane];
@@ -358,8 +345,8 @@ static int cost_coeffs(MACROBLOCK *x,
   const int16_t *band_count = &band_counts[tx_size][1];
   const int eob = p->eobs[block];
   const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
-  unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
-                   x->token_costs[tx_size][type][is_inter_block(mbmi)];
+  unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
+      x->token_costs[tx_size][type][is_inter_block(mbmi)];
   uint8_t token_cache[32 * 32];
   int pt = combine_entropy_contexts(*A, *L);
   int c, cost;
@@ -386,7 +373,7 @@ static int cost_coeffs(MACROBLOCK *x,
     EXTRABIT e;
     vp10_get_token_extra(v, &prev_t, &e);
     cost = (*token_costs)[0][pt][prev_t] +
-        vp10_get_cost(prev_t, e, cat6_high_cost);
+           vp10_get_cost(prev_t, e, cat6_high_cost);
 
     token_cache[0] = vp10_pt_energy_class[prev_t];
     ++token_costs;
@@ -400,11 +387,11 @@ static int cost_coeffs(MACROBLOCK *x,
       vp10_get_token_extra(v, &t, &e);
       if (use_fast_coef_costing) {
         cost += (*token_costs)[!prev_t][!prev_t][t] +
-            vp10_get_cost(t, e, cat6_high_cost);
+                vp10_get_cost(t, e, cat6_high_cost);
       } else {
         pt = get_coef_context(nb, token_cache, c);
         cost += (*token_costs)[!prev_t][pt][t] +
-            vp10_get_cost(t, e, cat6_high_cost);
+                vp10_get_cost(t, e, cat6_high_cost);
         token_cache[rc] = vp10_pt_energy_class[t];
       }
       prev_t = t;
@@ -434,7 +421,7 @@ static int cost_coeffs(MACROBLOCK *x,
 static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
                        int64_t *out_dist, int64_t *out_sse) {
   const int ss_txfrm_size = tx_size << 1;
-  MACROBLOCKD* const xd = &x->e_mbd;
+  MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   int64_t this_sse;
@@ -444,25 +431,24 @@ static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
 #if CONFIG_VPX_HIGHBITDEPTH
   const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
   *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
-                                     &this_sse, bd) >> shift;
+                                      &this_sse, bd) >>
+              shift;
 #else
-  *out_dist = vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
-                              &this_sse) >> shift;
+  *out_dist =
+      vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >> shift;
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   *out_sse = this_sse >> shift;
 }
 
 static int rate_block(int plane, int block, int blk_row, int blk_col,
-                      TX_SIZE tx_size, struct rdcost_block_args* args) {
+                      TX_SIZE tx_size, struct rdcost_block_args *args) {
   return cost_coeffs(args->x, plane, block, args->t_above + blk_col,
-                     args->t_left + blk_row, tx_size,
-                     args->so->scan, args->so->neighbors,
-                     args->use_fast_coef_costing);
+                     args->t_left + blk_row, tx_size, args->so->scan,
+                     args->so->neighbors, args->use_fast_coef_costing);
 }
 
 static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
-                          BLOCK_SIZE plane_bsize,
-                          TX_SIZE tx_size, void *arg) {
+                          BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
   struct rdcost_block_args *args = arg;
   MACROBLOCK *const x = args->x;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -472,29 +458,27 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
   int64_t dist;
   int64_t sse;
 
-  if (args->exit_early)
-    return;
+  if (args->exit_early) return;
 
   if (!is_inter_block(mbmi)) {
-    struct encode_b_args arg = {x, NULL, &mbmi->skip};
-    vp10_encode_block_intra(plane, block, blk_row, blk_col,
-                            plane_bsize, tx_size, &arg);
+    struct encode_b_args arg = { x, NULL, &mbmi->skip };
+    vp10_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size, &arg);
     dist_block(x, plane, block, tx_size, &dist, &sse);
   } else if (max_txsize_lookup[plane_bsize] == tx_size) {
     if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
         SKIP_TXFM_NONE) {
       // full forward transform and quantization
-      vp10_xform_quant(x, plane, block, blk_row, blk_col,
-                       plane_bsize, tx_size);
+      vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
       dist_block(x, plane, block, tx_size, &dist, &sse);
     } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
                SKIP_TXFM_AC_ONLY) {
       // compute DC coefficient
-      tran_low_t *const coeff   = BLOCK_OFFSET(x->plane[plane].coeff, block);
+      tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
       tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
-      vp10_xform_quant_dc(x, plane, block, blk_row, blk_col,
-                          plane_bsize, tx_size);
-      sse  = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
+      vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
+                          tx_size);
+      sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
       dist = sse;
       if (x->plane[plane].eobs[block]) {
         const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
@@ -503,8 +487,7 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
 #if CONFIG_VPX_HIGHBITDEPTH
         dc_correct >>= ((xd->bd - 8) * 2);
 #endif
-        if (tx_size != TX_32X32)
-          dc_correct >>= 2;
+        if (tx_size != TX_32X32) dc_correct >>= 2;
 
         dist = VPXMAX(0, sse - dc_correct);
       }
@@ -512,7 +495,7 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
       // SKIP_TXFM_AC_DC
       // skip forward transform
       x->plane[plane].eobs[block] = 0;
-      sse  = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
+      sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
       dist = sse;
     }
   } else {
@@ -534,7 +517,8 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
   // TODO(jingning): temporarily enabled only for luma component
   rd = VPXMIN(rd1, rd2);
   if (plane == 0)
-    x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] ||
+    x->zcoeff_blk[tx_size][block] =
+        !x->plane[plane].eobs[block] ||
         (rd1 > rd2 && !xd->lossless[mbmi->segment_id]);
 
   args->this_rate += rate;
@@ -550,11 +534,9 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
   args->skippable &= !x->plane[plane].eobs[block];
 }
 
-static void txfm_rd_in_plane(MACROBLOCK *x,
-                             int *rate, int64_t *distortion,
-                             int *skippable, int64_t *sse,
-                             int64_t ref_best_rd, int plane,
-                             BLOCK_SIZE bsize, TX_SIZE tx_size,
+static void txfm_rd_in_plane(MACROBLOCK *x, int *rate, int64_t *distortion,
+                             int *skippable, int64_t *sse, int64_t ref_best_rd,
+                             int plane, BLOCK_SIZE bsize, TX_SIZE tx_size,
                              int use_fast_coef_casting) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -566,34 +548,31 @@ static void txfm_rd_in_plane(MACROBLOCK *x,
   args.use_fast_coef_costing = use_fast_coef_casting;
   args.skippable = 1;
 
-  if (plane == 0)
-    xd->mi[0]->mbmi.tx_size = tx_size;
+  if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
 
   vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
 
   tx_type = get_tx_type(pd->plane_type, xd, 0);
   args.so = get_scan(tx_size, tx_type);
 
-  vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
-                                         block_rd_txfm, &args);
+  vp10_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
+                                          &args);
   if (args.exit_early) {
-    *rate       = INT_MAX;
+    *rate = INT_MAX;
     *distortion = INT64_MAX;
-    *sse        = INT64_MAX;
-    *skippable  = 0;
+    *sse = INT64_MAX;
+    *skippable = 0;
   } else {
     *distortion = args.this_dist;
-    *rate       = args.this_rate;
-    *sse        = args.this_sse;
-    *skippable  = args.skippable;
+    *rate = args.this_rate;
+    *sse = args.this_sse;
+    *skippable = args.skippable;
   }
 }
 
-static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
-                                   int *rate, int64_t *distortion,
-                                   int *skip, int64_t *sse,
-                                   int64_t ref_best_rd,
-                                   BLOCK_SIZE bs) {
+static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                   int64_t *distortion, int *skip, int64_t *sse,
+                                   int64_t ref_best_rd, BLOCK_SIZE bs) {
   const TX_SIZE max_tx_size = max_txsize_lookup[bs];
   VP10_COMMON *const cm = &cpi->common;
   const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
@@ -604,20 +583,17 @@ static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
   int r, s;
   int64_t d, psse, this_rd, best_rd = INT64_MAX;
   vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
-  int  s0 = vp10_cost_bit(skip_prob, 0);
-  int  s1 = vp10_cost_bit(skip_prob, 1);
+  int s0 = vp10_cost_bit(skip_prob, 0);
+  int s1 = vp10_cost_bit(skip_prob, 1);
   const int is_inter = is_inter_block(mbmi);
 
   mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
-  if (mbmi->tx_size < TX_32X32 &&
-      !xd->lossless[mbmi->segment_id]) {
+  if (mbmi->tx_size < TX_32X32 && !xd->lossless[mbmi->segment_id]) {
     for (tx_type = 0; tx_type < TX_TYPES; ++tx_type) {
       mbmi->tx_type = tx_type;
-      txfm_rd_in_plane(x, &r, &d, &s,
-                       &psse, ref_best_rd, 0, bs, mbmi->tx_size,
+      txfm_rd_in_plane(x, &r, &d, &s, &psse, ref_best_rd, 0, bs, mbmi->tx_size,
                        cpi->sf.use_fast_coef_costing);
-      if (r == INT_MAX)
-        continue;
+      if (r == INT_MAX) continue;
       if (is_inter)
         r += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
       else
@@ -638,41 +614,35 @@ static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
     }
   }
   mbmi->tx_type = best_tx_type;
-  txfm_rd_in_plane(x, rate, distortion, skip,
-                   sse, ref_best_rd, 0, bs,
+  txfm_rd_in_plane(x, rate, distortion, skip, sse, ref_best_rd, 0, bs,
                    mbmi->tx_size, cpi->sf.use_fast_coef_costing);
   if (mbmi->tx_size < TX_32X32 && !xd->lossless[mbmi->segment_id] &&
       *rate != INT_MAX) {
     if (is_inter)
       *rate += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
     else
-      *rate += cpi->intra_tx_type_costs[mbmi->tx_size]
-          [intra_mode_to_tx_type_context[mbmi->mode]]
-          [mbmi->tx_type];
+      *rate += cpi->intra_tx_type_costs
+                   [mbmi->tx_size][intra_mode_to_tx_type_context[mbmi->mode]]
+                   [mbmi->tx_type];
   }
 }
 
-static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
-                                    int *rate, int64_t *distortion,
-                                    int *skip, int64_t *sse,
-                                    int64_t ref_best_rd,
+static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                    int64_t *distortion, int *skip,
+                                    int64_t *sse, int64_t ref_best_rd,
                                     BLOCK_SIZE bs) {
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
 
   mbmi->tx_size = TX_4X4;
 
-  txfm_rd_in_plane(x, rate, distortion, skip,
-                   sse, ref_best_rd, 0, bs,
+  txfm_rd_in_plane(x, rate, distortion, skip, sse, ref_best_rd, 0, bs,
                    mbmi->tx_size, cpi->sf.use_fast_coef_costing);
 }
 
-static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
-                                   int *rate,
-                                   int64_t *distortion,
-                                   int *skip,
-                                   int64_t *psse,
-                                   int64_t ref_best_rd,
+static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                   int64_t *distortion, int *skip,
+                                   int64_t *psse, int64_t ref_best_rd,
                                    BLOCK_SIZE bs) {
   const TX_SIZE max_tx_size = max_txsize_lookup[bs];
   VP10_COMMON *const cm = &cpi->common;
@@ -707,15 +677,15 @@ static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
   }
 
   *distortion = INT64_MAX;
-  *rate       = INT_MAX;
-  *skip       = 0;
-  *psse       = INT64_MAX;
+  *rate = INT_MAX;
+  *skip = 0;
+  *psse = INT64_MAX;
 
   for (tx_type = DCT_DCT; tx_type < TX_TYPES; ++tx_type) {
     last_rd = INT64_MAX;
     for (n = start_tx; n >= end_tx; --n) {
       int r_tx_size = 0;
-      for (m = 0; m <= n - (n == (int) max_tx_size); ++m) {
+      for (m = 0; m <= n - (n == (int)max_tx_size); ++m) {
         if (m == n)
           r_tx_size += vp10_cost_zero(tx_probs[m]);
         else
@@ -726,56 +696,50 @@ static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
         continue;
       }
       mbmi->tx_type = tx_type;
-      txfm_rd_in_plane(x, &r, &d, &s,
-                       &sse, ref_best_rd, 0, bs, n,
+      txfm_rd_in_plane(x, &r, &d, &s, &sse, ref_best_rd, 0, bs, n,
                        cpi->sf.use_fast_coef_costing);
-      if (n < TX_32X32 &&
-          !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
+      if (n < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
           r != INT_MAX) {
         if (is_inter)
           r += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
         else
-          r += cpi->intra_tx_type_costs[mbmi->tx_size]
-              [intra_mode_to_tx_type_context[mbmi->mode]]
-              [mbmi->tx_type];
+          r += cpi->intra_tx_type_costs
+                   [mbmi->tx_size][intra_mode_to_tx_type_context[mbmi->mode]]
+                   [mbmi->tx_type];
       }
 
-      if (r == INT_MAX)
-        continue;
+      if (r == INT_MAX) continue;
 
       if (s) {
         if (is_inter) {
           rd = RDCOST(x->rdmult, x->rddiv, s1, sse);
         } else {
-          rd =  RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size * tx_select, sse);
+          rd = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size * tx_select, sse);
         }
       } else {
         rd = RDCOST(x->rdmult, x->rddiv, r + s0 + r_tx_size * tx_select, d);
       }
 
-      if (tx_select && !(s && is_inter))
-        r += r_tx_size;
+      if (tx_select && !(s && is_inter)) r += r_tx_size;
 
       if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !s)
         rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, sse));
 
       // Early termination in transform size search.
       if (cpi->sf.tx_size_search_breakout &&
-          (rd == INT64_MAX ||
-           (s == 1 && tx_type != DCT_DCT && n < start_tx) ||
-           (n < (int) max_tx_size && rd > last_rd)))
+          (rd == INT64_MAX || (s == 1 && tx_type != DCT_DCT && n < start_tx) ||
+           (n < (int)max_tx_size && rd > last_rd)))
         break;
 
       last_rd = rd;
       if (rd <
-          (is_inter && best_tx_type == DCT_DCT ? ext_tx_th : 1) *
-          best_rd) {
+          (is_inter && best_tx_type == DCT_DCT ? ext_tx_th : 1) * best_rd) {
         best_tx = n;
         best_rd = rd;
         *distortion = d;
-        *rate       = r;
-        *skip       = s;
-        *psse       = sse;
+        *rate = r;
+        *skip = s;
+        *psse = sse;
         best_tx_type = mbmi->tx_type;
       }
     }
@@ -783,17 +747,14 @@ static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
 
   mbmi->tx_size = best_tx;
   mbmi->tx_type = best_tx_type;
-  if (mbmi->tx_size >= TX_32X32)
-    assert(mbmi->tx_type == DCT_DCT);
-  txfm_rd_in_plane(x, &r, &d, &s,
-                   &sse, ref_best_rd, 0, bs, best_tx,
+  if (mbmi->tx_size >= TX_32X32) assert(mbmi->tx_type == DCT_DCT);
+  txfm_rd_in_plane(x, &r, &d, &s, &sse, ref_best_rd, 0, bs, best_tx,
                    cpi->sf.use_fast_coef_costing);
 }
 
 static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
-                            int64_t *distortion, int *skip,
-                            int64_t *psse, BLOCK_SIZE bs,
-                            int64_t ref_best_rd) {
+                            int64_t *distortion, int *skip, int64_t *psse,
+                            BLOCK_SIZE bs, int64_t ref_best_rd) {
   MACROBLOCKD *xd = &x->e_mbd;
   int64_t sse;
   int64_t *ret_sse = psse ? psse : &sse;
@@ -808,39 +769,33 @@ static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
     choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
                            bs);
   } else {
-    choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse,
-                           ref_best_rd, bs);
+    choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
+                           bs);
   }
 }
 
 static int conditional_skipintra(PREDICTION_MODE mode,
                                  PREDICTION_MODE best_intra_mode) {
-  if (mode == D117_PRED &&
-      best_intra_mode != V_PRED &&
+  if (mode == D117_PRED && best_intra_mode != V_PRED &&
       best_intra_mode != D135_PRED)
     return 1;
-  if (mode == D63_PRED &&
-      best_intra_mode != V_PRED &&
+  if (mode == D63_PRED && best_intra_mode != V_PRED &&
       best_intra_mode != D45_PRED)
     return 1;
-  if (mode == D207_PRED &&
-      best_intra_mode != H_PRED &&
+  if (mode == D207_PRED && best_intra_mode != H_PRED &&
       best_intra_mode != D45_PRED)
     return 1;
-  if (mode == D153_PRED &&
-      best_intra_mode != H_PRED &&
+  if (mode == D153_PRED && best_intra_mode != H_PRED &&
       best_intra_mode != D135_PRED)
     return 1;
   return 0;
 }
 
-static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
-                                     int row, int col,
-                                     PREDICTION_MODE *best_mode,
-                                     const int *bmode_costs,
-                                     ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
-                                     int *bestrate, int *bestratey,
-                                     int64_t *bestdistortion,
+static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
+                                     int col, PREDICTION_MODE *best_mode,
+                                     const int *bmode_costs, ENTROPY_CONTEXT *a,
+                                     ENTROPY_CONTEXT *l, int *bestrate,
+                                     int *bestratey, int64_t *bestdistortion,
                                      BLOCK_SIZE bsize, int64_t rd_thresh) {
   PREDICTION_MODE mode;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -873,14 +828,12 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
       int64_t distortion = 0;
       int rate = bmode_costs[mode];
 
-      if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
-        continue;
+      if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
 
       // Only do the oblique modes if the best so far is
       // one of the neighboring directional modes
       if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
-        if (conditional_skipintra(mode, *best_mode))
-            continue;
+        if (conditional_skipintra(mode, *best_mode)) continue;
       }
 
       memcpy(tempa, ta, sizeof(ta));
@@ -891,16 +844,14 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
           const int block = (row + idy) * 2 + (col + idx);
           const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
           uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
-          int16_t *const src_diff = vp10_raster_block_offset_int16(BLOCK_8X8,
-                                                                  block,
-                                                                  p->src_diff);
+          int16_t *const src_diff =
+              vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
           tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
           xd->mi[0]->bmi[block].as_mode = mode;
-          vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride,
-                                  dst, dst_stride,
-                                  col + idx, row + idy, 0);
-          vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride,
-                                    dst, dst_stride, xd->bd);
+          vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+                                   dst_stride, col + idx, row + idy, 0);
+          vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
+                                    dst_stride, xd->bd);
           if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
             TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
             const scan_order *so = get_scan(TX_4X4, tx_type);
@@ -911,9 +862,9 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
                                  cpi->sf.use_fast_coef_costing);
             if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
               goto next_highbd;
-            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
-                                         dst, dst_stride, p->eobs[block],
-                                         xd->bd, DCT_DCT, 1);
+            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                         dst_stride, p->eobs[block], xd->bd,
+                                         DCT_DCT, 1);
           } else {
             int64_t unused;
             TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
@@ -923,14 +874,15 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
             ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                                  so->scan, so->neighbors,
                                  cpi->sf.use_fast_coef_costing);
-            distortion += vp10_highbd_block_error(
-                coeff, BLOCK_OFFSET(pd->dqcoeff, block),
-                16, &unused, xd->bd) >> 2;
+            distortion +=
+                vp10_highbd_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
+                                        16, &unused, xd->bd) >>
+                2;
             if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
               goto next_highbd;
-            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
-                                         dst, dst_stride, p->eobs[block],
-                                         xd->bd, tx_type, 0);
+            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                         dst_stride, p->eobs[block], xd->bd,
+                                         tx_type, 0);
           }
         }
       }
@@ -952,16 +904,13 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
                  num_4x4_blocks_wide * 4 * sizeof(uint16_t));
         }
       }
-    next_highbd:
-      {}
+    next_highbd : {}
     }
-    if (best_rd >= rd_thresh)
-      return best_rd;
+    if (best_rd >= rd_thresh) return best_rd;
 
     for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
       memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
-             best_dst16 + idy * 8,
-             num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+             best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t));
     }
 
     return best_rd;
@@ -974,14 +923,12 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
     int64_t distortion = 0;
     int rate = bmode_costs[mode];
 
-    if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
-      continue;
+    if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
 
     // Only do the oblique modes if the best so far is
     // one of the neighboring directional modes
     if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
-      if (conditional_skipintra(mode, *best_mode))
-          continue;
+      if (conditional_skipintra(mode, *best_mode)) continue;
     }
 
     memcpy(tempa, ta, sizeof(ta));
@@ -996,8 +943,8 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
             vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
         tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
         xd->mi[0]->bmi[block].as_mode = mode;
-        vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride,
-                                dst, dst_stride, col + idx, row + idy, 0);
+        vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+                                 dst_stride, col + idx, row + idy, 0);
         vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
 
         if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -1010,8 +957,8 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
                                cpi->sf.use_fast_coef_costing);
           if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
             goto next;
-          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
-                                dst, dst_stride, p->eobs[block], DCT_DCT, 1);
+          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                dst_stride, p->eobs[block], DCT_DCT, 1);
         } else {
           int64_t unused;
           TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
@@ -1019,14 +966,16 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
           vp10_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
           vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
           ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
-                             so->scan, so->neighbors,
-                             cpi->sf.use_fast_coef_costing);
-          distortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
-                                        16, &unused) >> 2;
+                               so->scan, so->neighbors,
+                               cpi->sf.use_fast_coef_costing);
+          distortion +=
+              vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
+                               &unused) >>
+              2;
           if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
             goto next;
-          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
-                                dst, dst_stride, p->eobs[block], tx_type, 0);
+          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                dst_stride, p->eobs[block], tx_type, 0);
         }
       }
     }
@@ -1046,12 +995,10 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
         memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
                num_4x4_blocks_wide * 4);
     }
-  next:
-    {}
+  next : {}
   }
 
-  if (best_rd >= rd_thresh)
-    return best_rd;
+  if (best_rd >= rd_thresh) return best_rd;
 
   for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
     memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
@@ -1094,14 +1041,13 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
         const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
         const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);
 
-        bmode_costs  = cpi->y_mode_costs[A][L];
+        bmode_costs = cpi->y_mode_costs[A][L];
       }
 
       this_rd = rd_pick_intra4x4block(cpi, mb, idy, idx, &best_mode,
                                       bmode_costs, t_above + idx, t_left + idy,
                                       &r, &ry, &d, bsize, best_rd - total_rd);
-      if (this_rd >= best_rd - total_rd)
-        return INT64_MAX;
+      if (this_rd >= best_rd - total_rd) return INT64_MAX;
 
       total_rd += this_rd;
       cost += r;
@@ -1114,8 +1060,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
       for (j = 1; j < num_4x4_blocks_wide; ++j)
         mic->bmi[i + j].as_mode = best_mode;
 
-      if (total_rd >= best_rd)
-        return INT64_MAX;
+      if (total_rd >= best_rd) return INT64_MAX;
     }
   }
 
@@ -1128,10 +1073,9 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
 }
 
 // This function is used only for intra_only frames
-static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x,
-                                      int *rate, int *rate_tokenonly,
-                                      int64_t *distortion, int *skippable,
-                                      BLOCK_SIZE bsize,
+static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                      int *rate_tokenonly, int64_t *distortion,
+                                      int *skippable, BLOCK_SIZE bsize,
                                       int64_t best_rd) {
   PREDICTION_MODE mode;
   PREDICTION_MODE mode_selected = DC_PRED;
@@ -1154,24 +1098,23 @@ static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x,
   for (mode = DC_PRED; mode <= TM_PRED; mode++) {
     mic->mbmi.mode = mode;
 
-    super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
-        &s, NULL, bsize, best_rd);
+    super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+                    bsize, best_rd);
 
-    if (this_rate_tokenonly == INT_MAX)
-      continue;
+    if (this_rate_tokenonly == INT_MAX) continue;
 
     this_rate = this_rate_tokenonly + bmode_costs[mode];
     this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
 
     if (this_rd < best_rd) {
-      mode_selected   = mode;
-      best_rd         = this_rd;
-      best_tx         = mic->mbmi.tx_size;
-      best_tx_type    = mic->mbmi.tx_type;
-      *rate           = this_rate;
+      mode_selected = mode;
+      best_rd = this_rd;
+      best_tx = mic->mbmi.tx_size;
+      best_tx_type = mic->mbmi.tx_type;
+      *rate = this_rate;
       *rate_tokenonly = this_rate_tokenonly;
-      *distortion     = this_distortion;
-      *skippable      = s;
+      *distortion = this_distortion;
+      *skippable = s;
     }
   }
 
@@ -1184,10 +1127,9 @@ static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
 // Return value 0: early termination triggered, no valid rd cost available;
 //              1: rd cost values are valid.
-static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x,
-                            int *rate, int64_t *distortion, int *skippable,
-                            int64_t *sse, BLOCK_SIZE bsize,
-                            int64_t ref_best_rd) {
+static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                            int64_t *distortion, int *skippable, int64_t *sse,
+                            BLOCK_SIZE bsize, int64_t ref_best_rd) {
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const TX_SIZE uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
@@ -1196,8 +1138,7 @@ static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x,
   int64_t pndist = 0, pnsse = 0;
   int is_cost_valid = 1;
 
-  if (ref_best_rd < 0)
-    is_cost_valid = 0;
+  if (ref_best_rd < 0) is_cost_valid = 0;
 
   if (is_inter_block(mbmi) && is_cost_valid) {
     int plane;
@@ -1211,9 +1152,8 @@ static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x,
   *skippable = 1;
 
   for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
-    txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse,
-                     ref_best_rd, plane, bsize, uv_tx_size,
-                     cpi->sf.use_fast_coef_costing);
+    txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse, ref_best_rd, plane,
+                     bsize, uv_tx_size, cpi->sf.use_fast_coef_costing);
     if (pnrate == INT_MAX) {
       is_cost_valid = 0;
       break;
@@ -1236,10 +1176,10 @@ static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x,
 }
 
 static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
-                                       PICK_MODE_CONTEXT *ctx,
-                                       int *rate, int *rate_tokenonly,
-                                       int64_t *distortion, int *skippable,
-                                       BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
+                                       PICK_MODE_CONTEXT *ctx, int *rate,
+                                       int *rate_tokenonly, int64_t *distortion,
+                                       int *skippable, BLOCK_SIZE bsize,
+                                       TX_SIZE max_tx_size) {
   MACROBLOCKD *xd = &x->e_mbd;
   PREDICTION_MODE mode;
   PREDICTION_MODE mode_selected = DC_PRED;
@@ -1249,27 +1189,25 @@ static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
   memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
   for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
-    if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
-      continue;
+    if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue;
 
     xd->mi[0]->mbmi.uv_mode = mode;
 
-    if (!super_block_uvrd(cpi, x, &this_rate_tokenonly,
-                          &this_distortion, &s, &this_sse, bsize, best_rd))
+    if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+                          &this_sse, bsize, best_rd))
       continue;
     this_rate = this_rate_tokenonly +
-        cpi->intra_uv_mode_cost[xd->mi[0]->mbmi.mode][mode];
+                cpi->intra_uv_mode_cost[xd->mi[0]->mbmi.mode][mode];
     this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
 
     if (this_rd < best_rd) {
-      mode_selected   = mode;
-      best_rd         = this_rd;
-      *rate           = this_rate;
+      mode_selected = mode;
+      best_rd = this_rd;
+      *rate = this_rate;
       *rate_tokenonly = this_rate_tokenonly;
-      *distortion     = this_distortion;
-      *skippable      = s;
-      if (!x->select_tx_size)
-        swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
+      *distortion = this_distortion;
+      *skippable = s;
+      if (!x->select_tx_size) swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
     }
   }
 
@@ -1277,38 +1215,36 @@ static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
   return best_rd;
 }
 
-static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x,
-                              int *rate, int *rate_tokenonly,
-                              int64_t *distortion, int *skippable,
-                              BLOCK_SIZE bsize) {
+static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                              int *rate_tokenonly, int64_t *distortion,
+                              int *skippable, BLOCK_SIZE bsize) {
   int64_t unused;
 
   x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
   memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
-  super_block_uvrd(cpi, x, rate_tokenonly, distortion,
-                   skippable, &unused, bsize, INT64_MAX);
+  super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused,
+                   bsize, INT64_MAX);
   *rate = *rate_tokenonly +
-      cpi->intra_uv_mode_cost[x->e_mbd.mi[0]->mbmi.mode][DC_PRED];
+          cpi->intra_uv_mode_cost[x->e_mbd.mi[0]->mbmi.mode][DC_PRED];
   return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
 }
 
 static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
-                                 PICK_MODE_CONTEXT *ctx,
-                                 BLOCK_SIZE bsize, TX_SIZE max_tx_size,
-                                 int *rate_uv, int *rate_uv_tokenonly,
-                                 int64_t *dist_uv, int *skip_uv,
-                                 PREDICTION_MODE *mode_uv) {
+                                 PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
+                                 TX_SIZE max_tx_size, int *rate_uv,
+                                 int *rate_uv_tokenonly, int64_t *dist_uv,
+                                 int *skip_uv, PREDICTION_MODE *mode_uv) {
   // Use an estimated rd for uv_intra based on DC_PRED if the
   // appropriate speed flag is set.
   if (cpi->sf.use_uv_intra_rd_estimate) {
-    rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv,
-                   skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
-  // Else do a proper rd search for each possible transform size that may
-  // be considered in the main rd loop.
+    rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
+                   bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
+    // Else do a proper rd search for each possible transform size that may
+    // be considered in the main rd loop.
   } else {
-    rd_pick_intra_sbuv_mode(cpi, x, ctx,
-                            rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
-                            bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);
+    rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv,
+                            skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
+                            max_tx_size);
   }
   *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
 }
@@ -1320,8 +1256,7 @@ static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
 }
 
 static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
-                                int i,
-                                PREDICTION_MODE mode, int_mv this_mv[2],
+                                int i, PREDICTION_MODE mode, int_mv this_mv[2],
                                 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                                 int_mv seg_mvs[MAX_REF_FRAMES],
                                 int_mv *best_ref_mv[2], const int *mvjcost,
@@ -1339,11 +1274,12 @@ static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
     case NEWMV:
       this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
       thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
-                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
       if (is_compound) {
         this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
-        thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
-                                      mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+        thismvcost +=
+            vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
+                             mvcost, MV_COST_WEIGHT_SUB);
       }
       break;
     case NEARMV:
@@ -1354,16 +1290,13 @@ static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
       break;
     case ZEROMV:
       this_mv[0].as_int = 0;
-      if (is_compound)
-        this_mv[1].as_int = 0;
-      break;
-    default:
+      if (is_compound) this_mv[1].as_int = 0;
       break;
+    default: break;
   }
 
   mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
-  if (is_compound)
-    mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
+  if (is_compound) mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
 
   mic->bmi[i].as_mode = mode;
 
@@ -1372,19 +1305,14 @@ static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
       memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));
 
   return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mbmi->ref_frame[0]]) +
-            thismvcost;
+         thismvcost;
 }
 
-static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
-                                       MACROBLOCK *x,
-                                       int64_t best_yrd,
-                                       int i,
-                                       int *labelyrate,
+static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
+                                       int64_t best_yrd, int i, int *labelyrate,
                                        int64_t *distortion, int64_t *sse,
-                                       ENTROPY_CONTEXT *ta,
-                                       ENTROPY_CONTEXT *tl,
-                                       int ir, int ic,
-                                       int mi_row, int mi_col) {
+                                       ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
+                                       int ir, int ic, int mi_row, int mi_col) {
   int k;
   MACROBLOCKD *xd = &x->e_mbd;
   struct macroblockd_plane *const pd = &xd->plane[0];
@@ -1398,8 +1326,8 @@ static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
 
   const uint8_t *const src =
       &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
-  uint8_t *const dst = &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i,
-                                                            pd->dst.stride)];
+  uint8_t *const dst =
+      &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
   int64_t thisdistortion = 0, thissse = 0;
   int thisrate = 0;
   TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i);
@@ -1421,12 +1349,13 @@ static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     vpx_highbd_subtract_block(
-        height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
-        8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
+        height, width,
+        vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
+        p->src.stride, dst, pd->dst.stride, xd->bd);
   } else {
-    vpx_subtract_block(
-        height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
-        8, src, p->src.stride, dst, pd->dst.stride);
+    vpx_subtract_block(height, width, vp10_raster_block_offset_int16(
+                                          BLOCK_8X8, i, p->src_diff),
+                       8, src, p->src.stride, dst, pd->dst.stride);
   }
 #else
   vpx_subtract_block(height, width,
@@ -1438,7 +1367,7 @@ static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
   for (idy = 0; idy < height / 4; ++idy) {
     for (idx = 0; idx < width / 4; ++idx) {
       int64_t ssz, rd, rd1, rd2;
-      tran_low_t* coeff;
+      tran_low_t *coeff;
 
       k += (idy * 2 + idx);
       coeff = BLOCK_OFFSET(p->coeff, k);
@@ -1447,26 +1376,24 @@ static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
       vp10_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
 #if CONFIG_VPX_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-        thisdistortion += vp10_highbd_block_error(coeff,
-                                                 BLOCK_OFFSET(pd->dqcoeff, k),
-                                                 16, &ssz, xd->bd);
+        thisdistortion += vp10_highbd_block_error(
+            coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, xd->bd);
       } else {
-        thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
-                                          16, &ssz);
+        thisdistortion +=
+            vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
       }
 #else
-      thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
-                                        16, &ssz);
+      thisdistortion +=
+          vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
       thissse += ssz;
-      thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4,
-                              so->scan, so->neighbors,
-                              cpi->sf.use_fast_coef_costing);
+      thisrate +=
+          cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4, so->scan,
+                      so->neighbors, cpi->sf.use_fast_coef_costing);
       rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
       rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
       rd = VPXMIN(rd1, rd2);
-      if (rd >= best_yrd)
-        return INT64_MAX;
+      if (rd >= best_yrd) return INT64_MAX;
     }
   }
 
@@ -1504,10 +1431,8 @@ typedef struct {
 } BEST_SEG_INFO;
 
 static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
-  return (mv->row >> 3) < x->mv_row_min ||
-         (mv->row >> 3) > x->mv_row_max ||
-         (mv->col >> 3) < x->mv_col_min ||
-         (mv->col >> 3) > x->mv_col_max;
+  return (mv->row >> 3) < x->mv_row_min || (mv->row >> 3) > x->mv_row_max ||
+         (mv->col >> 3) < x->mv_col_min || (mv->col >> 3) > x->mv_col_max;
 }
 
 static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
@@ -1515,14 +1440,16 @@ static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
   struct macroblock_plane *const p = &x->plane[0];
   struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
 
-  p->src.buf = &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i,
-                                                   p->src.stride)];
+  p->src.buf =
+      &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
   assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
-  pd->pre[0].buf = &pd->pre[0].buf[vp10_raster_block_offset(BLOCK_8X8, i,
-                                                           pd->pre[0].stride)];
+  pd->pre[0].buf =
+      &pd->pre[0]
+           .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
   if (has_second_ref(mbmi))
-    pd->pre[1].buf = &pd->pre[1].buf[vp10_raster_block_offset(BLOCK_8X8, i,
-                                                           pd->pre[1].stride)];
+    pd->pre[1].buf =
+        &pd->pre[1]
+             .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
 }
 
 static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
@@ -1530,8 +1457,7 @@ static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
   MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
   x->plane[0].src = orig_src;
   x->e_mbd.plane[0].pre[0] = orig_pre[0];
-  if (has_second_ref(mbmi))
-    x->e_mbd.plane[0].pre[1] = orig_pre[1];
+  if (has_second_ref(mbmi)) x->e_mbd.plane[0].pre[1] = orig_pre[1];
 }
 
 static INLINE int mv_has_subpel(const MV *mv) {
@@ -1540,10 +1466,11 @@ static INLINE int mv_has_subpel(const MV *mv) {
 
 // Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
 // TODO(aconverse): Find out if this is still productive then clean up or remove
-static int check_best_zero_mv(
-    const VP10_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
-    int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
-    const MV_REFERENCE_FRAME ref_frames[2]) {
+static int check_best_zero_mv(const VP10_COMP *cpi,
+                              const uint8_t mode_context[MAX_REF_FRAMES],
+                              int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
+                              int this_mode,
+                              const MV_REFERENCE_FRAME ref_frames[2]) {
   if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
       frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
       (ref_frames[1] == NONE ||
@@ -1575,10 +1502,8 @@ static int check_best_zero_mv(
   return 1;
 }
 
-static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
-                                BLOCK_SIZE bsize,
-                                int_mv *frame_mv,
-                                int mi_row, int mi_col,
+static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+                                int_mv *frame_mv, int mi_row, int mi_col,
                                 int_mv single_newmv[MAX_REF_FRAMES],
                                 int *rate_mv) {
   const VP10_COMMON *const cm = &cpi->common;
@@ -1586,8 +1511,8 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
   const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-  const int refs[2] = {mbmi->ref_frame[0],
-                       mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]};
+  const int refs[2] = { mbmi->ref_frame[0],
+                        mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] };
   int_mv ref_mv[2];
   int ite, ref;
   const InterpKernel *kernel = vp10_filter_kernels[mbmi->interp_filter];
@@ -1595,13 +1520,13 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
 
   // Do joint motion search in compound mode to get more accurate mv.
   struct buf_2d backup_yv12[2][MAX_MB_PLANE];
-  int last_besterr[2] = {INT_MAX, INT_MAX};
+  int last_besterr[2] = { INT_MAX, INT_MAX };
   const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
     vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
     vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
   };
 
-  // Prediction buffer from second frame.
+// Prediction buffer from second frame.
 #if CONFIG_VPX_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
   uint8_t *second_pred;
@@ -1620,21 +1545,20 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
       for (i = 0; i < MAX_MB_PLANE; i++)
         backup_yv12[ref][i] = xd->plane[i].pre[ref];
       vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
-                           NULL);
+                            NULL);
     }
 
     frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
   }
 
-  // Since we have scaled the reference frames to match the size of the current
-  // frame we must use a unit scaling factor during mode selection.
+// Since we have scaled the reference frames to match the size of the current
+// frame we must use a unit scaling factor during mode selection.
 #if CONFIG_VPX_HIGHBITDEPTH
-  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
-                                    cm->width, cm->height,
-                                    cm->use_highbitdepth);
+  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+                                     cm->height, cm->use_highbitdepth);
 #else
-  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
-                                    cm->width, cm->height);
+  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+                                     cm->height);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
   // Allow joint search multiple times iteratively for each reference frame
@@ -1658,41 +1582,30 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
     ref_yv12[0] = xd->plane[0].pre[0];
     ref_yv12[1] = xd->plane[0].pre[1];
 
-    // Get the prediction block from the 'other' reference frame.
+// Get the prediction block from the 'other' reference frame.
 #if CONFIG_VPX_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
-      vp10_highbd_build_inter_predictor(ref_yv12[!id].buf,
-                                       ref_yv12[!id].stride,
-                                       second_pred, pw,
-                                       &frame_mv[refs[!id]].as_mv,
-                                       &sf, pw, ph, 0,
-                                       kernel, MV_PRECISION_Q3,
-                                       mi_col * MI_SIZE, mi_row * MI_SIZE,
-                                       xd->bd);
+      vp10_highbd_build_inter_predictor(
+          ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
+          &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
+          mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
     } else {
       second_pred = (uint8_t *)second_pred_alloc_16;
-      vp10_build_inter_predictor(ref_yv12[!id].buf,
-                                ref_yv12[!id].stride,
-                                second_pred, pw,
-                                &frame_mv[refs[!id]].as_mv,
-                                &sf, pw, ph, 0,
-                                kernel, MV_PRECISION_Q3,
-                                mi_col * MI_SIZE, mi_row * MI_SIZE);
+      vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+                                 second_pred, pw, &frame_mv[refs[!id]].as_mv,
+                                 &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
+                                 mi_col * MI_SIZE, mi_row * MI_SIZE);
     }
 #else
-    vp10_build_inter_predictor(ref_yv12[!id].buf,
-                              ref_yv12[!id].stride,
-                              second_pred, pw,
-                              &frame_mv[refs[!id]].as_mv,
-                              &sf, pw, ph, 0,
-                              kernel, MV_PRECISION_Q3,
-                              mi_col * MI_SIZE, mi_row * MI_SIZE);
+    vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+                               second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
+                               pw, ph, 0, kernel, MV_PRECISION_Q3,
+                               mi_col * MI_SIZE, mi_row * MI_SIZE);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
     // Do compound motion search on the current reference frame.
-    if (id)
-      xd->plane[0].pre[0] = ref_yv12[id];
+    if (id) xd->plane[0].pre[0] = ref_yv12[id];
     vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
 
     // Use the mv result from the single mode as mv predictor.
@@ -1702,13 +1615,12 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
     tmp_mv.row >>= 3;
 
     // Small-range full-pixel motion search.
-    bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb,
-                                       search_range,
-                                       &cpi->fn_ptr[bsize],
-                                       &ref_mv[id].as_mv, second_pred);
+    bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
+                                        &cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
+                                        second_pred);
     if (bestsme < INT_MAX)
       bestsme = vp10_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
-                                      second_pred, &cpi->fn_ptr[bsize], 1);
+                                       second_pred, &cpi->fn_ptr[bsize], 1);
 
     x->mv_col_min = tmp_col_min;
     x->mv_col_max = tmp_col_max;
@@ -1719,21 +1631,14 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
       int dis; /* TODO: use dis in distortion calculation later. */
       unsigned int sse;
       bestsme = cpi->find_fractional_mv_step(
-          x, &tmp_mv,
-          &ref_mv[id].as_mv,
-          cpi->common.allow_high_precision_mv,
-          x->errorperbit,
-          &cpi->fn_ptr[bsize],
-          0, cpi->sf.mv.subpel_iters_per_step,
-          NULL,
-          x->nmvjointcost, x->mvcost,
-          &dis, &sse, second_pred,
-          pw, ph);
+          x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
+          x->errorperbit, &cpi->fn_ptr[bsize], 0,
+          cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
+          &dis, &sse, second_pred, pw, ph);
     }
 
     // Restore the pointer to the first (possibly scaled) prediction buffer.
-    if (id)
-      xd->plane[0].pre[0] = ref_yv12[0];
+    if (id) xd->plane[0].pre[0] = ref_yv12[0];
 
     if (bestsme < last_besterr[id]) {
       frame_mv[refs[id]].as_mv = tmp_mv;
@@ -1754,22 +1659,17 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
     }
 
     *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
-                                &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
-                                x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+                                 &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
+                                 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
   }
 }
 
-static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
-                                        int_mv *best_ref_mv,
-                                        int_mv *second_best_ref_mv,
-                                        int64_t best_rd, int *returntotrate,
-                                        int *returnyrate,
-                                        int64_t *returndistortion,
-                                        int *skippable, int64_t *psse,
-                                        int mvthresh,
-                                        int_mv seg_mvs[4][MAX_REF_FRAMES],
-                                        BEST_SEG_INFO *bsi_buf, int filter_idx,
-                                        int mi_row, int mi_col) {
+static int64_t rd_pick_best_sub8x8_mode(
+    VP10_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+    int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
+    int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
+    int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
+    int filter_idx, int mi_row, int mi_col) {
   int i;
   BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
   MACROBLOCKD *xd = &x->e_mbd;
@@ -1803,8 +1703,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
   bsi->mvp.as_int = best_ref_mv->as_int;
   bsi->mvthresh = mvthresh;
 
-  for (i = 0; i < 4; i++)
-    bsi->modes[i] = ZEROMV;
+  for (i = 0; i < 4; i++) bsi->modes[i] = ZEROMV;
 
   memcpy(t_above, pd->above_context, sizeof(t_above));
   memcpy(t_left, pd->left_context, sizeof(t_left));
@@ -1830,10 +1729,9 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
       for (ref = 0; ref < 1 + has_second_rf; ++ref) {
         const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
         frame_mv[ZEROMV][frame].as_int = 0;
-        vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
-                                      &frame_mv[NEARESTMV][frame],
-                                      &frame_mv[NEARMV][frame],
-                                      mbmi_ext->mode_context);
+        vp10_append_sub8x8_mvs_for_idx(
+            cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
+            &frame_mv[NEARMV][frame], mbmi_ext->mode_context);
       }
 
       // search for the best motion vector on this segment
@@ -1843,8 +1741,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
         mode_idx = INTER_OFFSET(this_mode);
         bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
-        if (!(inter_mode_mask & (1 << this_mode)))
-          continue;
+        if (!(inter_mode_mask & (1 << this_mode))) continue;
 
         if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
                                 this_mode, mbmi->ref_frame))
@@ -1869,15 +1766,13 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
           /* Is the best so far sufficiently good that we can't justify doing
            * a new motion search. */
-          if (best_rd < label_mv_thresh)
-            break;
+          if (best_rd < label_mv_thresh) break;
 
           if (cpi->oxcf.mode != BEST) {
             // use previous block's result as next block's MV predictor.
             if (i > 0) {
               bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
-              if (i == 2)
-                bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
+              if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
             }
           }
           if (i == 0)
@@ -1890,8 +1785,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
             // Take wtd average of the step_params based on the last frame's
             // max mv magnitude and the best ref mvs of the current block for
             // the given reference.
-            step_param = (vp10_init_search_range(max_mv) +
-                              cpi->mv_step_param) / 2;
+            step_param =
+                (vp10_init_search_range(max_mv) + cpi->mv_step_param) / 2;
           } else {
             step_param = cpi->mv_step_param;
           }
@@ -1913,24 +1808,16 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
           bestsme = vp10_full_pixel_search(
               cpi, x, bsize, &mvp_full, step_param, sadpb,
               cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
-              &bsi->ref_mv[0]->as_mv, new_mv,
-              INT_MAX, 1);
+              &bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);
 
           if (bestsme < INT_MAX) {
             int distortion;
             cpi->find_fractional_mv_step(
-                x,
-                new_mv,
-                &bsi->ref_mv[0]->as_mv,
-                cm->allow_high_precision_mv,
+                x, new_mv, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
                 x->errorperbit, &cpi->fn_ptr[bsize],
-                cpi->sf.mv.subpel_force_stop,
-                cpi->sf.mv.subpel_iters_per_step,
-                cond_cost_list(cpi, cost_list),
-                x->nmvjointcost, x->mvcost,
-                &distortion,
-                &x->pred_sse[mbmi->ref_frame[0]],
-                NULL, 0, 0);
+                cpi->sf.mv.subpel_force_stop, cpi->sf.mv.subpel_iters_per_step,
+                cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
+                &distortion, &x->pred_sse[mbmi->ref_frame[0]], NULL, 0, 0);
 
             // save motion search result for use in compound prediction
             seg_mvs[i][mbmi->ref_frame[0]].as_mv = *new_mv;
@@ -1955,9 +1842,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
           mi_buf_shift(x, i);
           if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
             int rate_mv;
-            joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
-                                mi_row, mi_col, seg_mvs[i],
-                                &rate_mv);
+            joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
+                                mi_col, seg_mvs[i], &rate_mv);
             seg_mvs[i][mbmi->ref_frame[0]].as_int =
                 frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
             seg_mvs[i][mbmi->ref_frame[1]].as_int =
@@ -1967,10 +1853,9 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
           mi_buf_restore(x, orig_src, orig_pre);
         }
 
-        bsi->rdstat[i][mode_idx].brate =
-            set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode],
-                                 frame_mv, seg_mvs[i], bsi->ref_mv,
-                                 x->nmvjointcost, x->mvcost);
+        bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs(
+            cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv, seg_mvs[i],
+            bsi->ref_mv, x->nmvjointcost, x->mvcost);
 
         for (ref = 0; ref < 1 + has_second_rf; ++ref) {
           bsi->rdstat[i][mode_idx].mvs[ref].as_int =
@@ -1985,8 +1870,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
         // Trap vectors that reach beyond the UMV borders
         if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) ||
-            (has_second_rf &&
-             mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
+            (has_second_rf && mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
           continue;
 
         if (filter_idx > 0) {
@@ -1997,7 +1881,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
           for (ref = 0; ref < 1 + has_second_rf; ++ref) {
             subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
             have_ref &= mode_mv[this_mode][ref].as_int ==
-                ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
+                        ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
           }
 
           if (filter_idx > 1 && !subpelmv && !have_ref) {
@@ -2005,7 +1889,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
             have_ref = 1;
             for (ref = 0; ref < 1 + has_second_rf; ++ref)
               have_ref &= mode_mv[this_mode][ref].as_int ==
-                  ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
+                          ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
           }
 
           if (!subpelmv && have_ref &&
@@ -2027,19 +1911,14 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
           }
         }
 
-        bsi->rdstat[i][mode_idx].brdcost =
-            encode_inter_mb_segment(cpi, x,
-                                    bsi->segment_rd - this_segment_rd, i,
-                                    &bsi->rdstat[i][mode_idx].byrate,
-                                    &bsi->rdstat[i][mode_idx].bdist,
-                                    &bsi->rdstat[i][mode_idx].bsse,
-                                    bsi->rdstat[i][mode_idx].ta,
-                                    bsi->rdstat[i][mode_idx].tl,
-                                    idy, idx,
-                                    mi_row, mi_col);
+        bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment(
+            cpi, x, bsi->segment_rd - this_segment_rd, i,
+            &bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist,
+            &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta,
+            bsi->rdstat[i][mode_idx].tl, idy, idx, mi_row, mi_col);
         if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
-          bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv,
-                                            bsi->rdstat[i][mode_idx].brate, 0);
+          bsi->rdstat[i][mode_idx].brdcost +=
+              RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0);
           bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
           bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
           if (num_4x4_blocks_wide > 1)
@@ -2095,11 +1974,9 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
   bsi->sse = block_sse;
 
   // update the coding decisions
-  for (k = 0; k < 4; ++k)
-    bsi->modes[k] = mi->bmi[k].as_mode;
+  for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode;
 
-  if (bsi->segment_rd > best_rd)
-    return INT64_MAX;
+  if (bsi->segment_rd > best_rd) return INT64_MAX;
   /* set it to the best */
   for (i = 0; i < 4; i++) {
     mode_idx = INTER_OFFSET(bsi->modes[i]);
@@ -2124,16 +2001,15 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
 }
 
 static void estimate_ref_frame_costs(const VP10_COMMON *cm,
-                                     const MACROBLOCKD *xd,
-                                     int segment_id,
+                                     const MACROBLOCKD *xd, int segment_id,
                                      unsigned int *ref_costs_single,
                                      unsigned int *ref_costs_comp,
                                      vpx_prob *comp_mode_p) {
-  int seg_ref_active = segfeature_active(&cm->seg, segment_id,
-                                         SEG_LVL_REF_FRAME);
+  int seg_ref_active =
+      segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
   if (seg_ref_active) {
     memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
-    memset(ref_costs_comp,   0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
+    memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
     *comp_mode_p = 128;
   } else {
     vpx_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
@@ -2158,13 +2034,13 @@ static void estimate_ref_frame_costs(const VP10_COMMON *cm,
 
       ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
           ref_costs_single[ALTREF_FRAME] = base_cost;
-      ref_costs_single[LAST_FRAME]   += vp10_cost_bit(ref_single_p1, 0);
+      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
       ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
       ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
       ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
       ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
     } else {
-      ref_costs_single[LAST_FRAME]   = 512;
+      ref_costs_single[LAST_FRAME] = 512;
       ref_costs_single[GOLDEN_FRAME] = 512;
       ref_costs_single[ALTREF_FRAME] = 512;
     }
@@ -2175,20 +2051,19 @@ static void estimate_ref_frame_costs(const VP10_COMMON *cm,
       if (cm->reference_mode == REFERENCE_MODE_SELECT)
         base_cost += vp10_cost_bit(comp_inter_p, 1);
 
-      ref_costs_comp[LAST_FRAME]   = base_cost + vp10_cost_bit(ref_comp_p, 0);
+      ref_costs_comp[LAST_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 0);
       ref_costs_comp[GOLDEN_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 1);
     } else {
-      ref_costs_comp[LAST_FRAME]   = 512;
+      ref_costs_comp[LAST_FRAME] = 512;
       ref_costs_comp[GOLDEN_FRAME] = 512;
     }
   }
 }
 
-static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
-                         int mode_index,
-                         int64_t comp_pred_diff[REFERENCE_MODES],
-                         int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS],
-                         int skippable) {
+static void store_coding_context(
+    MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
+    int64_t comp_pred_diff[REFERENCE_MODES],
+    int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], int skippable) {
   MACROBLOCKD *const xd = &x->e_mbd;
 
   // Take a snapshot of the coding context so it can be
@@ -2199,7 +2074,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
   ctx->mic = *xd->mi[0];
   ctx->mbmi_ext = *x->mbmi_ext;
   ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
-  ctx->comp_pred_diff   = (int)comp_pred_diff[COMPOUND_REFERENCE];
+  ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
   ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
 
   memcpy(ctx->best_filter_diff, best_filter_diff,
@@ -2208,8 +2083,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
 
 static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
                                MV_REFERENCE_FRAME ref_frame,
-                               BLOCK_SIZE block_size,
-                               int mi_row, int mi_col,
+                               BLOCK_SIZE block_size, int mi_row, int mi_col,
                                int_mv frame_nearest_mv[MAX_REF_FRAMES],
                                int_mv frame_near_mv[MAX_REF_FRAMES],
                                struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
@@ -2228,8 +2102,8 @@ static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
   vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
 
   // Gets an initial list of candidate vectors from neighbours and orders them
-  vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
-                   NULL, NULL, mbmi_ext->mode_context);
+  vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col, NULL,
+                    NULL, mbmi_ext->mode_context);
 
   // Candidate refinement carried out at encoder and decoder
   vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
@@ -2240,18 +2114,17 @@ static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
   // in full and choose the best as the centre point for subsequent searches.
   // The current implementation doesn't support scaling.
   if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
-    vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
-                ref_frame, block_size);
+    vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+                 block_size);
 }
 
 static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
-                                 BLOCK_SIZE bsize,
-                                 int mi_row, int mi_col,
+                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int_mv *tmp_mv, int *rate_mv) {
   MACROBLOCKD *xd = &x->e_mbd;
   const VP10_COMMON *cm = &cpi->common;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-  struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
+  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
   int bestsme = INT_MAX;
   int step_param;
   int sadpb = x->sadperbit16;
@@ -2265,8 +2138,8 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
   int tmp_row_max = x->mv_row_max;
   int cost_list[5];
 
-  const YV12_BUFFER_CONFIG *scaled_ref_frame = vp10_get_scaled_ref_frame(cpi,
-                                                                        ref);
+  const YV12_BUFFER_CONFIG *scaled_ref_frame =
+      vp10_get_scaled_ref_frame(cpi, ref);
 
   MV pred_mv[3];
   pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -2278,8 +2151,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
     // Swap out the reference frame for a version that's been scaled to
     // match the resolution of the current frame, allowing the existing
     // motion search code to be used without additional modifications.
-    for (i = 0; i < MAX_MB_PLANE; i++)
-      backup_yv12[i] = xd->plane[i].pre[0];
+    for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
 
     vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
   }
@@ -2292,8 +2164,9 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
     // Take a weighted average of the step_params based on the last frame's
     // max mv magnitude and that based on the best ref mvs of the current
     // block for the given reference.
-    step_param = (vp10_init_search_range(x->max_mv_context[ref]) +
-                    cpi->mv_step_param) / 2;
+    step_param =
+        (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+        2;
   } else {
     step_param = cpi->mv_step_param;
   }
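
The weighted step size above simply splits the difference between the two hints. A toy illustration with invented values (the real inputs come from vp10_init_search_range() and cpi->mv_step_param):

#include <stdio.h>

/* Toy stand-in: average the mv-context hint and the frame-level hint. */
static int weighted_step_param(int range_hint, int frame_hint) {
  return (range_hint + frame_hint) / 2;
}

int main(void) {
  /* a coarse context hint of 8 and a fine frame hint of 4 give step 6 */
  printf("step_param = %d\n", weighted_step_param(8, 4));
  return 0;
}
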
@@ -2310,8 +2183,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
     int bhl = b_height_log2_lookup[bsize];
     int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
 
-    if (tlevel < 5)
-      step_param += 2;
+    if (tlevel < 5) step_param += 2;
 
     // prev_mv_sad is not set up for dynamically scaled frames.
     if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
@@ -2339,8 +2211,8 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
   mvp_full.row >>= 3;
 
   bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
-                                  cond_cost_list(cpi, cost_list),
-                                  &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
+                                   cond_cost_list(cpi, cost_list), &ref_mv,
+                                   &tmp_mv->as_mv, INT_MAX, 1);
 
   x->mv_col_min = tmp_col_min;
   x->mv_col_max = tmp_col_max;
@@ -2348,32 +2220,24 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
   x->mv_row_max = tmp_row_max;
 
   if (bestsme < INT_MAX) {
-    int dis;  /* TODO: use dis in distortion calculation later. */
-    cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
-                                 cm->allow_high_precision_mv,
-                                 x->errorperbit,
-                                 &cpi->fn_ptr[bsize],
-                                 cpi->sf.mv.subpel_force_stop,
-                                 cpi->sf.mv.subpel_iters_per_step,
-                                 cond_cost_list(cpi, cost_list),
-                                 x->nmvjointcost, x->mvcost,
-                                 &dis, &x->pred_sse[ref], NULL, 0, 0);
+    int dis; /* TODO: use dis in distortion calculation later. */
+    cpi->find_fractional_mv_step(
+        x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
+        &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+        cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+        x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
   }
-  *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
-                             x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+  *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+                              x->mvcost, MV_COST_WEIGHT);
 
-  if (cpi->sf.adaptive_motion_search)
-    x->pred_mv[ref] = tmp_mv->as_mv;
+  if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = tmp_mv->as_mv;
 
   if (scaled_ref_frame) {
     int i;
-    for (i = 0; i < MAX_MB_PLANE; i++)
-      xd->plane[i].pre[0] = backup_yv12[i];
+    for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
   }
 }
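
The scaled-reference handling in single_motion_search() follows a save/swap/restore pattern. A stripped-down sketch, with plane and buffer types invented for illustration:

#include <string.h>

#define TOY_MAX_PLANES 3 /* stand-in for MAX_MB_PLANE */

struct toy_buf { const unsigned char *data; int stride; };

/* Stash the per-plane prediction buffers, point them at the scaled copy for
 * the duration of the search, then put the originals back before returning. */
void toy_search_with_scaled_ref(struct toy_buf pre[TOY_MAX_PLANES],
                                const struct toy_buf scaled[TOY_MAX_PLANES]) {
  struct toy_buf backup[TOY_MAX_PLANES];
  int i;
  memcpy(backup, pre, sizeof(backup));                     /* save    */
  for (i = 0; i < TOY_MAX_PLANES; i++) pre[i] = scaled[i]; /* swap    */
  /* ... run the unmodified motion search here ... */
  memcpy(pre, backup, sizeof(backup));                     /* restore */
}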
 
-
-
 static INLINE void restore_dst_buf(MACROBLOCKD *xd,
                                    uint8_t *orig_dst[MAX_MB_PLANE],
                                    int orig_dst_stride[MAX_MB_PLANE]) {
@@ -2391,13 +2255,11 @@ static INLINE void restore_dst_buf(MACROBLOCKD *xd,
 // However, once established, that vector may be usable through the nearest and
 // near mv modes to reduce distortion in subsequent blocks and also improve
 // visual quality.
-static int discount_newmv_test(const VP10_COMP *cpi,
-                               int this_mode,
+static int discount_newmv_test(const VP10_COMP *cpi, int this_mode,
                                int_mv this_mv,
                                int_mv (*mode_mv)[MAX_REF_FRAMES],
                                int ref_frame) {
-  return (!cpi->rc.is_src_frame_alt_ref &&
-          (this_mode == NEWMV) &&
+  return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) &&
           (this_mv.as_int != 0) &&
           ((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
            (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
@@ -2406,32 +2268,25 @@ static int discount_newmv_test(const VP10_COMP *cpi,
 }
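
In plain terms, the test above fires only for a non-zero NEWMV whose NEAREST and NEAR candidates are zero or invalid, i.e. when no motion field has been established yet. A self-contained restatement with invented names and constants:

#include <stdbool.h>

#define TOY_INVALID_MV 0x80008000u /* stand-in for the codec's INVALID_MV */

/* mv arguments are the packed 32-bit form (as_int) used above; a zero word
 * is the zero vector. */
bool toy_discount_newmv(bool src_is_alt_ref, bool is_newmv, unsigned mv,
                        unsigned nearest_mv, unsigned near_mv) {
  return !src_is_alt_ref && is_newmv && mv != 0 &&
         (nearest_mv == 0 || nearest_mv == TOY_INVALID_MV) &&
         (near_mv == 0 || near_mv == TOY_INVALID_MV);
}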
 
 #define LEFT_TOP_MARGIN ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
-#define RIGHT_BOTTOM_MARGIN ((VPX_ENC_BORDER_IN_PIXELS -\
-                                VPX_INTERP_EXTEND) << 3)
+#define RIGHT_BOTTOM_MARGIN \
+  ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
 
 // TODO(jingning): this mv clamping function should be block size dependent.
 static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
   clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN,
-               xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
-               xd->mb_to_top_edge - LEFT_TOP_MARGIN,
-               xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
+           xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
+           xd->mb_to_top_edge - LEFT_TOP_MARGIN,
+           xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
 }
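
The << 3 in these margins converts whole pixels into the 1/8-pel units in which motion vectors are stored, so the clamp above keeps vectors from pointing further outside the frame than the padded border allows. A worked example with illustrative numbers (not the real VPX_* constants):

#include <stdio.h>

int main(void) {
  const int border_px = 160;      /* hypothetical encoder border  */
  const int interp_extend_px = 4; /* hypothetical filter overhang */
  /* pixels -> 1/8-pel units, mirroring LEFT_TOP_MARGIN above */
  printf("margin = %d (1/8 pel)\n", (border_px - interp_extend_px) << 3);
  return 0;
}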
 
-static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
-                                 BLOCK_SIZE bsize,
-                                 int *rate2, int64_t *distortion,
-                                 int *skippable,
-                                 int *rate_y, int *rate_uv,
-                                 int *disable_skip,
-                                 int_mv (*mode_mv)[MAX_REF_FRAMES],
-                                 int mi_row, int mi_col,
-                                 int_mv single_newmv[MAX_REF_FRAMES],
-                                 INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
-                                 int (*single_skippable)[MAX_REF_FRAMES],
-                                 int64_t *psse,
-                                 const int64_t ref_best_rd,
-                                 int64_t *mask_filter,
-                                 int64_t filter_cache[]) {
+static int64_t handle_inter_mode(
+    VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+    int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
+    int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
+    int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
+    INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
+    int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
+    const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
   VP10_COMMON *cm = &cpi->common;
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -2441,7 +2296,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
   int_mv *frame_mv = mode_mv[this_mode];
   int i;
   int refs[2] = { mbmi->ref_frame[0],
-    (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
+                  (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
   int_mv cur_mv[2];
 #if CONFIG_VPX_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
@@ -2457,13 +2312,16 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
   int orig_dst_stride[MAX_MB_PLANE];
   int rs = 0;
   INTERP_FILTER best_filter = SWITCHABLE;
-  uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0};
-  int64_t bsse[MAX_MB_PLANE << 2] = {0};
+  uint8_t skip_txfm[MAX_MB_PLANE << 2] = { 0 };
+  int64_t bsse[MAX_MB_PLANE << 2] = { 0 };
 
   int bsl = mi_width_log2_lookup[bsize];
-  int pred_filter_search = cpi->sf.cb_pred_filter_search ?
-      (((mi_row + mi_col) >> bsl) +
-       get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
+  int pred_filter_search =
+      cpi->sf.cb_pred_filter_search
+          ? (((mi_row + mi_col) >> bsl) +
+             get_chessboard_index(cm->current_video_frame)) &
+                0x1
+          : 0;
 
   int skip_txfm_sb = 0;
   int64_t skip_sse_sb = INT64_MAX;
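
The chessboard gate computed above for pred_filter_search alternates the full filter search between neighbouring blocks, and adding the per-frame chessboard index flips the pattern from one frame to the next, so each block runs the search roughly every other frame. A toy print-out of the parity (bsl and the loop bounds are invented):

#include <stdio.h>

int main(void) {
  const int bsl = 1; /* illustrative block-size log2 */
  int frame, row, col;
  for (frame = 0; frame < 2; frame++) {
    printf("frame %d:\n", frame);
    for (row = 0; row < 4; row += 2) {
      for (col = 0; col < 4; col += 2)
        printf("%d ", (((row + col) >> bsl) + frame) & 0x1);
      printf("\n");
    }
  }
  return 0;
}
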
@@ -2479,13 +2337,10 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
   if (pred_filter_search) {
     INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
-    if (xd->up_available)
-      af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
-    if (xd->left_available)
-      lf = xd->mi[-1]->mbmi.interp_filter;
+    if (xd->up_available) af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
+    if (xd->left_available) lf = xd->mi[-1]->mbmi.interp_filter;
 
-    if ((this_mode != NEWMV) || (af == lf))
-      best_filter = af;
+    if ((this_mode != NEWMV) || (af == lf)) best_filter = af;
   }
 
   if (is_comp_pred) {
@@ -2508,26 +2363,24 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
       frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
 
       if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
-        joint_motion_search(cpi, x, bsize, frame_mv,
-                            mi_row, mi_col, single_newmv, &rate_mv);
+        joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
+                            single_newmv, &rate_mv);
       } else {
-        rate_mv  = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+        rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
                                    &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
                                    x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
         rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
-                                   &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
-                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+                                    &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+                                    x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
       }
       *rate2 += rate_mv;
     } else {
       int_mv tmp_mv;
-      single_motion_search(cpi, x, bsize, mi_row, mi_col,
-                           &tmp_mv, &rate_mv);
-      if (tmp_mv.as_int == INVALID_MV)
-        return INT64_MAX;
+      single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
+      if (tmp_mv.as_int == INVALID_MV) return INT64_MAX;
 
-      frame_mv[refs[0]].as_int =
-          xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+      frame_mv[refs[0]].as_int = xd->mi[0]->bmi[0].as_mv[0].as_int =
+          tmp_mv.as_int;
       single_newmv[refs[0]].as_int = tmp_mv.as_int;
 
       // Estimate the rate implications of a new mv but discount this
@@ -2545,11 +2398,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
   for (i = 0; i < is_comp_pred + 1; ++i) {
     cur_mv[i] = frame_mv[refs[i]];
     // Clip "next_nearest" so that it does not extend to far out of image
-    if (this_mode != NEWMV)
-      clamp_mv2(&cur_mv[i].as_mv, xd);
+    if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd);
 
-    if (mv_check_bounds(x, &cur_mv[i].as_mv))
-      return INT64_MAX;
+    if (mv_check_bounds(x, &cur_mv[i].as_mv)) return INT64_MAX;
     mbmi->mv[i].as_int = cur_mv[i].as_int;
   }
 
@@ -2570,12 +2421,11 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
   //
   // Under some circumstances we discount the cost of new mv mode to encourage
   // initiation of a motion field.
-  if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
-                          mode_mv, refs[0])) {
-    *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode,
-                                 mbmi_ext->mode_context[refs[0]]),
-                     cost_mv_ref(cpi, NEARESTMV,
-                                 mbmi_ext->mode_context[refs[0]]));
+  if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
+                          refs[0])) {
+    *rate2 +=
+        VPXMIN(cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]),
+               cost_mv_ref(cpi, NEARESTMV, mbmi_ext->mode_context[refs[0]]));
   } else {
     *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
   }
@@ -2587,13 +2437,11 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
   pred_exists = 0;
   // Are all MVs integer pel for Y and UV
   intpel_mv = !mv_has_subpel(&mbmi->mv[0].as_mv);
-  if (is_comp_pred)
-    intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
+  if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
 
   // Search for best switchable filter by checking the variance of
   // pred error irrespective of whether the filter will be used
-  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
-    filter_cache[i] = INT64_MAX;
+  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
 
   if (cm->interp_filter != BILINEAR) {
     if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
@@ -2618,8 +2466,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
           filter_cache[i] = rd;
           filter_cache[SWITCHABLE_FILTERS] =
               VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
-          if (cm->interp_filter == SWITCHABLE)
-            rd += rs_rd;
+          if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
           *mask_filter = VPXMAX(*mask_filter, rd);
         } else {
           int rate_sum = 0;
@@ -2631,8 +2478,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
             continue;
           }
 
-          if ((cm->interp_filter == SWITCHABLE &&
-               (!i || best_needs_copy)) ||
+          if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) ||
               (cm->interp_filter != SWITCHABLE &&
                (cm->interp_filter == mbmi->interp_filter ||
                 (i == 0 && intpel_mv)))) {
@@ -2644,15 +2490,14 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
             }
           }
           vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
-          model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
-                          &tmp_skip_sb, &tmp_skip_sse);
+          model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
+                          &tmp_skip_sse);
 
           rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
           filter_cache[i] = rd;
           filter_cache[SWITCHABLE_FILTERS] =
               VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
-          if (cm->interp_filter == SWITCHABLE)
-            rd += rs_rd;
+          if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
           *mask_filter = VPXMAX(*mask_filter, rd);
 
           if (i == 0 && intpel_mv) {
@@ -2692,8 +2537,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     }
   }
   // Set the appropriate filter
-  mbmi->interp_filter = cm->interp_filter != SWITCHABLE ?
-      cm->interp_filter : best_filter;
+  mbmi->interp_filter =
+      cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
   rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
 
   if (pred_exists) {
@@ -2712,15 +2557,14 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     // switchable list (ex. bilinear) is indicated at the frame level, or
     // skip condition holds.
     vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
-    model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
-                    &skip_txfm_sb, &skip_sse_sb);
+    model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
+                    &skip_sse_sb);
     rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
     memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
     memcpy(bsse, x->bsse, sizeof(bsse));
   }
 
-  if (!is_comp_pred)
-    single_filter[this_mode][refs[0]] = mbmi->interp_filter;
+  if (!is_comp_pred) single_filter[this_mode][refs[0]] = mbmi->interp_filter;
 
   if (cpi->sf.adaptive_mode_search)
     if (is_comp_pred)
@@ -2737,8 +2581,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     }
   }
 
-  if (cm->interp_filter == SWITCHABLE)
-    *rate2 += rs;
+  if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
 
   memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
   memcpy(x->bsse, bsse, sizeof(bsse));
@@ -2750,8 +2593,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
     // Y cost and distortion
     vp10_subtract_plane(x, bsize, 0);
-    super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
-                    bsize, ref_best_rd);
+    super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
+                    ref_best_rd);
 
     if (*rate_y == INT_MAX) {
       *rate2 = INT_MAX;
@@ -2788,16 +2631,15 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     *distortion = skip_sse_sb;
   }
 
-  if (!is_comp_pred)
-    single_skippable[this_mode][refs[0]] = *skippable;
+  if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
 
   restore_dst_buf(xd, orig_dst, orig_dst_stride);
   return 0;  // The rate-distortion cost will be re-calculated by caller.
 }
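
The filter search and mode comparisons in handle_inter_mode() all lean on the RDCOST(rdmult, rddiv, rate, dist) macro, a Lagrangian trade-off between rate and distortion. A hedged stand-in showing only its shape (the fixed-point scaling here is an assumption, not the macro's real definition):

#include <stdint.h>

/* Toy Lagrangian cost: lambda-weighted rate plus scaled distortion. */
int64_t toy_rdcost(int rdmult, int rddiv, int rate, int64_t dist) {
  return (((int64_t)rate * rdmult) >> 8) + (dist << rddiv);
}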
 
-void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x,
-                               RD_COST *rd_cost, BLOCK_SIZE bsize,
-                               PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
+void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
+                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                int64_t best_rd) {
   VP10_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblockd_plane *const pd = xd->plane;
@@ -2810,9 +2652,8 @@ void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x,
   xd->mi[0]->mbmi.ref_frame[1] = NONE;
 
   if (bsize >= BLOCK_8X8) {
-    if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
-                               &dist_y, &y_skip, bsize,
-                               best_rd) >= best_rd) {
+    if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
+                               &y_skip, bsize, best_rd) >= best_rd) {
       rd_cost->rate = INT_MAX;
       return;
     }
@@ -2824,20 +2665,18 @@ void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x,
       return;
     }
   }
-  max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->mbmi.tx_size, bsize,
-                                       pd[1].subsampling_x,
-                                       pd[1].subsampling_y);
-  rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly,
-                          &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize),
-                          max_uv_tx_size);
+  max_uv_tx_size = get_uv_tx_size_impl(
+      xd->mi[0]->mbmi.tx_size, bsize, pd[1].subsampling_x, pd[1].subsampling_y);
+  rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv,
+                          &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
 
   if (y_skip && uv_skip) {
     rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
                     vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
     rd_cost->dist = dist_y + dist_uv;
   } else {
-    rd_cost->rate = rate_y + rate_uv +
-                      vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+    rd_cost->rate =
+        rate_y + rate_uv + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
     rd_cost->dist = dist_y + dist_uv;
   }
 
@@ -2851,10 +2690,8 @@ void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x,
 #define LOW_VAR_THRESH 16
 #define VLOW_ADJ_MAX 25
 #define VHIGH_ADJ_MAX 8
-static void rd_variance_adjustment(VP10_COMP *cpi,
-                                   MACROBLOCK *x,
-                                   BLOCK_SIZE bsize,
-                                   int64_t *this_rd,
+static void rd_variance_adjustment(VP10_COMP *cpi, MACROBLOCK *x,
+                                   BLOCK_SIZE bsize, int64_t *this_rd,
                                    MV_REFERENCE_FRAME ref_frame,
                                    unsigned int source_variance) {
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -2863,30 +2700,29 @@ static void rd_variance_adjustment(VP10_COMP *cpi,
   int64_t var_error = 0;
   int64_t var_factor = 0;
 
-  if (*this_rd == INT64_MAX)
-    return;
+  if (*this_rd == INT64_MAX) return;
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    recon_variance =
-      vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize, xd->bd);
+    recon_variance = vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
+                                                         bsize, xd->bd);
   } else {
     recon_variance =
-      vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+        vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
   }
 #else
   recon_variance =
-    vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+      vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
   if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
     absvar_diff = (source_variance > recon_variance)
-      ? (source_variance - recon_variance)
-      : (recon_variance - source_variance);
+                      ? (source_variance - recon_variance)
+                      : (recon_variance - source_variance);
 
     var_error = ((int64_t)200 * source_variance * recon_variance) /
-      (((int64_t)source_variance * source_variance) +
-       ((int64_t)recon_variance * recon_variance));
+                (((int64_t)source_variance * source_variance) +
+                 ((int64_t)recon_variance * recon_variance));
     var_error = 100 - var_error;
   }
 
@@ -2896,8 +2732,8 @@ static void rd_variance_adjustment(VP10_COMP *cpi,
   if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
       (source_variance > recon_variance)) {
     var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
-  // A second possible case of interest is where the source variance
-  // is very low and we wish to discourage false texture or motion trails.
+    // A second possible case of interest is where the source variance
+    // is very low and we wish to discourage false texture or motion trails.
   } else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
              (recon_variance > source_variance)) {
     var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
@@ -2905,12 +2741,11 @@ static void rd_variance_adjustment(VP10_COMP *cpi,
   *this_rd += (*this_rd * var_factor) / 100;
 }
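
The mismatch measure above is var_error = 100 - 200*s*r / (s*s + r*r): it is 0 when source and reconstruction variance agree and climbs toward 100 as they diverge, and the enclosing (source_variance + recon_variance) > LOW_VAR_THRESH check keeps the denominator non-zero. A standalone check of those values:

#include <stdio.h>
#include <stdint.h>

static int64_t toy_var_error(int64_t s, int64_t r) {
  return 100 - (200 * s * r) / (s * s + r * r);
}

int main(void) {
  printf("%lld\n", (long long)toy_var_error(100, 100)); /* 0: variances match */
  printf("%lld\n", (long long)toy_var_error(100, 25));  /* 53: large mismatch */
  return 0;
}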
 
-
 // Do we have an internal image edge (e.g. formatting bars).
 int vp10_internal_image_edge(VP10_COMP *cpi) {
   return (cpi->oxcf.pass == 2) &&
-    ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
-    (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
+         ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
+          (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
 }
 
 // Checks to see if a super block is on a horizontal image edge.
@@ -2970,16 +2805,13 @@ int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
 // Checks to see if a super block is at the edge of the active image.
 // In most cases this is the "real" edge unless there are formatting
 // bars embedded in the stream.
-int vp10_active_edge_sb(VP10_COMP *cpi,
-                       int mi_row, int mi_col) {
+int vp10_active_edge_sb(VP10_COMP *cpi, int mi_row, int mi_col) {
   return vp10_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
          vp10_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
 }
 
-void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
-                                TileDataEnc *tile_data,
-                                MACROBLOCK *x,
-                                int mi_row, int mi_col,
+void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
+                                MACROBLOCK *x, int mi_row, int mi_col,
                                 RD_COST *rd_cost, BLOCK_SIZE bsize,
                                 PICK_MODE_CONTEXT *ctx,
                                 int64_t best_rd_so_far) {
@@ -3034,20 +2866,16 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 
   vp10_zero(best_mbmode);
 
-  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
-    filter_cache[i] = INT64_MAX;
+  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
 
   estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                            &comp_mode_p);
 
-  for (i = 0; i < REFERENCE_MODES; ++i)
-    best_pred_rd[i] = INT64_MAX;
+  for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
     best_filter_rd[i] = INT64_MAX;
-  for (i = 0; i < TX_SIZES; i++)
-    rate_uv_intra[i] = INT_MAX;
-  for (i = 0; i < MAX_REF_FRAMES; ++i)
-    x->pred_sse[i] = INT_MAX;
+  for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX;
+  for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
   for (i = 0; i < MB_MODE_COUNT; ++i) {
     for (k = 0; k < MAX_REF_FRAMES; ++k) {
       single_inter_filter[i][k] = SWITCHABLE;
@@ -3140,12 +2968,11 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
   mode_skip_mask[INTRA_FRAME] |=
       ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
 
-  for (i = 0; i <= LAST_NEW_MV_INDEX; ++i)
-    mode_threshold[i] = 0;
+  for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) mode_threshold[i] = 0;
   for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
     mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
 
-  midx =  sf->schedule_mode_search ? mode_skip_start : 0;
+  midx = sf->schedule_mode_search ? mode_skip_start : 0;
   while (midx > 4) {
     uint8_t end_pos = 0;
     for (i = 5; i < midx; ++i) {
@@ -3180,8 +3007,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
     // skip mask to look at a subset of the remaining modes.
     if (midx == mode_skip_start && best_mode_index >= 0) {
       switch (best_mbmode.ref_frame[0]) {
-        case INTRA_FRAME:
-          break;
+        case INTRA_FRAME: break;
         case LAST_FRAME:
           ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
           ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -3190,13 +3016,9 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
           ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
           ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
           break;
-        case ALTREF_FRAME:
-          ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK;
-          break;
+        case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break;
         case NONE:
-        case MAX_REF_FRAMES:
-          assert(0 && "Invalid Reference frame");
-          break;
+        case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
       }
     }
 
@@ -3204,29 +3026,24 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
         (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
       continue;
 
-    if (mode_skip_mask[ref_frame] & (1 << this_mode))
-      continue;
+    if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
 
     // Test best rd so far against threshold for trying this mode.
     if (best_mode_skippable && sf->schedule_mode_search)
       mode_threshold[mode_index] <<= 1;
 
-    if (best_rd < mode_threshold[mode_index])
-      continue;
+    if (best_rd < mode_threshold[mode_index]) continue;
 
     comp_pred = second_ref_frame > INTRA_FRAME;
     if (comp_pred) {
-      if (!cpi->allow_comp_inter_inter)
-        continue;
+      if (!cpi->allow_comp_inter_inter) continue;
 
       // Skip compound inter modes if ARF is not available.
-      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
-        continue;
+      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
 
       // Do not allow compound prediction if the segment level reference frame
       // feature is in use as in this case there can only be one reference.
-      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
-        continue;
+      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
 
       if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
           best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
@@ -3255,19 +3072,17 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
         // one of the neighboring directional modes
         if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
             (this_mode >= D45_PRED && this_mode <= TM_PRED)) {
-          if (best_mode_index >= 0 &&
-              best_mbmode.ref_frame[0] > INTRA_FRAME)
+          if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME)
             continue;
         }
         if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
-          if (conditional_skipintra(this_mode, best_intra_mode))
-              continue;
+          if (conditional_skipintra(this_mode, best_intra_mode)) continue;
         }
       }
     } else {
-      const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
-      if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
-                              this_mode, ref_frames))
+      const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
+      if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode,
+                              ref_frames))
         continue;
     }
 
@@ -3277,8 +3092,8 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
     mbmi->ref_frame[1] = second_ref_frame;
     // Evaluate all sub-pel filters irrespective of whether we can use
     // them for this frame.
-    mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
-                                                          : cm->interp_filter;
+    mbmi->interp_filter =
+        cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
     mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
 
     x->skip = 0;
@@ -3287,25 +3102,23 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
     // Select prediction reference frames.
     for (i = 0; i < MAX_MB_PLANE; i++) {
       xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
-      if (comp_pred)
-        xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+      if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
     }
 
     if (ref_frame == INTRA_FRAME) {
       TX_SIZE uv_tx;
       struct macroblockd_plane *const pd = &xd->plane[1];
       memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
-      super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
-                      NULL, bsize, best_rd);
-      if (rate_y == INT_MAX)
-        continue;
+      super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
+                      best_rd);
+      if (rate_y == INT_MAX) continue;
 
       uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize, pd->subsampling_x,
                                   pd->subsampling_y);
       if (rate_uv_intra[uv_tx] == INT_MAX) {
-        choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx,
-                             &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx],
-                             &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]);
+        choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
+                             &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
+                             &skip_uv[uv_tx], &mode_uv[uv_tx]);
       }
 
       rate_uv = rate_uv_tokenonly[uv_tx];
@@ -3318,21 +3131,16 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
         rate2 += intra_cost_penalty;
       distortion2 = distortion_y + distortion_uv;
     } else {
-      this_rd = handle_inter_mode(cpi, x, bsize,
-                                  &rate2, &distortion2, &skippable,
-                                  &rate_y, &rate_uv,
-                                  &disable_skip, frame_mv,
-                                  mi_row, mi_col,
-                                  single_newmv, single_inter_filter,
-                                  single_skippable, &total_sse, best_rd,
-                                  &mask_filter, filter_cache);
-      if (this_rd == INT64_MAX)
-        continue;
+      this_rd = handle_inter_mode(
+          cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
+          &disable_skip, frame_mv, mi_row, mi_col, single_newmv,
+          single_inter_filter, single_skippable, &total_sse, best_rd,
+          &mask_filter, filter_cache);
+      if (this_rd == INT64_MAX) continue;
 
       compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
 
-      if (cm->reference_mode == REFERENCE_MODE_SELECT)
-        rate2 += compmode_cost;
+      if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
     }
 
     // Estimate the reference frame signaling cost and add it
@@ -3374,11 +3182,11 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 
     // Apply an adjustment to the rd value based on the similarity of the
     // source variance and reconstructed variance.
-    rd_variance_adjustment(cpi, x, bsize, &this_rd,
-                           ref_frame, x->source_variance);
+    rd_variance_adjustment(cpi, x, bsize, &this_rd, ref_frame,
+                           x->source_variance);
 
     if (ref_frame == INTRA_FRAME) {
-    // Keep record of best intra rd
+      // Keep record of best intra rd
       if (this_rd < best_intra_rd) {
         best_intra_rd = this_rd;
         best_intra_mode = mbmi->mode;
@@ -3415,8 +3223,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
         best_skip2 = this_skip2;
         best_mode_skippable = skippable;
 
-        if (!x->select_tx_size)
-          swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
+        if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
         memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
                sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
 
@@ -3436,8 +3243,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
             const int var_adjust = (x->source_variance < 16);
             scale -= var_adjust;
           }
-          if (ref_frame > INTRA_FRAME &&
-              distortion2 * scale < qstep * qstep) {
+          if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
             early_term = 1;
           }
         }
@@ -3471,8 +3277,9 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 
       /* keep record of best filter type */
       if (!mode_excluded && cm->interp_filter != BILINEAR) {
-        int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
-                              SWITCHABLE_FILTERS : cm->interp_filter];
+        int64_t ref =
+            filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
+                                                         : cm->interp_filter];
 
         for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
           int64_t adj_rd;
@@ -3493,11 +3300,9 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
       }
     }
 
-    if (early_term)
-      break;
+    if (early_term) break;
 
-    if (x->skip && !comp_pred)
-      break;
+    if (x->skip && !comp_pred) break;
   }
 
   // The inter modes' rate costs are not calculated precisely in some cases.
@@ -3505,20 +3310,23 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
   // ZEROMV. Here, checks are added for those cases, and the mode decisions
   // are corrected.
   if (best_mbmode.mode == NEWMV) {
-    const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0],
-        best_mbmode.ref_frame[1]};
+    const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
+                                         best_mbmode.ref_frame[1] };
     int comp_pred_mode = refs[1] > INTRA_FRAME;
 
     if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
-        ((comp_pred_mode && frame_mv[NEARESTMV][refs[1]].as_int ==
-            best_mbmode.mv[1].as_int) || !comp_pred_mode))
+        ((comp_pred_mode &&
+          frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
+         !comp_pred_mode))
       best_mbmode.mode = NEARESTMV;
     else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
-        ((comp_pred_mode && frame_mv[NEARMV][refs[1]].as_int ==
-            best_mbmode.mv[1].as_int) || !comp_pred_mode))
+             ((comp_pred_mode &&
+               frame_mv[NEARMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
+              !comp_pred_mode))
       best_mbmode.mode = NEARMV;
     else if (best_mbmode.mv[0].as_int == 0 &&
-        ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || !comp_pred_mode))
+             ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) ||
+              !comp_pred_mode))
       best_mbmode.mode = ZEROMV;
   }
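
The correction above relabels a winning NEWMV whose vector coincides with a cheaper-to-signal candidate. Schematically, for the single-reference case (enum and names invented):

typedef enum { TOY_NEARESTMV, TOY_NEARMV, TOY_ZEROMV, TOY_NEWMV } toy_mode;

/* best_mv is the packed vector the search settled on; nearest_mv/near_mv are
 * the candidate list entries for the same reference frame. */
toy_mode toy_canonicalize(toy_mode mode, unsigned best_mv,
                          unsigned nearest_mv, unsigned near_mv) {
  if (mode != TOY_NEWMV) return mode;
  if (best_mv == nearest_mv) return TOY_NEARESTMV;
  if (best_mv == near_mv) return TOY_NEARMV;
  if (best_mv == 0) return TOY_ZEROMV;
  return TOY_NEWMV;
}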
 
@@ -3537,8 +3345,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
       uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
       rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
                               &rate_uv_tokenonly[uv_tx_size],
-                              &dist_uv[uv_tx_size],
-                              &skip_uv[uv_tx_size],
+                              &dist_uv[uv_tx_size], &skip_uv[uv_tx_size],
                               bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
                               uv_tx_size);
     }
@@ -3550,7 +3357,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 
   if (!cpi->rc.is_src_frame_alt_ref)
     vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
-                              sf->adaptive_rd_thresh, bsize, best_mode_index);
+                               sf->adaptive_rd_thresh, bsize, best_mode_index);
 
   // macroblock modes
   *mbmi = best_mbmode;
@@ -3583,8 +3390,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
   if (!x->skip && !x->select_tx_size) {
     int has_high_freq_coeff = 0;
     int plane;
-    int max_plane = is_inter_block(&xd->mi[0]->mbmi)
-                        ? MAX_MB_PLANE : 1;
+    int max_plane = is_inter_block(&xd->mi[0]->mbmi) ? MAX_MB_PLANE : 1;
     for (plane = 0; plane < max_plane; ++plane) {
       x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
       has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
@@ -3604,13 +3410,11 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
                        best_filter_diff, best_mode_skippable);
 }
 
-void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi,
-                                        TileDataEnc *tile_data,
-                                        MACROBLOCK *x,
-                                        RD_COST *rd_cost,
-                                        BLOCK_SIZE bsize,
-                                        PICK_MODE_CONTEXT *ctx,
-                                        int64_t best_rd_so_far) {
+void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
+                                         MACROBLOCK *x, RD_COST *rd_cost,
+                                         BLOCK_SIZE bsize,
+                                         PICK_MODE_CONTEXT *ctx,
+                                         int64_t best_rd_so_far) {
   VP10_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
@@ -3629,10 +3433,8 @@ void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi,
   estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                            &comp_mode_p);
 
-  for (i = 0; i < MAX_REF_FRAMES; ++i)
-    x->pred_sse[i] = INT_MAX;
-  for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
-    x->pred_mv_sad[i] = INT_MAX;
+  for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
+  for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX;
 
   rd_cost->rate = INT_MAX;
 
@@ -3691,25 +3493,20 @@ void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi,
          (cm->interp_filter == mbmi->interp_filter));
 
   vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
-                            cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
+                             cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
 
   vp10_zero(best_pred_diff);
   vp10_zero(best_filter_diff);
 
-  if (!x->select_tx_size)
-    swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
-  store_coding_context(x, ctx, THR_ZEROMV,
-                       best_pred_diff, best_filter_diff, 0);
+  if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
+  store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
 }
 
-void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
-                                   TileDataEnc *tile_data,
-                                   MACROBLOCK *x,
-                                   int mi_row, int mi_col,
-                                   RD_COST *rd_cost,
-                                   BLOCK_SIZE bsize,
-                                   PICK_MODE_CONTEXT *ctx,
-                                   int64_t best_rd_so_far) {
+void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
+                                    MACROBLOCK *x, int mi_row, int mi_col,
+                                    RD_COST *rd_cost, BLOCK_SIZE bsize,
+                                    PICK_MODE_CONTEXT *ctx,
+                                    int64_t best_rd_so_far) {
   VP10_COMMON *const cm = &cpi->common;
   RD_OPT *const rd_opt = &cpi->rd;
   SPEED_FEATURES *const sf = &cpi->sf;
@@ -3739,7 +3536,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
   int skip_uv;
   PREDICTION_MODE mode_uv = DC_PRED;
   const int intra_cost_penalty = vp10_get_intra_cost_penalty(
-    cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
+      cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
   int_mv seg_mvs[4][MAX_REF_FRAMES];
   b_mode_info best_bmodes[4];
   int best_skip2 = 0;
@@ -3747,25 +3544,22 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
   int64_t mask_filter = 0;
   int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
   int internal_active_edge =
-    vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
+      vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
 
   memset(x->zcoeff_blk[TX_4X4], 0, 4);
   vp10_zero(best_mbmode);
 
-  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
-    filter_cache[i] = INT64_MAX;
+  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
 
   for (i = 0; i < 4; i++) {
     int j;
-    for (j = 0; j < MAX_REF_FRAMES; j++)
-      seg_mvs[i][j].as_int = INVALID_MV;
+    for (j = 0; j < MAX_REF_FRAMES; j++) seg_mvs[i][j].as_int = INVALID_MV;
   }
 
   estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                            &comp_mode_p);
 
-  for (i = 0; i < REFERENCE_MODES; ++i)
-    best_pred_rd[i] = INT64_MAX;
+  for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
     best_filter_rd[i] = INT64_MAX;
   rate_uv_intra = INT_MAX;
@@ -3775,8 +3569,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
     if (cpi->ref_frame_flags & flag_list[ref_frame]) {
       setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
-                         frame_mv[NEARESTMV], frame_mv[NEARMV],
-                         yv12_mb);
+                         frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
     } else {
       ref_frame_skip_mask[0] |= (1 << ref_frame);
       ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -3806,8 +3599,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
     if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
       if (ref_index == 3) {
         switch (best_mbmode.ref_frame[0]) {
-          case INTRA_FRAME:
-            break;
+          case INTRA_FRAME: break;
           case LAST_FRAME:
             ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
             ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -3820,9 +3612,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
             ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
             break;
           case NONE:
-          case MAX_REF_FRAMES:
-            assert(0 && "Invalid Reference frame");
-            break;
+          case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
         }
       }
     }
@@ -3840,14 +3630,11 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
 
     comp_pred = second_ref_frame > INTRA_FRAME;
     if (comp_pred) {
-      if (!cpi->allow_comp_inter_inter)
-        continue;
-      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
-        continue;
+      if (!cpi->allow_comp_inter_inter) continue;
+      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
       // Do not allow compound prediction if the segment level reference frame
       // feature is in use as in this case there can only be one reference.
-      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
-        continue;
+      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
 
       if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
           best_mbmode.ref_frame[0] == INTRA_FRAME)
@@ -3874,9 +3661,9 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
     if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
         get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
       continue;
-    // Disable this drop out case if the ref frame
-    // segment level feature is enabled for this segment. This is to
-    // prevent the possibility that we end up unable to pick any mode.
+      // Disable this drop out case if the ref frame
+      // segment level feature is enabled for this segment. This is to
+      // prevent the possibility that we end up unable to pick any mode.
     } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
       // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
       // unless ARNR filtering is enabled in which case we want
@@ -3892,33 +3679,29 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
     mbmi->ref_frame[1] = second_ref_frame;
     // Evaluate all sub-pel filters irrespective of whether we can use
     // them for this frame.
-    mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
-                                                          : cm->interp_filter;
+    mbmi->interp_filter =
+        cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
     x->skip = 0;
     set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
 
     // Select prediction reference frames.
     for (i = 0; i < MAX_MB_PLANE; i++) {
       xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
-      if (comp_pred)
-        xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+      if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
     }
 
     if (ref_frame == INTRA_FRAME) {
       int rate;
-      if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y,
-                                       &distortion_y, best_rd) >= best_rd)
+      if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y,
+                                       best_rd) >= best_rd)
         continue;
       rate2 += rate;
       rate2 += intra_cost_penalty;
       distortion2 += distortion_y;
 
       if (rate_uv_intra == INT_MAX) {
-        choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4,
-                             &rate_uv_intra,
-                             &rate_uv_tokenonly,
-                             &dist_uv, &skip_uv,
-                             &mode_uv);
+        choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra,
+                             &rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv);
       }
       rate2 += rate_uv_intra;
       rate_uv = rate_uv_tokenonly;
@@ -3934,19 +3717,20 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
       int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
       int tmp_best_skippable = 0;
       int switchable_filter_index;
-      int_mv *second_ref = comp_pred ?
-                             &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
+      int_mv *second_ref =
+          comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
       b_mode_info tmp_best_bmodes[16];
       MB_MODE_INFO tmp_best_mbmode;
       BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
       int pred_exists = 0;
       int uv_skippable;
 
-      this_rd_thresh = (ref_frame == LAST_FRAME) ?
-          rd_opt->threshes[segment_id][bsize][THR_LAST] :
-          rd_opt->threshes[segment_id][bsize][THR_ALTR];
-      this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
-      rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh;
+      this_rd_thresh = (ref_frame == LAST_FRAME)
+                           ? rd_opt->threshes[segment_id][bsize][THR_LAST]
+                           : rd_opt->threshes[segment_id][bsize][THR_ALTR];
+      this_rd_thresh = (ref_frame == GOLDEN_FRAME)
+                           ? rd_opt->threshes[segment_id][bsize][THR_GOLD]
+                           : this_rd_thresh;
       for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
         filter_cache[i] = INT64_MAX;
 
@@ -3958,8 +3742,9 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
                    ctx->pred_interp_filter < SWITCHABLE) {
           tmp_best_filter = ctx->pred_interp_filter;
         } else if (sf->adaptive_pred_interp_filter == 2) {
-          tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ?
-                              ctx->pred_interp_filter : 0;
+          tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE
+                                ? ctx->pred_interp_filter
+                                : 0;
         } else {
           for (switchable_filter_index = 0;
                switchable_filter_index < SWITCHABLE_FILTERS;
@@ -3968,24 +3753,19 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
             int64_t rs_rd;
             MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
             mbmi->interp_filter = switchable_filter_index;
-            tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
-                                              &mbmi_ext->ref_mvs[ref_frame][0],
-                                              second_ref, best_yrd, &rate,
-                                              &rate_y, &distortion,
-                                              &skippable, &total_sse,
-                                              (int) this_rd_thresh, seg_mvs,
-                                              bsi, switchable_filter_index,
-                                              mi_row, mi_col);
-
-            if (tmp_rd == INT64_MAX)
-              continue;
+            tmp_rd = rd_pick_best_sub8x8_mode(
+                cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
+                &rate, &rate_y, &distortion, &skippable, &total_sse,
+                (int)this_rd_thresh, seg_mvs, bsi, switchable_filter_index,
+                mi_row, mi_col);
+
+            if (tmp_rd == INT64_MAX) continue;
             rs = vp10_get_switchable_rate(cpi, xd);
             rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
             filter_cache[switchable_filter_index] = tmp_rd;
             filter_cache[SWITCHABLE_FILTERS] =
                 VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
-            if (cm->interp_filter == SWITCHABLE)
-              tmp_rd += rs_rd;
+            if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
 
             mask_filter = VPXMAX(mask_filter, tmp_rd);
 
@@ -4009,8 +3789,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
                 x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
               }
               pred_exists = 1;
-              if (switchable_filter_index == 0 &&
-                  sf->use_rd_breakout &&
+              if (switchable_filter_index == 0 && sf->use_rd_breakout &&
                   best_rd < INT64_MAX) {
                 if (tmp_best_rdu / 2 > best_rd) {
                   // skip searching the other filters if the first is
@@ -4025,22 +3804,19 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
         }
       }
 
-      if (tmp_best_rdu == INT64_MAX && pred_exists)
-        continue;
+      if (tmp_best_rdu == INT64_MAX && pred_exists) continue;
 
-      mbmi->interp_filter = (cm->interp_filter == SWITCHABLE ?
-                             tmp_best_filter : cm->interp_filter);
+      mbmi->interp_filter =
+          (cm->interp_filter == SWITCHABLE ? tmp_best_filter
+                                           : cm->interp_filter);
       if (!pred_exists) {
         // Handles the special case when a filter that is not in the
         // switchable list (bilinear, 6-tap) is indicated at the frame level
-        tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
-                                          &x->mbmi_ext->ref_mvs[ref_frame][0],
-                                          second_ref, best_yrd, &rate, &rate_y,
-                                          &distortion, &skippable, &total_sse,
-                                          (int) this_rd_thresh, seg_mvs, bsi, 0,
-                                          mi_row, mi_col);
-        if (tmp_rd == INT64_MAX)
-          continue;
+        tmp_rd = rd_pick_best_sub8x8_mode(
+            cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
+            &rate, &rate_y, &distortion, &skippable, &total_sse,
+            (int)this_rd_thresh, seg_mvs, bsi, 0, mi_row, mi_col);
+        if (tmp_rd == INT64_MAX) continue;
       } else {
         total_sse = tmp_best_sse;
         rate = tmp_best_rate;
@@ -4048,8 +3824,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
         distortion = tmp_best_distortion;
         skippable = tmp_best_skippable;
         *mbmi = tmp_best_mbmode;
-        for (i = 0; i < 4; i++)
-          xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
+        for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
       }
 
       rate2 += rate;
@@ -4064,15 +3839,14 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
 
       compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
 
-      tmp_best_rdu = best_rd -
-          VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
-                 RDCOST(x->rdmult, x->rddiv, 0, total_sse));
+      tmp_best_rdu =
+          best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
+                           RDCOST(x->rdmult, x->rddiv, 0, total_sse));
 
       if (tmp_best_rdu > 0) {
         // If even the 'Y' rd value of split is higher than best so far
         // then don't bother looking at UV
-        vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
-                                        BLOCK_8X8);
+        vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
         memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
         if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
                               &uv_sse, BLOCK_8X8, tmp_best_rdu))
@@ -4085,8 +3859,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
       }
     }
 
-    if (cm->reference_mode == REFERENCE_MODE_SELECT)
-      rate2 += compmode_cost;
+    if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
 
     // Estimate the reference frame signaling cost and add it
     // to the rolling cost variable.
@@ -4148,17 +3921,15 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
         rd_cost->dist = distortion2;
         rd_cost->rdcost = this_rd;
         best_rd = this_rd;
-        best_yrd = best_rd -
-                   RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
+        best_yrd =
+            best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
         best_mbmode = *mbmi;
         best_skip2 = this_skip2;
-        if (!x->select_tx_size)
-          swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
+        if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
         memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
                sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
 
-        for (i = 0; i < 4; i++)
-          best_bmodes[i] = xd->mi[0]->bmi[i];
+        for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i];
 
         // TODO(debargha): enhance this test with a better distortion prediction
         // based on qp, activity mask and history
@@ -4176,8 +3947,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
             const int var_adjust = (x->source_variance < 16);
             scale -= var_adjust;
           }
-          if (ref_frame > INTRA_FRAME &&
-              distortion2 * scale < qstep * qstep) {
+          if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
             early_term = 1;
           }
         }
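The single-line condition above is the early-termination test for the sub-8x8 search: once the accumulated distortion, scaled by a factor that is reduced for low-variance sources, drops below the square of the quantizer step, further refinement is unlikely to pay off. A hedged restatement (qstep and scale stand in for the encoder's derived values):

/* Early-out check, sketched: terminate the mode loop for inter blocks
 * whose scaled distortion already sits below qstep^2, i.e. below the
 * noise floor the quantizer will impose anyway. */
static int should_early_terminate(int is_inter_ref, int64_t distortion2,
                                  int scale, int qstep) {
  return is_inter_ref && distortion2 * scale < (int64_t)qstep * qstep;
}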
@@ -4211,8 +3981,9 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
     /* keep record of best filter type */
     if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
         cm->interp_filter != BILINEAR) {
-      int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
-                              SWITCHABLE_FILTERS : cm->interp_filter];
+      int64_t ref =
+          filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
+                                                       : cm->interp_filter];
       int64_t adj_rd;
       for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
         if (ref == INT64_MAX)
@@ -4231,11 +4002,9 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
       }
     }
 
-    if (early_term)
-      break;
+    if (early_term) break;
 
-    if (x->skip && !comp_pred)
-      break;
+    if (x->skip && !comp_pred) break;
   }
 
   if (best_rd >= best_rd_so_far) {
@@ -4249,11 +4018,8 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
     // Do Intra UV best rd mode selection if best mode choice above was intra.
     if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
       *mbmi = best_mbmode;
-      rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra,
-                              &rate_uv_tokenonly,
-                              &dist_uv,
-                              &skip_uv,
-                              BLOCK_8X8, TX_4X4);
+      rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, &rate_uv_tokenonly,
+                              &dist_uv, &skip_uv, BLOCK_8X8, TX_4X4);
     }
   }
 
@@ -4269,14 +4035,13 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
          !is_inter_block(&best_mbmode));
 
   vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
-                            sf->adaptive_rd_thresh, bsize, best_ref_index);
+                             sf->adaptive_rd_thresh, bsize, best_ref_index);
 
   // macroblock modes
   *mbmi = best_mbmode;
   x->skip |= best_skip2;
   if (!is_inter_block(&best_mbmode)) {
-    for (i = 0; i < 4; i++)
-      xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
+    for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
   } else {
     for (i = 0; i < 4; ++i)
       memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
@@ -4305,6 +4070,6 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
     vp10_zero(best_filter_diff);
   }
 
-  store_coding_context(x, ctx, best_ref_index,
-                       best_pred_diff, best_filter_diff, 0);
+  store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff,
+                       0);
 }
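The filter-cache bookkeeping near the end of vp10_rd_pick_inter_mode_sub8x8 rebases each switchable-filter context's cached rd onto the current candidate so the per-filter thresholds stay comparable across modes. A sketch of that kind of update, with illustrative names and a placeholder context count (the loop body is partly outside this hunk, so this is a reconstruction, not the exact code):

#include <stdint.h>
#define N_FILTER_CTX 4 /* stands in for SWITCHABLE_FILTER_CONTEXTS */

/* For each filter context, re-express this mode's rd as if the cached
 * filter had been used (this_rd + cache[i] - ref) and keep the minimum. */
static void update_best_filter_rd(int64_t best_filter_rd[N_FILTER_CTX],
                                  const int64_t filter_cache[N_FILTER_CTX],
                                  int64_t ref, int64_t this_rd) {
  int i;
  for (i = 0; i < N_FILTER_CTX; ++i) {
    int64_t adj_rd = this_rd;
    if (ref != INT64_MAX && filter_cache[i] != INT64_MAX)
      adj_rd += filter_cache[i] - ref;
    if (adj_rd < best_filter_rd[i]) best_filter_rd[i] = adj_rd;
  }
}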
diff --git a/vp10/encoder/rdopt.h b/vp10/encoder/rdopt.h
index 6526d2bba9390b9f0ee079d29b24cd6d670648fd..b887b8a64718d3ed6a527392c8b870b6c203b324 100644
--- a/vp10/encoder/rdopt.h
+++ b/vp10/encoder/rdopt.h
@@ -26,33 +26,28 @@ struct macroblock;
 struct RD_COST;
 
 void vp10_rd_pick_intra_mode_sb(struct VP10_COMP *cpi, struct macroblock *x,
-                               struct RD_COST *rd_cost, BLOCK_SIZE bsize,
-                               PICK_MODE_CONTEXT *ctx, int64_t best_rd);
+                                struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+                                PICK_MODE_CONTEXT *ctx, int64_t best_rd);
 
 unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
-                                           const struct buf_2d *ref,
-                                           BLOCK_SIZE bs);
+                                            const struct buf_2d *ref,
+                                            BLOCK_SIZE bs);
 #if CONFIG_VPX_HIGHBITDEPTH
 unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
-                                                const struct buf_2d *ref,
-                                                BLOCK_SIZE bs, int bd);
+                                                 const struct buf_2d *ref,
+                                                 BLOCK_SIZE bs, int bd);
 #endif
 
 void vp10_rd_pick_inter_mode_sb(struct VP10_COMP *cpi,
-                               struct TileDataEnc *tile_data,
-                               struct macroblock *x,
-                               int mi_row, int mi_col,
-                               struct RD_COST *rd_cost,
-                               BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
-                               int64_t best_rd_so_far);
+                                struct TileDataEnc *tile_data,
+                                struct macroblock *x, int mi_row, int mi_col,
+                                struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+                                PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far);
 
-void vp10_rd_pick_inter_mode_sb_seg_skip(struct VP10_COMP *cpi,
-                                        struct TileDataEnc *tile_data,
-                                        struct macroblock *x,
-                                        struct RD_COST *rd_cost,
-                                        BLOCK_SIZE bsize,
-                                        PICK_MODE_CONTEXT *ctx,
-                                        int64_t best_rd_so_far);
+void vp10_rd_pick_inter_mode_sb_seg_skip(
+    struct VP10_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
+    struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+    int64_t best_rd_so_far);
 
 int vp10_internal_image_edge(struct VP10_COMP *cpi);
 int vp10_active_h_edge(struct VP10_COMP *cpi, int mi_row, int mi_step);
@@ -60,12 +55,11 @@ int vp10_active_v_edge(struct VP10_COMP *cpi, int mi_col, int mi_step);
 int vp10_active_edge_sb(struct VP10_COMP *cpi, int mi_row, int mi_col);
 
 void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
-                                   struct TileDataEnc *tile_data,
-                                   struct macroblock *x,
-                                   int mi_row, int mi_col,
-                                   struct RD_COST *rd_cost,
-                                   BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
-                                   int64_t best_rd_so_far);
+                                    struct TileDataEnc *tile_data,
+                                    struct macroblock *x, int mi_row,
+                                    int mi_col, struct RD_COST *rd_cost,
+                                    BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                    int64_t best_rd_so_far);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vp10/encoder/resize.c b/vp10/encoder/resize.c
index 862ab029209a71f19c2a21a4574a5c62a0fe9d25..b75ceb46b52c7ab6c508e3cda882b735d2cc935c 100644
--- a/vp10/encoder/resize.c
+++ b/vp10/encoder/resize.c
@@ -22,198 +22,118 @@
 #include "vp10/common/common.h"
 #include "vp10/encoder/resize.h"
 
-#define FILTER_BITS               7
+#define FILTER_BITS 7
 
-#define INTERP_TAPS               8
-#define SUBPEL_BITS               5
-#define SUBPEL_MASK               ((1 << SUBPEL_BITS) - 1)
-#define INTERP_PRECISION_BITS     32
+#define INTERP_TAPS 8
+#define SUBPEL_BITS 5
+#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
+#define INTERP_PRECISION_BITS 32
 
 typedef int16_t interp_kernel[INTERP_TAPS];
 
 // Filters for interpolation (0.5-band) - note this also filters integer pels.
 static const interp_kernel filteredinterp_filters500[(1 << SUBPEL_BITS)] = {
-  {-3,  0, 35, 64, 35,  0, -3, 0},
-  {-3, -1, 34, 64, 36,  1, -3, 0},
-  {-3, -1, 32, 64, 38,  1, -3, 0},
-  {-2, -2, 31, 63, 39,  2, -3, 0},
-  {-2, -2, 29, 63, 41,  2, -3, 0},
-  {-2, -2, 28, 63, 42,  3, -4, 0},
-  {-2, -3, 27, 63, 43,  4, -4, 0},
-  {-2, -3, 25, 62, 45,  5, -4, 0},
-  {-2, -3, 24, 62, 46,  5, -4, 0},
-  {-2, -3, 23, 61, 47,  6, -4, 0},
-  {-2, -3, 21, 60, 49,  7, -4, 0},
-  {-1, -4, 20, 60, 50,  8, -4, -1},
-  {-1, -4, 19, 59, 51,  9, -4, -1},
-  {-1, -4, 17, 58, 52, 10, -4, 0},
-  {-1, -4, 16, 57, 53, 12, -4, -1},
-  {-1, -4, 15, 56, 54, 13, -4, -1},
-  {-1, -4, 14, 55, 55, 14, -4, -1},
-  {-1, -4, 13, 54, 56, 15, -4, -1},
-  {-1, -4, 12, 53, 57, 16, -4, -1},
-  {0, -4, 10, 52, 58, 17, -4, -1},
-  {-1, -4,  9, 51, 59, 19, -4, -1},
-  {-1, -4,  8, 50, 60, 20, -4, -1},
-  {0, -4,  7, 49, 60, 21, -3, -2},
-  {0, -4,  6, 47, 61, 23, -3, -2},
-  {0, -4,  5, 46, 62, 24, -3, -2},
-  {0, -4,  5, 45, 62, 25, -3, -2},
-  {0, -4,  4, 43, 63, 27, -3, -2},
-  {0, -4,  3, 42, 63, 28, -2, -2},
-  {0, -3,  2, 41, 63, 29, -2, -2},
-  {0, -3,  2, 39, 63, 31, -2, -2},
-  {0, -3,  1, 38, 64, 32, -1, -3},
-  {0, -3,  1, 36, 64, 34, -1, -3}
+  { -3, 0, 35, 64, 35, 0, -3, 0 },    { -3, -1, 34, 64, 36, 1, -3, 0 },
+  { -3, -1, 32, 64, 38, 1, -3, 0 },   { -2, -2, 31, 63, 39, 2, -3, 0 },
+  { -2, -2, 29, 63, 41, 2, -3, 0 },   { -2, -2, 28, 63, 42, 3, -4, 0 },
+  { -2, -3, 27, 63, 43, 4, -4, 0 },   { -2, -3, 25, 62, 45, 5, -4, 0 },
+  { -2, -3, 24, 62, 46, 5, -4, 0 },   { -2, -3, 23, 61, 47, 6, -4, 0 },
+  { -2, -3, 21, 60, 49, 7, -4, 0 },   { -1, -4, 20, 60, 50, 8, -4, -1 },
+  { -1, -4, 19, 59, 51, 9, -4, -1 },  { -1, -4, 17, 58, 52, 10, -4, 0 },
+  { -1, -4, 16, 57, 53, 12, -4, -1 }, { -1, -4, 15, 56, 54, 13, -4, -1 },
+  { -1, -4, 14, 55, 55, 14, -4, -1 }, { -1, -4, 13, 54, 56, 15, -4, -1 },
+  { -1, -4, 12, 53, 57, 16, -4, -1 }, { 0, -4, 10, 52, 58, 17, -4, -1 },
+  { -1, -4, 9, 51, 59, 19, -4, -1 },  { -1, -4, 8, 50, 60, 20, -4, -1 },
+  { 0, -4, 7, 49, 60, 21, -3, -2 },   { 0, -4, 6, 47, 61, 23, -3, -2 },
+  { 0, -4, 5, 46, 62, 24, -3, -2 },   { 0, -4, 5, 45, 62, 25, -3, -2 },
+  { 0, -4, 4, 43, 63, 27, -3, -2 },   { 0, -4, 3, 42, 63, 28, -2, -2 },
+  { 0, -3, 2, 41, 63, 29, -2, -2 },   { 0, -3, 2, 39, 63, 31, -2, -2 },
+  { 0, -3, 1, 38, 64, 32, -1, -3 },   { 0, -3, 1, 36, 64, 34, -1, -3 }
 };
 
 // Filters for interpolation (0.625-band) - note this also filters integer pels.
 static const interp_kernel filteredinterp_filters625[(1 << SUBPEL_BITS)] = {
-  {-1, -8, 33, 80, 33, -8, -1, 0},
-  {-1, -8, 30, 80, 35, -8, -1, 1},
-  {-1, -8, 28, 80, 37, -7, -2, 1},
-  {0, -8, 26, 79, 39, -7, -2, 1},
-  {0, -8, 24, 79, 41, -7, -2, 1},
-  {0, -8, 22, 78, 43, -6, -2, 1},
-  {0, -8, 20, 78, 45, -5, -3, 1},
-  {0, -8, 18, 77, 48, -5, -3, 1},
-  {0, -8, 16, 76, 50, -4, -3, 1},
-  {0, -8, 15, 75, 52, -3, -4, 1},
-  {0, -7, 13, 74, 54, -3, -4, 1},
-  {0, -7, 11, 73, 56, -2, -4, 1},
-  {0, -7, 10, 71, 58, -1, -4, 1},
-  {1, -7,  8, 70, 60,  0, -5, 1},
-  {1, -6,  6, 68, 62,  1, -5, 1},
-  {1, -6,  5, 67, 63,  2, -5, 1},
-  {1, -6,  4, 65, 65,  4, -6, 1},
-  {1, -5,  2, 63, 67,  5, -6, 1},
-  {1, -5,  1, 62, 68,  6, -6, 1},
-  {1, -5,  0, 60, 70,  8, -7, 1},
-  {1, -4, -1, 58, 71, 10, -7, 0},
-  {1, -4, -2, 56, 73, 11, -7, 0},
-  {1, -4, -3, 54, 74, 13, -7, 0},
-  {1, -4, -3, 52, 75, 15, -8, 0},
-  {1, -3, -4, 50, 76, 16, -8, 0},
-  {1, -3, -5, 48, 77, 18, -8, 0},
-  {1, -3, -5, 45, 78, 20, -8, 0},
-  {1, -2, -6, 43, 78, 22, -8, 0},
-  {1, -2, -7, 41, 79, 24, -8, 0},
-  {1, -2, -7, 39, 79, 26, -8, 0},
-  {1, -2, -7, 37, 80, 28, -8, -1},
-  {1, -1, -8, 35, 80, 30, -8, -1},
+  { -1, -8, 33, 80, 33, -8, -1, 0 }, { -1, -8, 30, 80, 35, -8, -1, 1 },
+  { -1, -8, 28, 80, 37, -7, -2, 1 }, { 0, -8, 26, 79, 39, -7, -2, 1 },
+  { 0, -8, 24, 79, 41, -7, -2, 1 },  { 0, -8, 22, 78, 43, -6, -2, 1 },
+  { 0, -8, 20, 78, 45, -5, -3, 1 },  { 0, -8, 18, 77, 48, -5, -3, 1 },
+  { 0, -8, 16, 76, 50, -4, -3, 1 },  { 0, -8, 15, 75, 52, -3, -4, 1 },
+  { 0, -7, 13, 74, 54, -3, -4, 1 },  { 0, -7, 11, 73, 56, -2, -4, 1 },
+  { 0, -7, 10, 71, 58, -1, -4, 1 },  { 1, -7, 8, 70, 60, 0, -5, 1 },
+  { 1, -6, 6, 68, 62, 1, -5, 1 },    { 1, -6, 5, 67, 63, 2, -5, 1 },
+  { 1, -6, 4, 65, 65, 4, -6, 1 },    { 1, -5, 2, 63, 67, 5, -6, 1 },
+  { 1, -5, 1, 62, 68, 6, -6, 1 },    { 1, -5, 0, 60, 70, 8, -7, 1 },
+  { 1, -4, -1, 58, 71, 10, -7, 0 },  { 1, -4, -2, 56, 73, 11, -7, 0 },
+  { 1, -4, -3, 54, 74, 13, -7, 0 },  { 1, -4, -3, 52, 75, 15, -8, 0 },
+  { 1, -3, -4, 50, 76, 16, -8, 0 },  { 1, -3, -5, 48, 77, 18, -8, 0 },
+  { 1, -3, -5, 45, 78, 20, -8, 0 },  { 1, -2, -6, 43, 78, 22, -8, 0 },
+  { 1, -2, -7, 41, 79, 24, -8, 0 },  { 1, -2, -7, 39, 79, 26, -8, 0 },
+  { 1, -2, -7, 37, 80, 28, -8, -1 }, { 1, -1, -8, 35, 80, 30, -8, -1 },
 };
 
 // Filters for interpolation (0.75-band) - note this also filters integer pels.
 static const interp_kernel filteredinterp_filters750[(1 << SUBPEL_BITS)] = {
-  {2, -11,  25,  96,  25, -11,   2, 0},
-  {2, -11,  22,  96,  28, -11,   2, 0},
-  {2, -10,  19,  95,  31, -11,   2, 0},
-  {2, -10,  17,  95,  34, -12,   2, 0},
-  {2,  -9,  14,  94,  37, -12,   2, 0},
-  {2,  -8,  12,  93,  40, -12,   1, 0},
-  {2,  -8,   9,  92,  43, -12,   1, 1},
-  {2,  -7,   7,  91,  46, -12,   1, 0},
-  {2,  -7,   5,  90,  49, -12,   1, 0},
-  {2,  -6,   3,  88,  52, -12,   0, 1},
-  {2,  -5,   1,  86,  55, -12,   0, 1},
-  {2,  -5,  -1,  84,  58, -11,   0, 1},
-  {2,  -4,  -2,  82,  61, -11,  -1, 1},
-  {2,  -4,  -4,  80,  64, -10,  -1, 1},
-  {1, -3, -5, 77, 67, -9, -1, 1},
-  {1, -3, -6, 75, 70, -8, -2, 1},
-  {1, -2, -7, 72, 72, -7, -2, 1},
-  {1, -2, -8, 70, 75, -6, -3, 1},
-  {1, -1, -9, 67, 77, -5, -3, 1},
-  {1,  -1, -10,  64,  80,  -4,  -4, 2},
-  {1,  -1, -11,  61,  82,  -2,  -4, 2},
-  {1,   0, -11,  58,  84,  -1,  -5, 2},
-  {1,   0, -12,  55,  86,   1,  -5, 2},
-  {1,   0, -12,  52,  88,   3,  -6, 2},
-  {0,   1, -12,  49,  90,   5,  -7, 2},
-  {0,   1, -12,  46,  91,   7,  -7, 2},
-  {1,   1, -12,  43,  92,   9,  -8, 2},
-  {0,   1, -12,  40,  93,  12,  -8, 2},
-  {0,   2, -12,  37,  94,  14,  -9, 2},
-  {0,   2, -12,  34,  95,  17, -10, 2},
-  {0,   2, -11,  31,  95,  19, -10, 2},
-  {0,   2, -11,  28,  96,  22, -11, 2}
+  { 2, -11, 25, 96, 25, -11, 2, 0 }, { 2, -11, 22, 96, 28, -11, 2, 0 },
+  { 2, -10, 19, 95, 31, -11, 2, 0 }, { 2, -10, 17, 95, 34, -12, 2, 0 },
+  { 2, -9, 14, 94, 37, -12, 2, 0 },  { 2, -8, 12, 93, 40, -12, 1, 0 },
+  { 2, -8, 9, 92, 43, -12, 1, 1 },   { 2, -7, 7, 91, 46, -12, 1, 0 },
+  { 2, -7, 5, 90, 49, -12, 1, 0 },   { 2, -6, 3, 88, 52, -12, 0, 1 },
+  { 2, -5, 1, 86, 55, -12, 0, 1 },   { 2, -5, -1, 84, 58, -11, 0, 1 },
+  { 2, -4, -2, 82, 61, -11, -1, 1 }, { 2, -4, -4, 80, 64, -10, -1, 1 },
+  { 1, -3, -5, 77, 67, -9, -1, 1 },  { 1, -3, -6, 75, 70, -8, -2, 1 },
+  { 1, -2, -7, 72, 72, -7, -2, 1 },  { 1, -2, -8, 70, 75, -6, -3, 1 },
+  { 1, -1, -9, 67, 77, -5, -3, 1 },  { 1, -1, -10, 64, 80, -4, -4, 2 },
+  { 1, -1, -11, 61, 82, -2, -4, 2 }, { 1, 0, -11, 58, 84, -1, -5, 2 },
+  { 1, 0, -12, 55, 86, 1, -5, 2 },   { 1, 0, -12, 52, 88, 3, -6, 2 },
+  { 0, 1, -12, 49, 90, 5, -7, 2 },   { 0, 1, -12, 46, 91, 7, -7, 2 },
+  { 1, 1, -12, 43, 92, 9, -8, 2 },   { 0, 1, -12, 40, 93, 12, -8, 2 },
+  { 0, 2, -12, 37, 94, 14, -9, 2 },  { 0, 2, -12, 34, 95, 17, -10, 2 },
+  { 0, 2, -11, 31, 95, 19, -10, 2 }, { 0, 2, -11, 28, 96, 22, -11, 2 }
 };
 
 // Filters for interpolation (0.875-band) - note this also filters integer pels.
 static const interp_kernel filteredinterp_filters875[(1 << SUBPEL_BITS)] = {
-  {3,  -8,  13, 112,  13,  -8,   3, 0},
-  {3,  -7,  10, 112,  17,  -9,   3, -1},
-  {2,  -6,   7, 111,  21,  -9,   3, -1},
-  {2,  -5,   4, 111,  24, -10,   3, -1},
-  {2,  -4,   1, 110,  28, -11,   3, -1},
-  {1,  -3,  -1, 108,  32, -12,   4, -1},
-  {1,  -2,  -3, 106,  36, -13,   4, -1},
-  {1,  -1,  -6, 105,  40, -14,   4, -1},
-  {1,  -1,  -7, 102,  44, -14,   4, -1},
-  {1,   0,  -9, 100,  48, -15,   4, -1},
-  {1,   1, -11,  97,  53, -16,   4, -1},
-  {0,   1, -12,  95,  57, -16,   4, -1},
-  {0,   2, -13,  91,  61, -16,   4, -1},
-  {0,   2, -14,  88,  65, -16,   4, -1},
-  {0,   3, -15,  84,  69, -17,   4, 0},
-  {0,   3, -16,  81,  73, -16,   3, 0},
-  {0,   3, -16,  77,  77, -16,   3, 0},
-  {0,   3, -16,  73,  81, -16,   3, 0},
-  {0,   4, -17,  69,  84, -15,   3, 0},
-  {-1,   4, -16,  65,  88, -14,   2, 0},
-  {-1,   4, -16,  61,  91, -13,   2, 0},
-  {-1,   4, -16,  57,  95, -12,   1, 0},
-  {-1,   4, -16,  53,  97, -11,   1, 1},
-  {-1,   4, -15,  48, 100,  -9,   0, 1},
-  {-1,   4, -14,  44, 102,  -7,  -1, 1},
-  {-1,   4, -14,  40, 105,  -6,  -1, 1},
-  {-1,   4, -13,  36, 106,  -3,  -2, 1},
-  {-1,   4, -12,  32, 108,  -1,  -3, 1},
-  {-1,   3, -11,  28, 110,   1,  -4, 2},
-  {-1,   3, -10,  24, 111,   4,  -5, 2},
-  {-1,   3,  -9,  21, 111,   7,  -6, 2},
-  {-1,   3,  -9,  17, 112,  10,  -7, 3}
+  { 3, -8, 13, 112, 13, -8, 3, 0 },   { 3, -7, 10, 112, 17, -9, 3, -1 },
+  { 2, -6, 7, 111, 21, -9, 3, -1 },   { 2, -5, 4, 111, 24, -10, 3, -1 },
+  { 2, -4, 1, 110, 28, -11, 3, -1 },  { 1, -3, -1, 108, 32, -12, 4, -1 },
+  { 1, -2, -3, 106, 36, -13, 4, -1 }, { 1, -1, -6, 105, 40, -14, 4, -1 },
+  { 1, -1, -7, 102, 44, -14, 4, -1 }, { 1, 0, -9, 100, 48, -15, 4, -1 },
+  { 1, 1, -11, 97, 53, -16, 4, -1 },  { 0, 1, -12, 95, 57, -16, 4, -1 },
+  { 0, 2, -13, 91, 61, -16, 4, -1 },  { 0, 2, -14, 88, 65, -16, 4, -1 },
+  { 0, 3, -15, 84, 69, -17, 4, 0 },   { 0, 3, -16, 81, 73, -16, 3, 0 },
+  { 0, 3, -16, 77, 77, -16, 3, 0 },   { 0, 3, -16, 73, 81, -16, 3, 0 },
+  { 0, 4, -17, 69, 84, -15, 3, 0 },   { -1, 4, -16, 65, 88, -14, 2, 0 },
+  { -1, 4, -16, 61, 91, -13, 2, 0 },  { -1, 4, -16, 57, 95, -12, 1, 0 },
+  { -1, 4, -16, 53, 97, -11, 1, 1 },  { -1, 4, -15, 48, 100, -9, 0, 1 },
+  { -1, 4, -14, 44, 102, -7, -1, 1 }, { -1, 4, -14, 40, 105, -6, -1, 1 },
+  { -1, 4, -13, 36, 106, -3, -2, 1 }, { -1, 4, -12, 32, 108, -1, -3, 1 },
+  { -1, 3, -11, 28, 110, 1, -4, 2 },  { -1, 3, -10, 24, 111, 4, -5, 2 },
+  { -1, 3, -9, 21, 111, 7, -6, 2 },   { -1, 3, -9, 17, 112, 10, -7, 3 }
 };
 
 // Filters for interpolation (full-band) - no filtering for integer pixels
 static const interp_kernel filteredinterp_filters1000[(1 << SUBPEL_BITS)] = {
-  {0,   0,   0, 128,   0,   0,   0, 0},
-  {0,   1,  -3, 128,   3,  -1,   0, 0},
-  {-1,   2,  -6, 127,   7,  -2,   1, 0},
-  {-1,   3,  -9, 126,  12,  -4,   1, 0},
-  {-1,   4, -12, 125,  16,  -5,   1, 0},
-  {-1,   4, -14, 123,  20,  -6,   2, 0},
-  {-1,   5, -15, 120,  25,  -8,   2, 0},
-  {-1,   5, -17, 118,  30,  -9,   3, -1},
-  {-1,   6, -18, 114,  35, -10,   3, -1},
-  {-1,   6, -19, 111,  41, -12,   3, -1},
-  {-1,   6, -20, 107,  46, -13,   4, -1},
-  {-1,   6, -21, 103,  52, -14,   4, -1},
-  {-1,   6, -21,  99,  57, -16,   5, -1},
-  {-1,   6, -21,  94,  63, -17,   5, -1},
-  {-1,   6, -20,  89,  68, -18,   5, -1},
-  {-1,   6, -20,  84,  73, -19,   6, -1},
-  {-1,   6, -20,  79,  79, -20,   6, -1},
-  {-1,   6, -19,  73,  84, -20,   6, -1},
-  {-1,   5, -18,  68,  89, -20,   6, -1},
-  {-1,   5, -17,  63,  94, -21,   6, -1},
-  {-1,   5, -16,  57,  99, -21,   6, -1},
-  {-1,   4, -14,  52, 103, -21,   6, -1},
-  {-1,   4, -13,  46, 107, -20,   6, -1},
-  {-1,   3, -12,  41, 111, -19,   6, -1},
-  {-1,   3, -10,  35, 114, -18,   6, -1},
-  {-1,   3,  -9,  30, 118, -17,   5, -1},
-  {0,   2,  -8,  25, 120, -15,   5, -1},
-  {0,   2,  -6,  20, 123, -14,   4, -1},
-  {0,   1,  -5,  16, 125, -12,   4, -1},
-  {0,   1,  -4,  12, 126,  -9,   3, -1},
-  {0,   1,  -2,   7, 127,  -6,   2, -1},
-  {0,   0,  -1,   3, 128,  -3,   1, 0}
+  { 0, 0, 0, 128, 0, 0, 0, 0 },        { 0, 1, -3, 128, 3, -1, 0, 0 },
+  { -1, 2, -6, 127, 7, -2, 1, 0 },     { -1, 3, -9, 126, 12, -4, 1, 0 },
+  { -1, 4, -12, 125, 16, -5, 1, 0 },   { -1, 4, -14, 123, 20, -6, 2, 0 },
+  { -1, 5, -15, 120, 25, -8, 2, 0 },   { -1, 5, -17, 118, 30, -9, 3, -1 },
+  { -1, 6, -18, 114, 35, -10, 3, -1 }, { -1, 6, -19, 111, 41, -12, 3, -1 },
+  { -1, 6, -20, 107, 46, -13, 4, -1 }, { -1, 6, -21, 103, 52, -14, 4, -1 },
+  { -1, 6, -21, 99, 57, -16, 5, -1 },  { -1, 6, -21, 94, 63, -17, 5, -1 },
+  { -1, 6, -20, 89, 68, -18, 5, -1 },  { -1, 6, -20, 84, 73, -19, 6, -1 },
+  { -1, 6, -20, 79, 79, -20, 6, -1 },  { -1, 6, -19, 73, 84, -20, 6, -1 },
+  { -1, 5, -18, 68, 89, -20, 6, -1 },  { -1, 5, -17, 63, 94, -21, 6, -1 },
+  { -1, 5, -16, 57, 99, -21, 6, -1 },  { -1, 4, -14, 52, 103, -21, 6, -1 },
+  { -1, 4, -13, 46, 107, -20, 6, -1 }, { -1, 3, -12, 41, 111, -19, 6, -1 },
+  { -1, 3, -10, 35, 114, -18, 6, -1 }, { -1, 3, -9, 30, 118, -17, 5, -1 },
+  { 0, 2, -8, 25, 120, -15, 5, -1 },   { 0, 2, -6, 20, 123, -14, 4, -1 },
+  { 0, 1, -5, 16, 125, -12, 4, -1 },   { 0, 1, -4, 12, 126, -9, 3, -1 },
+  { 0, 1, -2, 7, 127, -6, 2, -1 },     { 0, 0, -1, 3, 128, -3, 1, 0 }
 };
 
 // Filters for factor of 2 downsampling.
-static const int16_t vp10_down2_symeven_half_filter[] = {56, 12, -3, -1};
-static const int16_t vp10_down2_symodd_half_filter[] = {64, 35, 0, -3};
+static const int16_t vp10_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
+static const int16_t vp10_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
 
 static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
   int outlength16 = outlength * 16;
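choose_interp_filter, continued in the next hunk, selects one of the five kernel banks above from the resampling ratio, kept in integer sixteenths. A hypothetical reconstruction of that selection (the exact thresholds are an assumption; only the band ordering is taken from the tables above):

/* Pick a filter bank by how much of the source band survives the resize:
 * the smaller outlength is relative to inlength, the stronger the
 * low-pass (0.875-band down to 0.5-band) the interpolator must apply. */
static const interp_kernel *choose_band(int inlength, int outlength) {
  const int outlength16 = outlength * 16;
  if (outlength16 >= inlength * 16) return filteredinterp_filters1000;
  if (outlength16 >= inlength * 13) return filteredinterp_filters875;
  if (outlength16 >= inlength * 11) return filteredinterp_filters750;
  if (outlength16 >= inlength * 9) return filteredinterp_filters625;
  return filteredinterp_filters500;
}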
@@ -231,11 +151,14 @@ static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
 
 static void interpolate(const uint8_t *const input, int inlength,
                         uint8_t *output, int outlength) {
-  const int64_t delta = (((uint64_t)inlength << 32) + outlength / 2) /
-      outlength;
-  const int64_t offset = inlength > outlength ?
-      (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength :
-      -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength;
+  const int64_t delta =
+      (((uint64_t)inlength << 32) + outlength / 2) / outlength;
+  const int64_t offset =
+      inlength > outlength
+          ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) /
+                outlength
+          : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) /
+                outlength;
   uint8_t *optr = output;
   int x, x1, x2, sum, k, int_pel, sub_pel;
   int64_t y;
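The rewrapped initializers compute the source step (delta) and centering offset in 32.32 fixed point: delta is inlength/outlength rounded to the nearest 1/2^32, and offset shifts the sampling grid by half the length difference so input and output stay co-centered. A sketch of how an output index becomes an integer pel plus a 5-bit sub-pel phase (offset omitted for brevity):

#include <stdint.h>

/* Map output index x to a source position and split it into int_pel and
 * a SUBPEL_BITS-wide phase, mirroring the loop bodies that follow. For
 * inlength = 120, outlength = 80, delta is 1.5 * 2^32, so x = 2 lands on
 * int_pel = 3 with phase 0. */
static void map_position(int x, int inlength, int outlength, int *int_pel,
                         int *sub_pel) {
  const int64_t delta =
      (((uint64_t)inlength << 32) + outlength / 2) / outlength;
  const int64_t y = delta * x;
  *int_pel = (int)(y >> 32);            /* INTERP_PRECISION_BITS */
  *sub_pel = (int)(y >> (32 - 5)) & 31; /* SUBPEL_BITS / SUBPEL_MASK */
}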
@@ -252,8 +175,8 @@ static void interpolate(const uint8_t *const input, int inlength,
   x1 = x;
   x = outlength - 1;
   y = delta * x + offset;
-  while ((y >> INTERP_PRECISION_BITS) +
-         (int64_t)(INTERP_TAPS / 2) >= inlength) {
+  while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >=
+         inlength) {
     x--;
     y -= delta;
   }
@@ -267,8 +190,8 @@ static void interpolate(const uint8_t *const input, int inlength,
       sum = 0;
       for (k = 0; k < INTERP_TAPS; ++k) {
         const int pk = int_pel - INTERP_TAPS / 2 + 1 + k;
-        sum += filter[k] * input[(pk < 0 ? 0 :
-                                  (pk >= inlength ? inlength - 1 : pk))];
+        sum += filter[k] *
+               input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
       }
       *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
     }
@@ -281,9 +204,9 @@ static void interpolate(const uint8_t *const input, int inlength,
       filter = interp_filters[sub_pel];
       sum = 0;
       for (k = 0; k < INTERP_TAPS; ++k)
-        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ?
-                                  0 :
-                                  int_pel - INTERP_TAPS / 2 + 1 + k)];
+        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0
+                                      ? 0
+                                      : int_pel - INTERP_TAPS / 2 + 1 + k)];
       *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
     }
     // Middle part.
@@ -305,9 +228,9 @@ static void interpolate(const uint8_t *const input, int inlength,
       filter = interp_filters[sub_pel];
       sum = 0;
       for (k = 0; k < INTERP_TAPS; ++k)
-        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >=
-                                  inlength ?  inlength - 1 :
-                                  int_pel - INTERP_TAPS / 2 + 1 + k)];
+        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength
+                                      ? inlength - 1
+                                      : int_pel - INTERP_TAPS / 2 + 1 + k)];
       *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
     }
   }
@@ -331,7 +254,7 @@ static void down2_symeven(const uint8_t *const input, int length,
       for (j = 0; j < filter_len_half; ++j) {
         sum += (input[(i - j < 0 ? 0 : i - j)] +
                 input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
-            filter[j];
+               filter[j];
       }
       sum >>= FILTER_BITS;
       *optr++ = clip_pixel(sum);
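down2_symeven exploits the even symmetry of the half filter declared above: each stored coefficient weights a mirrored pair of samples, so the inner loop does four multiplies instead of eight. A standalone sketch of one output sample with edge replication, assuming 8-bit input:

#include <stdint.h>

/* One decimate-by-2 output around position i: filter[] is the right half
 * of a symmetric even-length kernel, applied once per mirrored pair. */
static uint8_t down2_sym_sample(const uint8_t *in, int length, int i,
                                const int16_t filter[4]) {
  int sum = 1 << (7 - 1); /* FILTER_BITS rounding term */
  int j;
  for (j = 0; j < 4; ++j) {
    const int l = i - j < 0 ? 0 : i - j;                        /* clamp */
    const int r = i + 1 + j >= length ? length - 1 : i + 1 + j; /* clamp */
    sum += (in[l] + in[r]) * filter[j];
  }
  sum >>= 7; /* FILTER_BITS */
  return (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
}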
@@ -361,7 +284,7 @@ static void down2_symeven(const uint8_t *const input, int length,
       for (j = 0; j < filter_len_half; ++j) {
         sum += (input[i - j] +
                 input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
-            filter[j];
+               filter[j];
       }
       sum >>= FILTER_BITS;
       *optr++ = clip_pixel(sum);
@@ -387,7 +310,7 @@ static void down2_symodd(const uint8_t *const input, int length,
       for (j = 1; j < filter_len_half; ++j) {
         sum += (input[(i - j < 0 ? 0 : i - j)] +
                 input[(i + j >= length ? length - 1 : i + j)]) *
-            filter[j];
+               filter[j];
       }
       sum >>= FILTER_BITS;
       *optr++ = clip_pixel(sum);
@@ -416,7 +339,7 @@ static void down2_symodd(const uint8_t *const input, int length,
       int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
       for (j = 1; j < filter_len_half; ++j) {
         sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) *
-            filter[j];
+               filter[j];
       }
       sum >>= FILTER_BITS;
       *optr++ = clip_pixel(sum);
@@ -426,8 +349,7 @@ static void down2_symodd(const uint8_t *const input, int length,
 
 static int get_down2_length(int length, int steps) {
   int s;
-  for (s = 0; s < steps; ++s)
-    length = (length + 1) >> 1;
+  for (s = 0; s < steps; ++s) length = (length + 1) >> 1;
   return length;
 }
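get_down2_length rounds up at every halving, and get_down2_steps (next hunk) counts how many clean halvings fit before the halved length would undershoot the target. A worked example under that reading: resizing 352 to 100 allows one step (352 -> 176; 176/2 = 88 < 100), leaving a final 176 -> 100 interpolation. A self-contained restatement:

/* Count decimate-by-2 passes that keep the length at or above the target;
 * the leftover ratio is handled by the generic interpolator. */
static int plan_down2_steps(int in_length, int out_length) {
  int steps = 0;
  while (in_length > 1 && ((in_length + 1) >> 1) >= out_length) {
    in_length = (in_length + 1) >> 1;
    ++steps;
  }
  return steps;
}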
 
@@ -441,11 +363,8 @@ static int get_down2_steps(int in_length, int out_length) {
   return steps;
 }
 
-static void resize_multistep(const uint8_t *const input,
-                             int length,
-                             uint8_t *output,
-                             int olength,
-                             uint8_t *buf) {
+static void resize_multistep(const uint8_t *const input, int length,
+                             uint8_t *output, int olength, uint8_t *buf) {
   int steps;
   if (length == olength) {
     memcpy(output, input, sizeof(output[0]) * length);
@@ -482,8 +401,7 @@ static void resize_multistep(const uint8_t *const input,
     if (filteredlength != olength) {
       interpolate(out, filteredlength, output, olength);
     }
-    if (tmpbuf)
-      free(tmpbuf);
+    if (tmpbuf) free(tmpbuf);
   } else {
     interpolate(input, length, output, olength);
   }
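resize_multistep, reflowed above, is the driver: run the planned number of cheap decimate-by-2 passes, then hand any leftover ratio to the arbitrary-ratio interpolator. A control-flow sketch with assumed helper prototypes standing in for this file's static functions (the real code also manages a heap fallback for the scratch buffer, elided here):

#include <stdint.h>

void down2_pass(const uint8_t *in, int length, uint8_t *out); /* assumed */
void interp_pass(const uint8_t *in, int inlen, uint8_t *out, int outlen);
int plan_down2_steps(int in_length, int out_length); /* as sketched above */

void resize_multistep_sketch(const uint8_t *src, int length, uint8_t *dst,
                             int olength,
                             uint8_t *scratch /* >= length bytes */) {
  int steps = plan_down2_steps(length, olength);
  uint8_t *buf[2] = { scratch, scratch + (length + 1) / 2 }; /* ping-pong */
  int parity = 0;
  if (steps == 0) { /* pure interpolation, e.g. upscaling */
    interp_pass(src, length, dst, olength);
    return;
  }
  while (steps--) {
    const int half = (length + 1) >> 1;
    /* the last pass writes straight to dst when it lands on olength */
    uint8_t *out = (steps == 0 && half == olength) ? dst : buf[parity];
    down2_pass(src, length, out);
    src = out;
    length = half;
    parity ^= 1;
  }
  if (length != olength) interp_pass(src, length, dst, olength);
}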
@@ -507,26 +425,21 @@ static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) {
   }
 }
 
-void vp10_resize_plane(const uint8_t *const input,
-                      int height,
-                      int width,
-                      int in_stride,
-                      uint8_t *output,
-                      int height2,
-                      int width2,
-                      int out_stride) {
+void vp10_resize_plane(const uint8_t *const input, int height, int width,
+                       int in_stride, uint8_t *output, int height2, int width2,
+                       int out_stride) {
   int i;
   uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height);
-  uint8_t *tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) *
-                                      (width < height ? height : width));
+  uint8_t *tmpbuf =
+      (uint8_t *)malloc(sizeof(uint8_t) * (width < height ? height : width));
   uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * (height + height2));
   assert(width > 0);
   assert(height > 0);
   assert(width2 > 0);
   assert(height2 > 0);
   for (i = 0; i < height; ++i)
-    resize_multistep(input + in_stride * i, width,
-                        intbuf + width2 * i, width2, tmpbuf);
+    resize_multistep(input + in_stride * i, width, intbuf + width2 * i, width2,
+                     tmpbuf);
   for (i = 0; i < width2; ++i) {
     fill_col_to_arr(intbuf + i, width2, height, arrbuf);
     resize_multistep(arrbuf, height, arrbuf + height, height2, tmpbuf);
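vp10_resize_plane, reformatted above, is separable: every row is resized into a width2-by-height intermediate, then each column is gathered into a contiguous array, resized by the same 1-D code, and scattered back, which is what fill_col_to_arr and fill_arr_to_col provide. A sketch of that gather/scatter pair with descriptive names:

#include <stdint.h>

/* Gather one image column into unit-stride storage so the 1-D resizers
 * above can run on it... */
static void gather_col(const uint8_t *img, int stride, int len, uint8_t *arr) {
  int i;
  for (i = 0; i < len; ++i) arr[i] = img[i * stride];
}

/* ...and scatter the resized array back into the destination column. */
static void scatter_col(uint8_t *img, int stride, int len,
                        const uint8_t *arr) {
  int i;
  for (i = 0; i < len; ++i) img[i * stride] = arr[i];
}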
@@ -542,9 +455,12 @@ static void highbd_interpolate(const uint16_t *const input, int inlength,
                                uint16_t *output, int outlength, int bd) {
   const int64_t delta =
       (((uint64_t)inlength << 32) + outlength / 2) / outlength;
-  const int64_t offset = inlength > outlength ?
-      (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength :
-      -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength;
+  const int64_t offset =
+      inlength > outlength
+          ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) /
+                outlength
+          : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) /
+                outlength;
   uint16_t *optr = output;
   int x, x1, x2, sum, k, int_pel, sub_pel;
   int64_t y;
@@ -561,8 +477,8 @@ static void highbd_interpolate(const uint16_t *const input, int inlength,
   x1 = x;
   x = outlength - 1;
   y = delta * x + offset;
-  while ((y >> INTERP_PRECISION_BITS) +
-         (int64_t)(INTERP_TAPS / 2) >= inlength) {
+  while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >=
+         inlength) {
     x--;
     y -= delta;
   }
@@ -577,7 +493,7 @@ static void highbd_interpolate(const uint16_t *const input, int inlength,
       for (k = 0; k < INTERP_TAPS; ++k) {
         const int pk = int_pel - INTERP_TAPS / 2 + 1 + k;
         sum += filter[k] *
-            input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
+               input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
       }
       *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
     }
@@ -590,9 +506,9 @@ static void highbd_interpolate(const uint16_t *const input, int inlength,
       filter = interp_filters[sub_pel];
       sum = 0;
       for (k = 0; k < INTERP_TAPS; ++k)
-        sum += filter[k] *
-            input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ?
-                   0 : int_pel - INTERP_TAPS / 2 + 1 + k)];
+        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0
+                                      ? 0
+                                      : int_pel - INTERP_TAPS / 2 + 1 + k)];
       *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
     }
     // Middle part.
@@ -614,9 +530,9 @@ static void highbd_interpolate(const uint16_t *const input, int inlength,
       filter = interp_filters[sub_pel];
       sum = 0;
       for (k = 0; k < INTERP_TAPS; ++k)
-        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >=
-                                  inlength ?  inlength - 1 :
-                                  int_pel - INTERP_TAPS / 2 + 1 + k)];
+        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength
+                                      ? inlength - 1
+                                      : int_pel - INTERP_TAPS / 2 + 1 + k)];
       *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
     }
   }
@@ -640,7 +556,7 @@ static void highbd_down2_symeven(const uint16_t *const input, int length,
       for (j = 0; j < filter_len_half; ++j) {
         sum += (input[(i - j < 0 ? 0 : i - j)] +
                 input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
-            filter[j];
+               filter[j];
       }
       sum >>= FILTER_BITS;
       *optr++ = clip_pixel_highbd(sum, bd);
@@ -670,7 +586,7 @@ static void highbd_down2_symeven(const uint16_t *const input, int length,
       for (j = 0; j < filter_len_half; ++j) {
         sum += (input[i - j] +
                 input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
-            filter[j];
+               filter[j];
       }
       sum >>= FILTER_BITS;
       *optr++ = clip_pixel_highbd(sum, bd);
@@ -679,7 +595,7 @@ static void highbd_down2_symeven(const uint16_t *const input, int length,
 }
 
 static void highbd_down2_symodd(const uint16_t *const input, int length,
-                              uint16_t *output, int bd) {
+                                uint16_t *output, int bd) {
   // Actual filter len = 2 * filter_len_half - 1.
   static const int16_t *filter = vp10_down2_symodd_half_filter;
   const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
@@ -696,7 +612,7 @@ static void highbd_down2_symodd(const uint16_t *const input, int length,
       for (j = 1; j < filter_len_half; ++j) {
         sum += (input[(i - j < 0 ? 0 : i - j)] +
                 input[(i + j >= length ? length - 1 : i + j)]) *
-            filter[j];
+               filter[j];
       }
       sum >>= FILTER_BITS;
       *optr++ = clip_pixel_highbd(sum, bd);
@@ -725,7 +641,7 @@ static void highbd_down2_symodd(const uint16_t *const input, int length,
       int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
       for (j = 1; j < filter_len_half; ++j) {
         sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) *
-            filter[j];
+               filter[j];
       }
       sum >>= FILTER_BITS;
       *optr++ = clip_pixel_highbd(sum, bd);
@@ -733,12 +649,9 @@ static void highbd_down2_symodd(const uint16_t *const input, int length,
   }
 }
 
-static void highbd_resize_multistep(const uint16_t *const input,
-                                    int length,
-                                    uint16_t *output,
-                                    int olength,
-                                    uint16_t *buf,
-                                    int bd) {
+static void highbd_resize_multistep(const uint16_t *const input, int length,
+                                    uint16_t *output, int olength,
+                                    uint16_t *buf, int bd) {
   int steps;
   if (length == olength) {
     memcpy(output, input, sizeof(output[0]) * length);
@@ -775,8 +688,7 @@ static void highbd_resize_multistep(const uint16_t *const input,
     if (filteredlength != olength) {
       highbd_interpolate(out, filteredlength, output, olength, bd);
     }
-    if (tmpbuf)
-      free(tmpbuf);
+    if (tmpbuf) free(tmpbuf);
   } else {
     highbd_interpolate(input, length, output, olength, bd);
   }
@@ -802,19 +714,13 @@ static void highbd_fill_arr_to_col(uint16_t *img, int stride, int len,
   }
 }
 
-void vp10_highbd_resize_plane(const uint8_t *const input,
-                             int height,
-                             int width,
-                             int in_stride,
-                             uint8_t *output,
-                             int height2,
-                             int width2,
-                             int out_stride,
-                             int bd) {
+void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+                              int in_stride, uint8_t *output, int height2,
+                              int width2, int out_stride, int bd) {
   int i;
   uint16_t *intbuf = (uint16_t *)malloc(sizeof(uint16_t) * width2 * height);
-  uint16_t *tmpbuf = (uint16_t *)malloc(sizeof(uint16_t) *
-                                        (width < height ? height : width));
+  uint16_t *tmpbuf =
+      (uint16_t *)malloc(sizeof(uint16_t) * (width < height ? height : width));
   uint16_t *arrbuf = (uint16_t *)malloc(sizeof(uint16_t) * (height + height2));
   for (i = 0; i < height; ++i) {
     highbd_resize_multistep(CONVERT_TO_SHORTPTR(input + in_stride * i), width,
@@ -833,96 +739,82 @@ void vp10_highbd_resize_plane(const uint8_t *const input,
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-void vp10_resize_frame420(const uint8_t *const y,
-                         int y_stride,
-                         const uint8_t *const u, const uint8_t *const v,
-                         int uv_stride,
-                         int height, int width,
-                         uint8_t *oy, int oy_stride,
-                         uint8_t *ou, uint8_t *ov, int ouv_stride,
-                         int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride,
-                   oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height / 2, width / 2, uv_stride,
-                   ou, oheight / 2, owidth / 2, ouv_stride);
-  vp10_resize_plane(v, height / 2, width / 2, uv_stride,
-                   ov, oheight / 2, owidth / 2, ouv_stride);
+void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth) {
+  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  vp10_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+                    owidth / 2, ouv_stride);
+  vp10_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+                    owidth / 2, ouv_stride);
 }
 
 void vp10_resize_frame422(const uint8_t *const y, int y_stride,
-                         const uint8_t *const u, const uint8_t *const v,
-                         int uv_stride,
-                         int height, int width,
-                         uint8_t *oy, int oy_stride,
-                         uint8_t *ou, uint8_t *ov, int ouv_stride,
-                         int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride,
-                   oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height, width / 2, uv_stride,
-                   ou, oheight, owidth / 2, ouv_stride);
-  vp10_resize_plane(v, height, width / 2, uv_stride,
-                   ov, oheight, owidth / 2, ouv_stride);
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth) {
+  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  vp10_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
+                    ouv_stride);
+  vp10_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
+                    ouv_stride);
 }
 
 void vp10_resize_frame444(const uint8_t *const y, int y_stride,
-                         const uint8_t *const u, const uint8_t *const v,
-                         int uv_stride,
-                         int height, int width,
-                         uint8_t *oy, int oy_stride,
-                         uint8_t *ou, uint8_t *ov, int ouv_stride,
-                         int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride,
-                   oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height, width, uv_stride,
-                   ou, oheight, owidth, ouv_stride);
-  vp10_resize_plane(v, height, width, uv_stride,
-                   ov, oheight, owidth, ouv_stride);
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth) {
+  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  vp10_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+                    ouv_stride);
+  vp10_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+                    ouv_stride);
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_resize_frame420(const uint8_t *const y,
-                                int y_stride,
-                                const uint8_t *const u, const uint8_t *const v,
-                                int uv_stride,
-                                int height, int width,
-                                uint8_t *oy, int oy_stride,
-                                uint8_t *ou, uint8_t *ov, int ouv_stride,
-                                int oheight, int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride,
-                          oy, oheight, owidth, oy_stride, bd);
-  vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride,
-                          ou, oheight / 2, owidth / 2, ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride,
-                          ov, oheight / 2, owidth / 2, ouv_stride, bd);
+void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd) {
+  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                           oy_stride, bd);
+  vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+                           owidth / 2, ouv_stride, bd);
+  vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+                           owidth / 2, ouv_stride, bd);
 }
 
 void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
-                                const uint8_t *const u, const uint8_t *const v,
-                                int uv_stride,
-                                int height, int width,
-                                uint8_t *oy, int oy_stride,
-                                uint8_t *ou, uint8_t *ov, int ouv_stride,
-                                int oheight, int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride,
-                          oy, oheight, owidth, oy_stride, bd);
-  vp10_highbd_resize_plane(u, height, width / 2, uv_stride,
-                          ou, oheight, owidth / 2, ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height, width / 2, uv_stride,
-                          ov, oheight, owidth / 2, ouv_stride, bd);
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd) {
+  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                           oy_stride, bd);
+  vp10_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
+                           owidth / 2, ouv_stride, bd);
+  vp10_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
+                           owidth / 2, ouv_stride, bd);
 }
 
 void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
-                                const uint8_t *const u, const uint8_t *const v,
-                                int uv_stride,
-                                int height, int width,
-                                uint8_t *oy, int oy_stride,
-                                uint8_t *ou, uint8_t *ov, int ouv_stride,
-                                int oheight, int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride,
-                          oy, oheight, owidth, oy_stride, bd);
-  vp10_highbd_resize_plane(u, height, width, uv_stride,
-                          ou, oheight, owidth, ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height, width, uv_stride,
-                          ov, oheight, owidth, ouv_stride, bd);
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd) {
+  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                           oy_stride, bd);
+  vp10_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+                           ouv_stride, bd);
+  vp10_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+                           ouv_stride, bd);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
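The three frame wrappers (and their high-bit-depth twins) differ only in the chroma scaling: 4:2:0 halves both chroma dimensions, 4:2:2 halves only the width, and 4:4:4 keeps chroma at luma size. A hypothetical collapse of the 8-bit trio into one table-driven entry point, using only vp10_resize_plane from this file:

#include <stdint.h>

typedef enum { FMT_420, FMT_422, FMT_444 } chroma_fmt;

void vp10_resize_plane(const uint8_t *const input, int height, int width,
                       int in_stride, uint8_t *output, int height2, int width2,
                       int out_stride);

/* Shift chroma dimensions by the subsampling factors instead of keeping
 * one wrapper per format; sx/sy are the horizontal/vertical shifts. */
void resize_frame_any(chroma_fmt fmt, const uint8_t *y, int y_stride,
                      const uint8_t *u, const uint8_t *v, int uv_stride,
                      int h, int w, uint8_t *oy, int oy_stride, uint8_t *ou,
                      uint8_t *ov, int ouv_stride, int oh, int ow) {
  const int sx = (fmt == FMT_444) ? 0 : 1;
  const int sy = (fmt == FMT_420) ? 1 : 0;
  vp10_resize_plane(y, h, w, y_stride, oy, oh, ow, oy_stride);
  vp10_resize_plane(u, h >> sy, w >> sx, uv_stride, ou, oh >> sy, ow >> sx,
                    ouv_stride);
  vp10_resize_plane(v, h >> sy, w >> sx, uv_stride, ov, oh >> sy, ow >> sx,
                    ouv_stride);
}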
diff --git a/vp10/encoder/resize.h b/vp10/encoder/resize.h
index a85735bbee7368132d136a4b3bdbe5362c0354fc..9dc34ef27f2c56e478fbe50d0f9d874562882005 100644
--- a/vp10/encoder/resize.h
+++ b/vp10/encoder/resize.h
@@ -18,116 +18,51 @@
 extern "C" {
 #endif
 
-void vp10_resize_plane(const uint8_t *const input,
-                      int height,
-                      int width,
-                      int in_stride,
-                      uint8_t *output,
-                      int height2,
-                      int width2,
-                      int out_stride);
-void vp10_resize_frame420(const uint8_t *const y,
-                         int y_stride,
-                         const uint8_t *const u,
-                         const uint8_t *const v,
-                         int uv_stride,
-                         int height,
-                         int width,
-                         uint8_t *oy,
-                         int oy_stride,
-                         uint8_t *ou,
-                         uint8_t *ov,
-                         int ouv_stride,
-                         int oheight,
-                         int owidth);
-void vp10_resize_frame422(const uint8_t *const y,
-                         int y_stride,
-                         const uint8_t *const u,
-                         const uint8_t *const v,
-                         int uv_stride,
-                         int height,
-                         int width,
-                         uint8_t *oy,
-                         int oy_stride,
-                         uint8_t *ou,
-                         uint8_t *ov,
-                         int ouv_stride,
-                         int oheight,
-                         int owidth);
-void vp10_resize_frame444(const uint8_t *const y,
-                         int y_stride,
-                         const uint8_t *const u,
-                         const uint8_t *const v,
-                         int uv_stride,
-                         int height,
-                         int width,
-                         uint8_t *oy,
-                         int oy_stride,
-                         uint8_t *ou,
-                         uint8_t *ov,
-                         int ouv_stride,
-                         int oheight,
-                         int owidth);
+void vp10_resize_plane(const uint8_t *const input, int height, int width,
+                       int in_stride, uint8_t *output, int height2, int width2,
+                       int out_stride);
+void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth);
+void vp10_resize_frame422(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth);
+void vp10_resize_frame444(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth);
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_resize_plane(const uint8_t *const input,
-                             int height,
-                             int width,
-                             int in_stride,
-                             uint8_t *output,
-                             int height2,
-                             int width2,
-                             int out_stride,
-                             int bd);
-void vp10_highbd_resize_frame420(const uint8_t *const y,
-                                int y_stride,
-                                const uint8_t *const u,
-                                const uint8_t *const v,
-                                int uv_stride,
-                                int height,
-                                int width,
-                                uint8_t *oy,
-                                int oy_stride,
-                                uint8_t *ou,
-                                uint8_t *ov,
-                                int ouv_stride,
-                                int oheight,
-                                int owidth,
-                                int bd);
-void vp10_highbd_resize_frame422(const uint8_t *const y,
-                                int y_stride,
-                                const uint8_t *const u,
-                                const uint8_t *const v,
-                                int uv_stride,
-                                int height,
-                                int width,
-                                uint8_t *oy,
-                                int oy_stride,
-                                uint8_t *ou,
-                                uint8_t *ov,
-                                int ouv_stride,
-                                int oheight,
-                                int owidth,
-                                int bd);
-void vp10_highbd_resize_frame444(const uint8_t *const y,
-                                int y_stride,
-                                const uint8_t *const u,
-                                const uint8_t *const v,
-                                int uv_stride,
-                                int height,
-                                int width,
-                                uint8_t *oy,
-                                int oy_stride,
-                                uint8_t *ou,
-                                uint8_t *ov,
-                                int ouv_stride,
-                                int oheight,
-                                int owidth,
-                                int bd);
-#endif    // CONFIG_VPX_HIGHBITDEPTH
+void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+                              int in_stride, uint8_t *output, int height2,
+                              int width2, int out_stride, int bd);
+void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd);
+void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd);
+void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd);
+#endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif    // VP10_ENCODER_RESIZE_H_
+#endif  // VP10_ENCODER_RESIZE_H_
diff --git a/vp10/encoder/segmentation.c b/vp10/encoder/segmentation.c
index 677910fa37c16f0b31acefe535ae93846e80af64..1b293663fc8ed10b21b4f2924d4308bbf6ec96ce 100644
--- a/vp10/encoder/segmentation.c
+++ b/vp10/encoder/segmentation.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #include <limits.h>
 
 #include "vpx_mem/vpx_mem.h"
@@ -32,31 +31,31 @@ void vp10_disable_segmentation(struct segmentation *seg) {
   seg->update_data = 0;
 }
 
-void vp10_set_segment_data(struct segmentation *seg,
-                          signed char *feature_data,
-                          unsigned char abs_delta) {
+void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
+                           unsigned char abs_delta) {
   seg->abs_delta = abs_delta;
 
   memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
 }
 void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
-                            SEG_LVL_FEATURES feature_id) {
+                             SEG_LVL_FEATURES feature_id) {
   seg->feature_mask[segment_id] &= ~(1 << feature_id);
 }
 
 void vp10_clear_segdata(struct segmentation *seg, int segment_id,
-                       SEG_LVL_FEATURES feature_id) {
+                        SEG_LVL_FEATURES feature_id) {
   seg->feature_data[segment_id][feature_id] = 0;
 }
 
 // Based on a set of segment counts, calculate a probability tree
 static void calc_segtree_probs(unsigned *segcounts,
-    vpx_prob *segment_tree_probs, const vpx_prob *cur_tree_probs) {
+                               vpx_prob *segment_tree_probs,
+                               const vpx_prob *cur_tree_probs) {
   // Work out probabilities of each segment
-  const unsigned cc[4] = {
-    segcounts[0] + segcounts[1], segcounts[2] + segcounts[3],
-    segcounts[4] + segcounts[5], segcounts[6] + segcounts[7]
-  };
+  const unsigned cc[4] = { segcounts[0] + segcounts[1],
+                           segcounts[2] + segcounts[3],
+                           segcounts[4] + segcounts[5],
+                           segcounts[6] + segcounts[7] };
   const unsigned ccc[2] = { cc[0] + cc[1], cc[2] + cc[3] };
 #if CONFIG_MISC_FIXES
   int i;
@@ -72,13 +71,13 @@ static void calc_segtree_probs(unsigned *segcounts,
 
 #if CONFIG_MISC_FIXES
   for (i = 0; i < 7; i++) {
-    const unsigned *ct = i == 0 ? ccc : i < 3 ? cc + (i & 2)
-        : segcounts + (i - 3) * 2;
-    vp10_prob_diff_update_savings_search(ct,
-        cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
+    const unsigned *ct =
+        i == 0 ? ccc : i < 3 ? cc + (i & 2) : segcounts + (i - 3) * 2;
+    vp10_prob_diff_update_savings_search(
+        ct, cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
   }
 #else
-  (void) cur_tree_probs;
+  (void)cur_tree_probs;
 #endif
 }
 
@@ -92,13 +91,11 @@ static int cost_segmap(unsigned *segcounts, vpx_prob *probs) {
   const int c4567 = c45 + c67;
 
   // Cost the top node of the tree
-  int cost = c0123 * vp10_cost_zero(probs[0]) +
-             c4567 * vp10_cost_one(probs[0]);
+  int cost = c0123 * vp10_cost_zero(probs[0]) + c4567 * vp10_cost_one(probs[0]);
 
   // Cost subsequent levels
   if (c0123 > 0) {
-    cost += c01 * vp10_cost_zero(probs[1]) +
-            c23 * vp10_cost_one(probs[1]);
+    cost += c01 * vp10_cost_zero(probs[1]) + c23 * vp10_cost_one(probs[1]);
 
     if (c01 > 0)
       cost += segcounts[0] * vp10_cost_zero(probs[3]) +
@@ -109,8 +106,7 @@ static int cost_segmap(unsigned *segcounts, vpx_prob *probs) {
   }
 
   if (c4567 > 0) {
-    cost += c45 * vp10_cost_zero(probs[2]) +
-            c67 * vp10_cost_one(probs[2]);
+    cost += c45 * vp10_cost_zero(probs[2]) + c67 * vp10_cost_one(probs[2]);
 
     if (c45 > 0)
       cost += segcounts[4] * vp10_cost_zero(probs[5]) +
@@ -127,12 +123,11 @@ static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
                        const TileInfo *tile, MODE_INFO **mi,
                        unsigned *no_pred_segcounts,
                        unsigned (*temporal_predictor_count)[2],
-                       unsigned *t_unpred_seg_counts,
-                       int bw, int bh, int mi_row, int mi_col) {
+                       unsigned *t_unpred_seg_counts, int bw, int bh,
+                       int mi_row, int mi_col) {
   int segment_id;
 
-  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
-    return;
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
 
   xd->mi = mi;
   segment_id = xd->mi[0]->mbmi.segment_id;
@@ -146,8 +141,8 @@ static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
   if (cm->frame_type != KEY_FRAME) {
     const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
     // Test to see if the segment id matches the predicted value.
-    const int pred_segment_id = get_segment_id(cm, cm->last_frame_seg_map,
-                                               bsize, mi_row, mi_col);
+    const int pred_segment_id =
+        get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col);
     const int pred_flag = pred_segment_id == segment_id;
     const int pred_context = vp10_get_pred_context_seg_id(xd);
 
@@ -157,8 +152,7 @@ static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
     temporal_predictor_count[pred_context][pred_flag]++;
 
     // Update the "unpredicted" segment count
-    if (!pred_flag)
-      t_unpred_seg_counts[segment_id]++;
+    if (!pred_flag) t_unpred_seg_counts[segment_id]++;
   }
 }
 
@@ -166,15 +160,13 @@ static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
                           const TileInfo *tile, MODE_INFO **mi,
                           unsigned *no_pred_segcounts,
                           unsigned (*temporal_predictor_count)[2],
-                          unsigned *t_unpred_seg_counts,
-                          int mi_row, int mi_col,
+                          unsigned *t_unpred_seg_counts, int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
   const int mis = cm->mi_stride;
   int bw, bh;
   const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
 
-  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
-    return;
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
 
   bw = num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type];
   bh = num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type];
@@ -191,9 +183,9 @@ static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
   } else if (bw < bs && bh == bs) {
     count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
                t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
-    count_segs(cm, xd, tile, mi + hbs,
-               no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts,
-               hbs, bs, mi_row, mi_col + hbs);
+    count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
+               temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row,
+               mi_col + hbs);
   } else {
     const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
     int n;
@@ -204,9 +196,8 @@ static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
       const int mi_dc = hbs * (n & 1);
       const int mi_dr = hbs * (n >> 1);
 
-      count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc],
-                    no_pred_segcounts, temporal_predictor_count,
-                    t_unpred_seg_counts,
+      count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], no_pred_segcounts,
+                    temporal_predictor_count, t_unpred_seg_counts,
                     mi_row + mi_dr, mi_col + mi_dc, subsize);
     }
   }
@@ -226,7 +217,7 @@ void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
   int i, tile_col, mi_row, mi_col;
 
 #if CONFIG_MISC_FIXES
-  unsigned (*temporal_predictor_count)[2] = cm->counts.seg.pred;
+  unsigned(*temporal_predictor_count)[2] = cm->counts.seg.pred;
   unsigned *no_pred_segcounts = cm->counts.seg.tree_total;
   unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred;
 #else
@@ -240,7 +231,7 @@ void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
   vpx_prob t_nopred_prob[PREDICTION_PROBS];
 
 #if CONFIG_MISC_FIXES
-  (void) xd;
+  (void)xd;
 #else
   // Set default state for the segment tree probabilities and the
   // temporal coding probabilities
@@ -262,8 +253,8 @@ void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
       for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
            mi_col += 8, mi += 8)
         count_segs_sb(cm, xd, &tile, mi, no_pred_segcounts,
-                      temporal_predictor_count, t_unpred_seg_counts,
-                      mi_row, mi_col, BLOCK_64X64);
+                      temporal_predictor_count, t_unpred_seg_counts, mi_row,
+                      mi_col, BLOCK_64X64);
     }
   }
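
For reference, cost_segmap() above walks a fixed 3-level binary tree over the eight segment ids: probs[0] splits {0..3} from {4..7}, probs[1] and probs[2] split each quad into pairs, and probs[3..6] pick within a pair; calc_segtree_probs() builds the same tree bottom-up from the cc[]/ccc[] partial sums. A minimal floating-point sketch of the traversal, using -log2(p) bits instead of libvpx's fixed-point vp10_cost_zero/one tables, so the numbers only approximate the encoder's:

    #include <math.h>

    /* Approximate bit cost of a segment map under the 3-level tree walked
     * by cost_segmap(): probs[0] splits {0..3}|{4..7}, probs[1]/probs[2]
     * split the two quads, probs[3..6] split the four pairs. */
    static double segmap_bits(const unsigned counts[8], const double probs[7]) {
      double bits = 0.0;
      int s;
      for (s = 0; s < 8; ++s) {
        const int n1 = (s < 4) ? 1 : 2;  /* second-level node for this id */
        const int n2 = 3 + s / 2;        /* leaf node for this id */
        const int b0 = s >= 4;           /* branch taken at each level */
        const int b1 = (s >> 1) & 1;
        const int b2 = s & 1;
        bits += counts[s] * (-log2(b0 ? 1.0 - probs[0] : probs[0]) -
                             log2(b1 ? 1.0 - probs[n1] : probs[n1]) -
                             log2(b2 ? 1.0 - probs[n2] : probs[n2]));
      }
      return bits;
    }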
 
diff --git a/vp10/encoder/segmentation.h b/vp10/encoder/segmentation.h
index b8e6c06c69fa8fd35bf9bacdf2bb6a317f67216b..c07f37ff7b8dc4c4ae69c2088dd1189551b1851a 100644
--- a/vp10/encoder/segmentation.h
+++ b/vp10/encoder/segmentation.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_SEGMENTATION_H_
 #define VP10_ENCODER_SEGMENTATION_H_
 
@@ -22,12 +21,10 @@ extern "C" {
 void vp10_enable_segmentation(struct segmentation *seg);
 void vp10_disable_segmentation(struct segmentation *seg);
 
-void vp10_disable_segfeature(struct segmentation *seg,
-                            int segment_id,
-                            SEG_LVL_FEATURES feature_id);
-void vp10_clear_segdata(struct segmentation *seg,
-                       int segment_id,
-                       SEG_LVL_FEATURES feature_id);
+void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
+                             SEG_LVL_FEATURES feature_id);
+void vp10_clear_segdata(struct segmentation *seg, int segment_id,
+                        SEG_LVL_FEATURES feature_id);
 
 // The values given for each segment can be either deltas (from the default
 // value chosen for the frame) or absolute values.
@@ -40,7 +37,7 @@ void vp10_clear_segdata(struct segmentation *seg,
 // abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
 // the absolute values given).
 void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
-                          unsigned char abs_delta);
+                           unsigned char abs_delta);
 
 void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd);
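
The abs_delta comment above describes two addressing modes for the values passed to vp10_set_segment_data(). A small sketch of how such a per-segment value would resolve against the frame-level default; the helper name and the clamp range are illustrative only, not encoder code:

    /* In SEGMENT_ABSDATA mode the stored value is used directly; in
     * SEGMENT_DELTADATA mode it offsets the frame default. */
    static int resolve_feature_sketch(int frame_default, int seg_value,
                                      int is_abs_data) {
      int v = is_abs_data ? seg_value : frame_default + seg_value;
      if (v < 0) v = 0;      /* e.g. clamping a quantizer index */
      if (v > 255) v = 255;
      return v;
    }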
 
diff --git a/vp10/encoder/skin_detection.c b/vp10/encoder/skin_detection.c
index 1714cd34d5da350ffe2ddfc621ffb9b11c3121aa..324ac596ac5ffb02175ce7e991db9945e64fa825 100644
--- a/vp10/encoder/skin_detection.c
+++ b/vp10/encoder/skin_detection.c
@@ -16,9 +16,9 @@
 #include "vp10/encoder/skin_detection.h"
 
 // Fixed-point skin color model parameters.
-static const int skin_mean[2] = {7463, 9614};                 // q6
-static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157};  // q16
-static const int skin_threshold = 1570636;                    // q18
+static const int skin_mean[2] = { 7463, 9614 };                 // q6
+static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 };  // q16
+static const int skin_threshold = 1570636;                      // q18
 
 // Thresholds on luminance.
 static const int y_low = 20;
@@ -34,10 +34,9 @@ static int evaluate_skin_color_difference(int cb, int cr) {
   const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
   const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
   const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
-  const int skin_diff = skin_inv_cov[0] * cb_diff_q2 +
-      skin_inv_cov[1] * cbcr_diff_q2 +
-      skin_inv_cov[2] * cbcr_diff_q2 +
-      skin_inv_cov[3] * cr_diff_q2;
+  const int skin_diff =
+      skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
+      skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
   return skin_diff;
 }
 
@@ -61,11 +60,11 @@ void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file) {
   const int src_uvstride = cpi->Source->uv_stride;
   YV12_BUFFER_CONFIG skinmap;
   memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG));
-  if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height,
-                               cm->subsampling_x, cm->subsampling_y,
-                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment)) {
-      vpx_free_frame_buffer(&skinmap);
-      return;
+  if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height, cm->subsampling_x,
+                             cm->subsampling_y, VPX_ENC_BORDER_IN_PIXELS,
+                             cm->byte_alignment)) {
+    vpx_free_frame_buffer(&skinmap);
+    return;
   }
   memset(skinmap.buffer_alloc, 128, skinmap.frame_size);
   y = skinmap.y_buffer;
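
For reference, the q6/q16/q18 constants above encode a Gaussian-style skin classifier in fixed point, gated by the luminance thresholds; evaluate_skin_color_difference() is the integer form of the quadratic below. A floating-point sketch of the same model (the integer version differs only by its rounding terms):

    /* Float model behind the fixed-point skin test: a 2x2
     * inverse-covariance quadratic in (Cb, Cr) around the skin mean,
     * compared to a threshold. */
    static int is_skin_sketch(double cb, double cr) {
      const double mean_cb = 7463.0 / 64.0;  /* q6 constants -> real */
      const double mean_cr = 9614.0 / 64.0;
      const double icov[4] = { 4107.0 / 65536.0, 1663.0 / 65536.0,
                               1663.0 / 65536.0, 2157.0 / 65536.0 }; /* q16 */
      const double threshold = 1570636.0 / 262144.0;                 /* q18 */
      const double dcb = cb - mean_cb;
      const double dcr = cr - mean_cr;
      const double d = icov[0] * dcb * dcb + (icov[1] + icov[2]) * dcb * dcr +
                       icov[3] * dcr * dcr;
      return d < threshold;
    }
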
diff --git a/vp10/encoder/speed_features.c b/vp10/encoder/speed_features.c
index b82dec007104ea0d575b313ddb20b6e042931977..d95dc65c40c051573695f8c440690100082f3d30 100644
--- a/vp10/encoder/speed_features.c
+++ b/vp10/encoder/speed_features.c
@@ -17,21 +17,23 @@
 #include "vpx_dsp/vpx_dsp_common.h"
 
 // Mesh search patterns for various speed settings
-static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] =
-    {{64, 4}, {28, 2}, {15, 1}, {7, 1}};
+static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = {
+  { 64, 4 }, { 28, 2 }, { 15, 1 }, { 7, 1 }
+};
 
 #define MAX_MESH_SPEED 5  // Max speed setting for mesh motion method
-static MESH_PATTERN good_quality_mesh_patterns[MAX_MESH_SPEED + 1]
-                                              [MAX_MESH_STEP] =
-    {{{64, 8}, {28, 4}, {15, 1}, {7, 1}},
-     {{64, 8}, {28, 4}, {15, 1}, {7, 1}},
-     {{64, 8},  {14, 2}, {7, 1},  {7, 1}},
-     {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
-     {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
-     {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
+static MESH_PATTERN
+    good_quality_mesh_patterns[MAX_MESH_SPEED + 1][MAX_MESH_STEP] = {
+      { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } },
+      { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } },
+      { { 64, 8 }, { 14, 2 }, { 7, 1 }, { 7, 1 } },
+      { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+      { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+      { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
     };
-static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] =
-    {50, 25, 15, 5, 1, 1};
+static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] = {
+  50, 25, 15, 5, 1, 1
+};
 
 // Intra-only frames, golden frames (except alt ref overlays) and
 // alt ref frames tend to be coded at a higher-than-ambient quality
@@ -68,8 +70,8 @@ static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
 
   if (speed >= 1) {
     if (VPXMIN(cm->width, cm->height) >= 720) {
-      sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
-                                              : DISABLE_ALL_INTER_SPLIT;
+      sf->disable_split_mask =
+          cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
       sf->partition_search_breakout_dist_thr = (1 << 23);
     } else {
       sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
@@ -79,8 +81,8 @@ static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
 
   if (speed >= 2) {
     if (VPXMIN(cm->width, cm->height) >= 720) {
-      sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
-                                              : DISABLE_ALL_INTER_SPLIT;
+      sf->disable_split_mask =
+          cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
       sf->adaptive_pred_interp_filter = 0;
       sf->partition_search_breakout_dist_thr = (1 << 24);
       sf->partition_search_breakout_rate_thr = 120;
@@ -141,7 +143,7 @@ static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
       sf->use_square_partition_only = !frame_is_intra_only(cm);
     }
 
-    sf->less_rectangular_check  = 1;
+    sf->less_rectangular_check = 1;
 
     sf->use_rd_breakout = 1;
     sf->adaptive_motion_search = 1;
@@ -162,14 +164,14 @@ static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
   }
 
   if (speed >= 2) {
-    sf->tx_size_search_method = frame_is_boosted(cpi) ? USE_FULL_RD
-                                                      : USE_LARGESTALL;
-
-    sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
-                                 FLAG_SKIP_INTRA_DIRMISMATCH |
-                                 FLAG_SKIP_INTRA_BESTINTER |
-                                 FLAG_SKIP_COMP_BESTINTRA |
-                                 FLAG_SKIP_INTRA_LOWVAR;
+    sf->tx_size_search_method =
+        frame_is_boosted(cpi) ? USE_FULL_RD : USE_LARGESTALL;
+
+    sf->mode_search_skip_flags =
+        (cm->frame_type == KEY_FRAME) ? 0 : FLAG_SKIP_INTRA_DIRMISMATCH |
+                                                FLAG_SKIP_INTRA_BESTINTER |
+                                                FLAG_SKIP_COMP_BESTINTRA |
+                                                FLAG_SKIP_INTRA_LOWVAR;
     sf->disable_filter_search_var_thresh = 100;
     sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
     sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX;
@@ -178,8 +180,8 @@ static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
 
   if (speed >= 3) {
     sf->use_square_partition_only = !frame_is_intra_only(cm);
-    sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD
-                                                        : USE_LARGESTALL;
+    sf->tx_size_search_method =
+        frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL;
     sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED;
     sf->adaptive_pred_interp_filter = 0;
     sf->adaptive_mode_search = 1;
@@ -225,13 +227,14 @@ static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
 }
 
 static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
-    SPEED_FEATURES *sf, int speed) {
+                                                     SPEED_FEATURES *sf,
+                                                     int speed) {
   VP10_COMMON *const cm = &cpi->common;
 
   if (speed >= 1) {
     if (VPXMIN(cm->width, cm->height) >= 720) {
-      sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
-                                              : DISABLE_ALL_INTER_SPLIT;
+      sf->disable_split_mask =
+          cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
     } else {
       sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
     }
@@ -239,8 +242,8 @@ static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
 
   if (speed >= 2) {
     if (VPXMIN(cm->width, cm->height) >= 720) {
-      sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
-                                              : DISABLE_ALL_INTER_SPLIT;
+      sf->disable_split_mask =
+          cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
     } else {
       sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY;
     }
@@ -255,13 +258,13 @@ static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
   }
 
   if (speed >= 7) {
-    sf->encode_breakout_thresh = (VPXMIN(cm->width, cm->height) >= 720) ?
-        800 : 300;
+    sf->encode_breakout_thresh =
+        (VPXMIN(cm->width, cm->height) >= 720) ? 800 : 300;
   }
 }
 
-static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
-                                 int speed, vpx_tune_content content) {
+static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, int speed,
+                                 vpx_tune_content content) {
   VP10_COMMON *const cm = &cpi->common;
   const int is_keyframe = cm->frame_type == KEY_FRAME;
   const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
@@ -274,8 +277,8 @@ static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
   if (speed >= 1) {
     sf->use_square_partition_only = !frame_is_intra_only(cm);
     sf->less_rectangular_check = 1;
-    sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD
-                                                        : USE_LARGESTALL;
+    sf->tx_size_search_method =
+        frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL;
 
     sf->use_rd_breakout = 1;
 
@@ -289,11 +292,11 @@ static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
   }
 
   if (speed >= 2) {
-    sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
-                                 FLAG_SKIP_INTRA_DIRMISMATCH |
-                                 FLAG_SKIP_INTRA_BESTINTER |
-                                 FLAG_SKIP_COMP_BESTINTRA |
-                                 FLAG_SKIP_INTRA_LOWVAR;
+    sf->mode_search_skip_flags =
+        (cm->frame_type == KEY_FRAME) ? 0 : FLAG_SKIP_INTRA_DIRMISMATCH |
+                                                FLAG_SKIP_INTRA_BESTINTER |
+                                                FLAG_SKIP_COMP_BESTINTRA |
+                                                FLAG_SKIP_INTRA_LOWVAR;
     sf->adaptive_pred_interp_filter = 2;
     sf->disable_filter_search_var_thresh = 50;
     sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
@@ -326,8 +329,8 @@ static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
     sf->use_fast_coef_costing = 0;
     sf->auto_min_max_partition_size = STRICT_NEIGHBORING_MIN_MAX;
     sf->adjust_partitioning_from_last_frame =
-        cm->last_frame_type != cm->frame_type || (0 ==
-        (frames_since_key + 1) % sf->last_partitioning_redo_frequency);
+        cm->last_frame_type != cm->frame_type ||
+        (0 == (frames_since_key + 1) % sf->last_partitioning_redo_frequency);
     sf->mv.subpel_force_stop = 1;
     for (i = 0; i < TX_SIZES; i++) {
       sf->intra_y_mode_mask[i] = INTRA_DC_H_V;
@@ -347,11 +350,12 @@ static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
 
   if (speed >= 5) {
     sf->use_quant_fp = !is_keyframe;
-    sf->auto_min_max_partition_size = is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX
-                                                  : STRICT_NEIGHBORING_MIN_MAX;
+    sf->auto_min_max_partition_size =
+        is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX : STRICT_NEIGHBORING_MIN_MAX;
     sf->default_max_partition_size = BLOCK_32X32;
     sf->default_min_partition_size = BLOCK_8X8;
-    sf->force_frame_boost = is_keyframe ||
+    sf->force_frame_boost =
+        is_keyframe ||
         (frames_since_key % (sf->last_partitioning_redo_frequency << 1) == 1);
     sf->max_delta_qindex = is_keyframe ? 20 : 15;
     sf->partition_search_type = REFERENCE_PARTITION;
@@ -494,8 +498,7 @@ void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
   sf->use_fast_coef_costing = 0;
   sf->mode_skip_start = MAX_MODES;  // Mode index at which the mode skip mask is set
   sf->schedule_mode_search = 0;
-  for (i = 0; i < BLOCK_SIZES; ++i)
-    sf->inter_mode_mask[i] = INTER_ALL;
+  for (i = 0; i < BLOCK_SIZES; ++i) sf->inter_mode_mask[i] = INTER_ALL;
   sf->max_intra_bsize = BLOCK_64X64;
   sf->reuse_inter_pred_sby = 0;
   // This setting only takes effect when partition_search_type is set
@@ -541,8 +544,7 @@ void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
       sf->exhaustive_searches_thresh = sf->exhaustive_searches_thresh << 1;
 
     for (i = 0; i < MAX_MESH_STEP; ++i) {
-      sf->mesh_patterns[i].range =
-          good_quality_mesh_patterns[speed][i].range;
+      sf->mesh_patterns[i].range = good_quality_mesh_patterns[speed][i].range;
       sf->mesh_patterns[i].interval =
           good_quality_mesh_patterns[speed][i].interval;
     }
@@ -550,8 +552,7 @@ void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
 
   // Slow quant, dct and trellis are not worthwhile for the first pass,
   // so make sure they are always turned off.
-  if (oxcf->pass == 1)
-    sf->optimize_coefficients = 0;
+  if (oxcf->pass == 1) sf->optimize_coefficients = 0;
 
   // No recode for 1 pass.
   if (oxcf->pass == 0) {
@@ -566,7 +567,8 @@ void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
   } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_MORE) {
     cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_more;
   } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_EVENMORE) {
-    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_evenmore;
+    cpi->find_fractional_mv_step =
+        vp10_find_best_sub_pixel_tree_pruned_evenmore;
   }
 
   x->optimize = sf->optimize_coefficients == 1 && oxcf->pass != 1;
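
The MESH_PATTERN tables near the top of this file are consumed as a coarse-to-fine exhaustive grid search: each {range, interval} step scans a +/-range window at the given stride, then recenters on the best point before the next, finer step. A hedged sketch of that loop; cost() stands in for the encoder's SAD/variance call, and this is not the library's actual search routine:

    typedef struct { int range, interval; } MESH_STEP_SKETCH;

    static void mesh_search_sketch(int *best_r, int *best_c,
                                   long (*cost)(int r, int c)) {
      /* best-quality pattern from the table above */
      static const MESH_STEP_SKETCH steps[4] = {
        { 64, 4 }, { 28, 2 }, { 15, 1 }, { 7, 1 }
      };
      int s, r, c, cr = *best_r, cc = *best_c;
      long best = cost(cr, cc);
      for (s = 0; s < 4; ++s) {
        int br = cr, bc = cc;
        for (r = cr - steps[s].range; r <= cr + steps[s].range;
             r += steps[s].interval)
          for (c = cc - steps[s].range; c <= cc + steps[s].range;
               c += steps[s].interval) {
            const long v = cost(r, c);
            if (v < best) { best = v; br = r; bc = c; }
          }
        cr = br;  /* recenter before the finer step */
        cc = bc;
      }
      *best_r = cr;
      *best_c = cc;
    }
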
diff --git a/vp10/encoder/speed_features.h b/vp10/encoder/speed_features.h
index 3b91999298f089b7ab13783cf9ca48b72ce80890..72b58474dcad40eea0b9ad6e614144ede2d0b00c 100644
--- a/vp10/encoder/speed_features.h
+++ b/vp10/encoder/speed_features.h
@@ -18,17 +18,14 @@ extern "C" {
 #endif
 
 enum {
-  INTRA_ALL       = (1 << DC_PRED) |
-                    (1 << V_PRED) | (1 << H_PRED) |
-                    (1 << D45_PRED) | (1 << D135_PRED) |
-                    (1 << D117_PRED) | (1 << D153_PRED) |
-                    (1 << D207_PRED) | (1 << D63_PRED) |
-                    (1 << TM_PRED),
-  INTRA_DC        = (1 << DC_PRED),
-  INTRA_DC_TM     = (1 << DC_PRED) | (1 << TM_PRED),
-  INTRA_DC_H_V    = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED),
-  INTRA_DC_TM_H_V = (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) |
-                    (1 << H_PRED)
+  INTRA_ALL = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED) | (1 << D45_PRED) |
+              (1 << D135_PRED) | (1 << D117_PRED) | (1 << D153_PRED) |
+              (1 << D207_PRED) | (1 << D63_PRED) | (1 << TM_PRED),
+  INTRA_DC = (1 << DC_PRED),
+  INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED),
+  INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED),
+  INTRA_DC_TM_H_V =
+      (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) | (1 << H_PRED)
 };
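
These enum values are per-mode bitmasks: bit i is set when prediction mode i may be evaluated. A one-line sketch of how such a mask is consulted; the actual test lives in the encoder's RD loop:

    /* Nonzero when 'mode' survives the mask, e.g. with INTRA_DC_H_V only
     * DC_PRED, V_PRED and H_PRED do. */
    static int mode_allowed(unsigned mask, int mode) { return (mask >> mode) & 1; }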
 
 enum {
@@ -42,20 +39,15 @@ enum {
 };
 
 enum {
-  DISABLE_ALL_INTER_SPLIT   = (1 << THR_COMP_GA) |
-                              (1 << THR_COMP_LA) |
-                              (1 << THR_ALTR) |
-                              (1 << THR_GOLD) |
-                              (1 << THR_LAST),
+  DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
+                            (1 << THR_ALTR) | (1 << THR_GOLD) | (1 << THR_LAST),
 
-  DISABLE_ALL_SPLIT         = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT,
+  DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT,
 
-  DISABLE_COMPOUND_SPLIT    = (1 << THR_COMP_GA) | (1 << THR_COMP_LA),
+  DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA),
 
-  LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) |
-                              (1 << THR_COMP_LA) |
-                              (1 << THR_ALTR) |
-                              (1 << THR_GOLD)
+  LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
+                              (1 << THR_ALTR) | (1 << THR_GOLD)
 };
 
 typedef enum {
diff --git a/vp10/encoder/subexp.c b/vp10/encoder/subexp.c
index d4074775b6b79be42e226a686ba4749c9af3a0e8..db76a27f8b66b8d2d4a749881a9dae79a9b1b844 100644
--- a/vp10/encoder/subexp.c
+++ b/vp10/encoder/subexp.c
@@ -14,26 +14,29 @@
 #include "vp10/encoder/cost.h"
 #include "vp10/encoder/subexp.h"
 
-#define vp10_cost_upd256  ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
+#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
 
 static const uint8_t update_bits[255] = {
-   5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
-   6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,
-   8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
-   8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
-  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-  10, 11 - CONFIG_MISC_FIXES,
-          11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,  0,
+  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
+  5,  5,  5,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,
+  6,  6,  6,  6,  6,  6,  8,  8,  8,  8,  8,  8,  8,
+  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
+  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  10,
+  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11 - CONFIG_MISC_FIXES,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 0,
 };
 
 static int recenter_nonneg(int v, int m) {
@@ -50,23 +53,23 @@ static int remap_prob(int v, int m) {
   static const uint8_t map_table[MAX_PROB - 1] = {
     // generated by:
     //   map_table[j] = split_index(j, MAX_PROB - 1, MODULUS_PARAM);
-     20,  21,  22,  23,  24,  25,   0,  26,  27,  28,  29,  30,  31,  32,  33,
-     34,  35,  36,  37,   1,  38,  39,  40,  41,  42,  43,  44,  45,  46,  47,
-     48,  49,   2,  50,  51,  52,  53,  54,  55,  56,  57,  58,  59,  60,  61,
-      3,  62,  63,  64,  65,  66,  67,  68,  69,  70,  71,  72,  73,   4,  74,
-     75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,   5,  86,  87,  88,
-     89,  90,  91,  92,  93,  94,  95,  96,  97,   6,  98,  99, 100, 101, 102,
-    103, 104, 105, 106, 107, 108, 109,   7, 110, 111, 112, 113, 114, 115, 116,
-    117, 118, 119, 120, 121,   8, 122, 123, 124, 125, 126, 127, 128, 129, 130,
-    131, 132, 133,   9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
-    145,  10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,  11,
-    158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,  12, 170, 171,
-    172, 173, 174, 175, 176, 177, 178, 179, 180, 181,  13, 182, 183, 184, 185,
-    186, 187, 188, 189, 190, 191, 192, 193,  14, 194, 195, 196, 197, 198, 199,
-    200, 201, 202, 203, 204, 205,  15, 206, 207, 208, 209, 210, 211, 212, 213,
-    214, 215, 216, 217,  16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
-    228, 229,  17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
-     18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,  19,
+    20,  21,  22,  23,  24,  25,  0,   26,  27,  28,  29,  30,  31,  32,  33,
+    34,  35,  36,  37,  1,   38,  39,  40,  41,  42,  43,  44,  45,  46,  47,
+    48,  49,  2,   50,  51,  52,  53,  54,  55,  56,  57,  58,  59,  60,  61,
+    3,   62,  63,  64,  65,  66,  67,  68,  69,  70,  71,  72,  73,  4,   74,
+    75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  5,   86,  87,  88,
+    89,  90,  91,  92,  93,  94,  95,  96,  97,  6,   98,  99,  100, 101, 102,
+    103, 104, 105, 106, 107, 108, 109, 7,   110, 111, 112, 113, 114, 115, 116,
+    117, 118, 119, 120, 121, 8,   122, 123, 124, 125, 126, 127, 128, 129, 130,
+    131, 132, 133, 9,   134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+    145, 10,  146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11,
+    158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12,  170, 171,
+    172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13,  182, 183, 184, 185,
+    186, 187, 188, 189, 190, 191, 192, 193, 14,  194, 195, 196, 197, 198, 199,
+    200, 201, 202, 203, 204, 205, 15,  206, 207, 208, 209, 210, 211, 212, 213,
+    214, 215, 216, 217, 16,  218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+    228, 229, 17,  230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+    18,  242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19,
   };
   v--;
   m--;
@@ -117,9 +120,8 @@ void vp10_write_prob_diff_update(vpx_writer *w, vpx_prob newp, vpx_prob oldp) {
   encode_term_subexp(w, delp);
 }
 
-int vp10_prob_diff_update_savings_search(const unsigned int *ct,
-                                        vpx_prob oldp, vpx_prob *bestp,
-                                        vpx_prob upd) {
+int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
+                                         vpx_prob *bestp, vpx_prob upd) {
   const int old_b = cost_branch256(ct, oldp);
   int bestsavings = 0;
   vpx_prob newp, bestnewp = oldp;
@@ -139,10 +141,9 @@ int vp10_prob_diff_update_savings_search(const unsigned int *ct,
 }
 
 int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
-                                              const vpx_prob *oldp,
-                                              vpx_prob *bestp,
-                                              vpx_prob upd,
-                                              int stepsize) {
+                                               const vpx_prob *oldp,
+                                               vpx_prob *bestp, vpx_prob upd,
+                                               int stepsize) {
   int i, old_b, new_b, update_b, savings, bestsavings, step;
   int newp;
   vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
@@ -158,15 +159,14 @@ int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
   if (*bestp > oldp[PIVOT_NODE]) {
     step = -stepsize;
     for (newp = *bestp; newp > oldp[PIVOT_NODE]; newp += step) {
-      if (newp < 1 || newp > 255)
-        continue;
+      if (newp < 1 || newp > 255) continue;
       newplist[PIVOT_NODE] = newp;
       vp10_model_to_full_probs(newplist, newplist);
       for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
         new_b += cost_branch256(ct + 2 * i, newplist[i]);
       new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
-      update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) +
-          vp10_cost_upd256;
+      update_b =
+          prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
       savings = old_b - new_b - update_b;
       if (savings > bestsavings) {
         bestsavings = savings;
@@ -176,15 +176,14 @@ int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
   } else {
     step = stepsize;
     for (newp = *bestp; newp < oldp[PIVOT_NODE]; newp += step) {
-      if (newp < 1 || newp > 255)
-        continue;
+      if (newp < 1 || newp > 255) continue;
       newplist[PIVOT_NODE] = newp;
       vp10_model_to_full_probs(newplist, newplist);
       for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
         new_b += cost_branch256(ct + 2 * i, newplist[i]);
       new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
-      update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) +
-          vp10_cost_upd256;
+      update_b =
+          prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
       savings = old_b - new_b - update_b;
       if (savings > bestsavings) {
         bestsavings = savings;
@@ -198,11 +197,11 @@ int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
 }
 
 void vp10_cond_prob_diff_update(vpx_writer *w, vpx_prob *oldp,
-                               const unsigned int ct[2]) {
+                                const unsigned int ct[2]) {
   const vpx_prob upd = DIFF_UPDATE_PROB;
   vpx_prob newp = get_binary_prob(ct[0], ct[1]);
-  const int savings = vp10_prob_diff_update_savings_search(ct, *oldp, &newp,
-                                                          upd);
+  const int savings =
+      vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
   assert(newp >= 1);
   if (savings > 0) {
     vpx_write(w, 1, upd);
@@ -217,7 +216,7 @@ int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
                                        const unsigned int ct[2]) {
   const vpx_prob upd = DIFF_UPDATE_PROB;
   vpx_prob newp = get_binary_prob(ct[0], ct[1]);
-  const int savings = vp10_prob_diff_update_savings_search(ct, *oldp, &newp,
-                                                           upd);
+  const int savings =
+      vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
   return savings;
 }
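
Both savings searches above implement the same decision rule: keep the old probability unless recoding with a new one saves more bits than signalling the update costs (update_bits[] plus the vp10_cost_upd256 flag). A floating-point sketch of that rule, in whole bits rather than the library's 1/256-bit fixed point:

    #include <math.h>

    /* Bit cost of a binary branch coded with P(zero-branch) = p, given
     * observed counts ct[0] (zeros) and ct[1] (ones); a float stand-in
     * for cost_branch256(). */
    static double branch_bits(const unsigned ct[2], double p) {
      return ct[0] * -log2(p) + ct[1] * -log2(1.0 - p);
    }

    /* Adopt newp only when the bits saved on the data exceed the bits
     * spent signalling the update. */
    static int worth_updating(const unsigned ct[2], double oldp, double newp,
                              double update_bits) {
      return branch_bits(ct, oldp) - branch_bits(ct, newp) - update_bits > 0.0;
    }
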
diff --git a/vp10/encoder/subexp.h b/vp10/encoder/subexp.h
index 091334f1f207aeb1befae8fc46025fe7d575873e..5adc6e501aa6e7716b71803d4fedcb4b86dd9532 100644
--- a/vp10/encoder/subexp.h
+++ b/vp10/encoder/subexp.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VP10_ENCODER_SUBEXP_H_
 #define VP10_ENCODER_SUBEXP_H_
 
@@ -20,22 +19,19 @@ extern "C" {
 
 struct vpx_writer;
 
-void vp10_write_prob_diff_update(struct vpx_writer *w,
-                                vpx_prob newp, vpx_prob oldp);
+void vp10_write_prob_diff_update(struct vpx_writer *w, vpx_prob newp,
+                                 vpx_prob oldp);
 
 void vp10_cond_prob_diff_update(struct vpx_writer *w, vpx_prob *oldp,
-                               const unsigned int ct[2]);
-
-int vp10_prob_diff_update_savings_search(const unsigned int *ct,
-                                        vpx_prob oldp, vpx_prob *bestp,
-                                        vpx_prob upd);
+                                const unsigned int ct[2]);
 
+int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
+                                         vpx_prob *bestp, vpx_prob upd);
 
 int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
-                                              const vpx_prob *oldp,
-                                              vpx_prob *bestp,
-                                              vpx_prob upd,
-                                              int stepsize);
+                                               const vpx_prob *oldp,
+                                               vpx_prob *bestp, vpx_prob upd,
+                                               int stepsize);
 
 int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
                                        const unsigned int ct[2]);
diff --git a/vp10/encoder/temporal_filter.c b/vp10/encoder/temporal_filter.c
index 51c73245286d0674226fa86bf3a52772e6b07995..a88564df0bbf11a62fbc72c827149c3902147b24 100644
--- a/vp10/encoder/temporal_filter.c
+++ b/vp10/encoder/temporal_filter.c
@@ -31,22 +31,14 @@
 
 static int fixed_divide[512];
 
-static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
-                                            uint8_t *y_mb_ptr,
-                                            uint8_t *u_mb_ptr,
-                                            uint8_t *v_mb_ptr,
-                                            int stride,
-                                            int uv_block_width,
-                                            int uv_block_height,
-                                            int mv_row,
-                                            int mv_col,
-                                            uint8_t *pred,
-                                            struct scale_factors *scale,
-                                            int x, int y) {
+static void temporal_filter_predictors_mb_c(
+    MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr,
+    int stride, int uv_block_width, int uv_block_height, int mv_row, int mv_col,
+    uint8_t *pred, struct scale_factors *scale, int x, int y) {
   const int which_mv = 0;
   const MV mv = { mv_row, mv_col };
   const InterpKernel *const kernel =
-    vp10_filter_kernels[xd->mi[0]->mbmi.interp_filter];
+      vp10_filter_kernels[xd->mi[0]->mbmi.interp_filter];
 
   enum mv_precision mv_precision_uv;
   int uv_stride;
@@ -60,74 +52,46 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vp10_highbd_build_inter_predictor(y_mb_ptr, stride,
-                                     &pred[0], 16,
-                                     &mv,
-                                     scale,
-                                     16, 16,
-                                     which_mv,
-                                     kernel, MV_PRECISION_Q3, x, y, xd->bd);
-
-    vp10_highbd_build_inter_predictor(u_mb_ptr, uv_stride,
-                                     &pred[256], uv_block_width,
-                                     &mv,
-                                     scale,
-                                     uv_block_width, uv_block_height,
-                                     which_mv,
-                                     kernel, mv_precision_uv, x, y, xd->bd);
-
-    vp10_highbd_build_inter_predictor(v_mb_ptr, uv_stride,
-                                     &pred[512], uv_block_width,
-                                     &mv,
-                                     scale,
-                                     uv_block_width, uv_block_height,
-                                     which_mv,
-                                     kernel, mv_precision_uv, x, y, xd->bd);
+    vp10_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
+                                      scale, 16, 16, which_mv, kernel,
+                                      MV_PRECISION_Q3, x, y, xd->bd);
+
+    vp10_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
+                                      uv_block_width, &mv, scale,
+                                      uv_block_width, uv_block_height, which_mv,
+                                      kernel, mv_precision_uv, x, y, xd->bd);
+
+    vp10_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
+                                      uv_block_width, &mv, scale,
+                                      uv_block_width, uv_block_height, which_mv,
+                                      kernel, mv_precision_uv, x, y, xd->bd);
     return;
   }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
-  vp10_build_inter_predictor(y_mb_ptr, stride,
-                            &pred[0], 16,
-                            &mv,
-                            scale,
-                            16, 16,
-                            which_mv,
-                            kernel, MV_PRECISION_Q3, x, y);
-
-  vp10_build_inter_predictor(u_mb_ptr, uv_stride,
-                            &pred[256], uv_block_width,
-                            &mv,
-                            scale,
-                            uv_block_width, uv_block_height,
-                            which_mv,
-                            kernel, mv_precision_uv, x, y);
-
-  vp10_build_inter_predictor(v_mb_ptr, uv_stride,
-                            &pred[512], uv_block_width,
-                            &mv,
-                            scale,
-                            uv_block_width, uv_block_height,
-                            which_mv,
-                            kernel, mv_precision_uv, x, y);
+  vp10_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
+                             which_mv, kernel, MV_PRECISION_Q3, x, y);
+
+  vp10_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
+                             &mv, scale, uv_block_width, uv_block_height,
+                             which_mv, kernel, mv_precision_uv, x, y);
+
+  vp10_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
+                             &mv, scale, uv_block_width, uv_block_height,
+                             which_mv, kernel, mv_precision_uv, x, y);
 }
 
 void vp10_temporal_filter_init(void) {
   int i;
 
   fixed_divide[0] = 0;
-  for (i = 1; i < 512; ++i)
-    fixed_divide[i] = 0x80000 / i;
+  for (i = 1; i < 512; ++i) fixed_divide[i] = 0x80000 / i;
 }
 
-void vp10_temporal_filter_apply_c(uint8_t *frame1,
-                                 unsigned int stride,
-                                 uint8_t *frame2,
-                                 unsigned int block_width,
-                                 unsigned int block_height,
-                                 int strength,
-                                 int filter_weight,
-                                 unsigned int *accumulator,
-                                 uint16_t *count) {
+void vp10_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
+                                  uint8_t *frame2, unsigned int block_width,
+                                  unsigned int block_height, int strength,
+                                  int filter_weight, unsigned int *accumulator,
+                                  uint16_t *count) {
   unsigned int i, j, k;
   int modifier;
   int byte = 0;
@@ -138,17 +102,16 @@ void vp10_temporal_filter_apply_c(uint8_t *frame1,
       int src_byte = frame1[byte];
       int pixel_value = *frame2++;
 
-      modifier   = src_byte - pixel_value;
+      modifier = src_byte - pixel_value;
       // This is an integer approximation of:
       // float coeff = (3.0 * modifier * modifier) / pow(2, strength);
       // modifier = (int)roundf(coeff > 16 ? 0 : 16 - coeff);
-      modifier  *= modifier;
-      modifier  *= 3;
-      modifier  += rounding;
+      modifier *= modifier;
+      modifier *= 3;
+      modifier += rounding;
       modifier >>= strength;
 
-      if (modifier > 16)
-        modifier = 16;
+      if (modifier > 16) modifier = 16;
 
       modifier = 16 - modifier;
       modifier *= filter_weight;
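
The integer 'modifier' pipeline above is the fixed-point form of the commented float expression, with 'rounding' supplying the round-to-nearest term for the >> strength. A direct float sketch of the per-pixel blend weight:

    /* Identical pixels keep the full 16 * filter_weight; the weight decays
     * to 0 as the squared src/pred difference grows. */
    static int pixel_weight_sketch(int src, int pred, int strength,
                                   int filter_weight) {
      const double diff = (double)(src - pred);
      double coeff = 3.0 * diff * diff / (double)(1 << strength);
      if (coeff > 16.0) coeff = 16.0;
      return (int)((16.0 - coeff) * filter_weight);
    }

The accumulator/count updates that follow weight each prediction pixel by this value.
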
@@ -164,15 +127,10 @@ void vp10_temporal_filter_apply_c(uint8_t *frame1,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_temporal_filter_apply_c(uint8_t *frame1_8,
-                                        unsigned int stride,
-                                        uint8_t *frame2_8,
-                                        unsigned int block_width,
-                                        unsigned int block_height,
-                                        int strength,
-                                        int filter_weight,
-                                        unsigned int *accumulator,
-                                        uint16_t *count) {
+void vp10_highbd_temporal_filter_apply_c(
+    uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8,
+    unsigned int block_width, unsigned int block_height, int strength,
+    int filter_weight, unsigned int *accumulator, uint16_t *count) {
   uint16_t *frame1 = CONVERT_TO_SHORTPTR(frame1_8);
   uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8);
   unsigned int i, j, k;
@@ -185,7 +143,7 @@ void vp10_highbd_temporal_filter_apply_c(uint8_t *frame1_8,
       int src_byte = frame1[byte];
       int pixel_value = *frame2++;
 
-      modifier   = src_byte - pixel_value;
+      modifier = src_byte - pixel_value;
       // This is an integer approximation of:
       // float coeff = (3.0 * modifier * modifier) / pow(2, strength);
       // modifier = (int)roundf(coeff > 16 ? 0 : 16 - coeff);
@@ -194,8 +152,7 @@ void vp10_highbd_temporal_filter_apply_c(uint8_t *frame1_8,
       modifier += rounding;
       modifier >>= strength;
 
-      if (modifier > 16)
-        modifier = 16;
+      if (modifier > 16) modifier = 16;
 
       modifier = 16 - modifier;
       modifier *= filter_weight;
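
Per pixel, the filtered output is accumulator/count; the fixed_divide[] table filled in vp10_temporal_filter_init() replaces that division with a multiply by a Q19 reciprocal. A sketch of the arithmetic; the half-count rounding term follows the usual libvpx pattern and should be treated as an assumption here:

    /* Q19 reciprocal division: since fixed_divide[c] == 0x80000 / c,
     * (x * fixed_divide[c]) >> 19 ~= x / c for c in [1, 511]. */
    static unsigned divide_by_count_sketch(unsigned accumulator, unsigned count,
                                           const int fixed_divide[512]) {
      unsigned pval = accumulator + (count >> 1);  /* round to nearest */
      pval *= (unsigned)fixed_divide[count];
      return pval >> 19;
    }
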
@@ -225,7 +182,7 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
   unsigned int sse;
   int cost_list[5];
 
-  MV best_ref_mv1 = {0, 0};
+  MV best_ref_mv1 = { 0, 0 };
   MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
   MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv;
 
@@ -247,19 +204,15 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
 
   // Ignore mv costing by sending NULL pointer instead of cost arrays
   vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
-                 cond_cost_list(cpi, cost_list),
-                 &cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv);
+                  cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
+                  &best_ref_mv1, ref_mv);
 
   // Ignore mv costing by sending NULL pointer instead of cost array
-  bestsme = cpi->find_fractional_mv_step(x, ref_mv,
-                                         &best_ref_mv1,
-                                         cpi->common.allow_high_precision_mv,
-                                         x->errorperbit,
-                                         &cpi->fn_ptr[BLOCK_16X16],
-                                         0, mv_sf->subpel_iters_per_step,
-                                         cond_cost_list(cpi, cost_list),
-                                         NULL, NULL,
-                                         &distortion, &sse, NULL, 0, 0);
+  bestsme = cpi->find_fractional_mv_step(
+      x, ref_mv, &best_ref_mv1, cpi->common.allow_high_precision_mv,
+      x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], 0,
+      mv_sf->subpel_iters_per_step, cond_cost_list(cpi, cost_list), NULL, NULL,
+      &distortion, &sse, NULL, 0, 0);
 
   // Restore input state
   x->plane[0].src = src;
@@ -270,8 +223,7 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
 
 static void temporal_filter_iterate_c(VP10_COMP *cpi,
                                       YV12_BUFFER_CONFIG **frames,
-                                      int frame_count,
-                                      int alt_ref_index,
+                                      int frame_count, int alt_ref_index,
                                       int strength,
                                       struct scale_factors *scale) {
   int byte;
@@ -288,17 +240,17 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
   YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
   uint8_t *dst1, *dst2;
 #if CONFIG_VPX_HIGHBITDEPTH
-  DECLARE_ALIGNED(16, uint16_t,  predictor16[16 * 16 * 3]);
-  DECLARE_ALIGNED(16, uint8_t,  predictor8[16 * 16 * 3]);
+  DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
+  DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
   uint8_t *predictor;
 #else
-  DECLARE_ALIGNED(16, uint8_t,  predictor[16 * 16 * 3]);
+  DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]);
 #endif
   const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;
-  const int mb_uv_width  = 16 >> mbd->plane[1].subsampling_x;
+  const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
 
   // Save input state
-  uint8_t* input_buffer[MAX_MB_PLANE];
+  uint8_t *input_buffer[MAX_MB_PLANE];
   int i;
 #if CONFIG_VPX_HIGHBITDEPTH
   if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
@@ -308,8 +260,7 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
   }
 #endif
 
-  for (i = 0; i < MAX_MB_PLANE; i++)
-    input_buffer[i] = mbd->plane[i].pre[0].buf;
+  for (i = 0; i < MAX_MB_PLANE; i++) input_buffer[i] = mbd->plane[i].pre[0].buf;
 
   for (mb_row = 0; mb_row < mb_rows; mb_row++) {
     // Source frames are extended to 16 pixels. This is different than
@@ -324,8 +275,8 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
     // To keep the mv in play for both Y and UV planes the max that it
     //  can be on a border is therefore 16 - (2*VPX_INTERP_EXTEND+1).
     cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VPX_INTERP_EXTEND));
-    cpi->td.mb.mv_row_max = ((mb_rows - 1 - mb_row) * 16)
-                         + (17 - 2 * VPX_INTERP_EXTEND);
+    cpi->td.mb.mv_row_max =
+        ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
 
     for (mb_col = 0; mb_col < mb_cols; mb_col++) {
       int i, j, k;
@@ -335,15 +286,14 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
       memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
 
       cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VPX_INTERP_EXTEND));
-      cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16)
-                           + (17 - 2 * VPX_INTERP_EXTEND);
+      cpi->td.mb.mv_col_max =
+          ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
 
       for (frame = 0; frame < frame_count; frame++) {
-        const int thresh_low  = 10000;
+        const int thresh_low = 10000;
         const int thresh_high = 20000;
 
-        if (frames[frame] == NULL)
-          continue;
+        if (frames[frame] == NULL) continue;
 
         mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0;
         mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0;
@@ -352,84 +302,68 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
           filter_weight = 2;
         } else {
           // Find best match in this frame by MC
-          int err = temporal_filter_find_matching_mb_c(cpi,
-              frames[alt_ref_index]->y_buffer + mb_y_offset,
-              frames[frame]->y_buffer + mb_y_offset,
-              frames[frame]->y_stride);
+          int err = temporal_filter_find_matching_mb_c(
+              cpi, frames[alt_ref_index]->y_buffer + mb_y_offset,
+              frames[frame]->y_buffer + mb_y_offset, frames[frame]->y_stride);
 
           // Assign a higher weight to the matching MB if its error
           // score is lower. If MC is not applied, the default behavior
           // is to weight all MBs equally.
-          filter_weight = err < thresh_low
-                          ? 2 : err < thresh_high ? 1 : 0;
+          filter_weight = err < thresh_low ? 2 : err < thresh_high ? 1 : 0;
         }
 
         if (filter_weight != 0) {
           // Construct the predictors
-          temporal_filter_predictors_mb_c(mbd,
-              frames[frame]->y_buffer + mb_y_offset,
+          temporal_filter_predictors_mb_c(
+              mbd, frames[frame]->y_buffer + mb_y_offset,
               frames[frame]->u_buffer + mb_uv_offset,
-              frames[frame]->v_buffer + mb_uv_offset,
-              frames[frame]->y_stride,
-              mb_uv_width, mb_uv_height,
-              mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
-              mbd->mi[0]->bmi[0].as_mv[0].as_mv.col,
-              predictor, scale,
+              frames[frame]->v_buffer + mb_uv_offset, frames[frame]->y_stride,
+              mb_uv_width, mb_uv_height, mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
+              mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale,
               mb_col * 16, mb_row * 16);
 
 #if CONFIG_VPX_HIGHBITDEPTH
           if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
             int adj_strength = strength + 2 * (mbd->bd - 8);
             // Apply the filter (YUV)
-            vp10_highbd_temporal_filter_apply(f->y_buffer + mb_y_offset,
-                                             f->y_stride,
-                                             predictor, 16, 16, adj_strength,
-                                             filter_weight,
-                                             accumulator, count);
-            vp10_highbd_temporal_filter_apply(f->u_buffer + mb_uv_offset,
-                                             f->uv_stride, predictor + 256,
-                                             mb_uv_width, mb_uv_height,
-                                             adj_strength,
-                                             filter_weight, accumulator + 256,
-                                             count + 256);
-            vp10_highbd_temporal_filter_apply(f->v_buffer + mb_uv_offset,
-                                             f->uv_stride, predictor + 512,
-                                             mb_uv_width, mb_uv_height,
-                                             adj_strength, filter_weight,
-                                             accumulator + 512, count + 512);
+            vp10_highbd_temporal_filter_apply(
+                f->y_buffer + mb_y_offset, f->y_stride, predictor, 16, 16,
+                adj_strength, filter_weight, accumulator, count);
+            vp10_highbd_temporal_filter_apply(
+                f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
+                mb_uv_width, mb_uv_height, adj_strength, filter_weight,
+                accumulator + 256, count + 256);
+            vp10_highbd_temporal_filter_apply(
+                f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
+                mb_uv_width, mb_uv_height, adj_strength, filter_weight,
+                accumulator + 512, count + 512);
           } else {
             // Apply the filter (YUV)
             vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
-                                      predictor, 16, 16,
-                                      strength, filter_weight,
-                                      accumulator, count);
+                                       predictor, 16, 16, strength,
+                                       filter_weight, accumulator, count);
             vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
-                                      predictor + 256,
-                                      mb_uv_width, mb_uv_height, strength,
-                                      filter_weight, accumulator + 256,
-                                      count + 256);
+                                       predictor + 256, mb_uv_width,
+                                       mb_uv_height, strength, filter_weight,
+                                       accumulator + 256, count + 256);
             vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
-                                      predictor + 512,
-                                      mb_uv_width, mb_uv_height, strength,
-                                      filter_weight, accumulator + 512,
-                                      count + 512);
+                                       predictor + 512, mb_uv_width,
+                                       mb_uv_height, strength, filter_weight,
+                                       accumulator + 512, count + 512);
           }
 #else
           // Apply the filter (YUV)
           vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
-                                    predictor, 16, 16,
-                                    strength, filter_weight,
-                                    accumulator, count);
+                                     predictor, 16, 16, strength, filter_weight,
+                                     accumulator, count);
           vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
-                                    predictor + 256,
-                                    mb_uv_width, mb_uv_height, strength,
-                                    filter_weight, accumulator + 256,
-                                    count + 256);
+                                     predictor + 256, mb_uv_width, mb_uv_height,
+                                     strength, filter_weight, accumulator + 256,
+                                     count + 256);
           vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
-                                    predictor + 512,
-                                    mb_uv_width, mb_uv_height, strength,
-                                    filter_weight, accumulator + 512,
-                                    count + 512);
+                                     predictor + 512, mb_uv_width, mb_uv_height,
+                                     strength, filter_weight, accumulator + 512,
+                                     count + 512);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
         }
       }
@@ -584,13 +518,11 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
   }
 
   // Restore input state
-  for (i = 0; i < MAX_MB_PLANE; i++)
-    mbd->plane[i].pre[0].buf = input_buffer[i];
+  for (i = 0; i < MAX_MB_PLANE; i++) mbd->plane[i].pre[0].buf = input_buffer[i];
 }
 
 // Apply buffer limits and context specific adjustments to arnr filter.
-static void adjust_arnr_filter(VP10_COMP *cpi,
-                               int distance, int group_boost,
+static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
                                int *arnr_frames, int *arnr_strength) {
   const VP10EncoderConfig *const oxcf = &cpi->oxcf;
   const int frames_after_arf =
@@ -600,34 +532,30 @@ static void adjust_arnr_filter(VP10_COMP *cpi,
   int q, frames, strength;
 
   // Define the forward and backwards filter limits for this arnr group.
-  if (frames_fwd > frames_after_arf)
-    frames_fwd = frames_after_arf;
-  if (frames_fwd > distance)
-    frames_fwd = distance;
+  if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf;
+  if (frames_fwd > distance) frames_fwd = distance;
 
   frames_bwd = frames_fwd;
 
   // For even length filter there is one more frame backward
   // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
-  if (frames_bwd < distance)
-    frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
+  if (frames_bwd < distance) frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
 
   // Set the baseline active filter size.
   frames = frames_bwd + 1 + frames_fwd;
 
   // Adjust the strength based on active max q.
   if (cpi->common.current_video_frame > 1)
-    q = ((int)vp10_convert_qindex_to_q(
-        cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth));
+    q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
+                                       cpi->common.bit_depth));
   else
-    q = ((int)vp10_convert_qindex_to_q(
-        cpi->rc.avg_frame_qindex[KEY_FRAME], cpi->common.bit_depth));
+    q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
+                                       cpi->common.bit_depth));
   if (q > 16) {
     strength = oxcf->arnr_strength;
   } else {
     strength = oxcf->arnr_strength - ((16 - q) / 2);
-    if (strength < 0)
-      strength = 0;
+    if (strength < 0) strength = 0;
   }
 
   // Adjust number of frames in filter and strength based on gf boost level.
@@ -661,7 +589,7 @@ void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
   int frames_to_blur_backward;
   int frames_to_blur_forward;
   struct scale_factors sf;
-  YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = {NULL};
+  YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = { NULL };
 
   // Apply context specific adjustments to the arnr filter parameters.
   adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength);
@@ -672,28 +600,24 @@ void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
   // Setup frame pointers, NULL indicates frame not included in filter.
   for (frame = 0; frame < frames_to_blur; ++frame) {
     const int which_buffer = start_frame - frame;
-    struct lookahead_entry *buf = vp10_lookahead_peek(cpi->lookahead,
-                                                     which_buffer);
+    struct lookahead_entry *buf =
+        vp10_lookahead_peek(cpi->lookahead, which_buffer);
     frames[frames_to_blur - 1 - frame] = &buf->img;
   }
 
   if (frames_to_blur > 0) {
-    // Setup scaling factors. Scaling on each of the arnr frames is not
-    // supported.
-    // ARF is produced at the native frame size and resized when coded.
+// Setup scaling factors. Scaling on each of the arnr frames is not
+// supported.
+// ARF is produced at the native frame size and resized when coded.
 #if CONFIG_VPX_HIGHBITDEPTH
-    vp10_setup_scale_factors_for_frame(&sf,
-                                      frames[0]->y_crop_width,
-                                      frames[0]->y_crop_height,
-                                      frames[0]->y_crop_width,
-                                      frames[0]->y_crop_height,
-                                      cpi->common.use_highbitdepth);
+    vp10_setup_scale_factors_for_frame(
+        &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
+        frames[0]->y_crop_width, frames[0]->y_crop_height,
+        cpi->common.use_highbitdepth);
 #else
-    vp10_setup_scale_factors_for_frame(&sf,
-                                      frames[0]->y_crop_width,
-                                      frames[0]->y_crop_height,
-                                      frames[0]->y_crop_width,
-                                      frames[0]->y_crop_height);
+    vp10_setup_scale_factors_for_frame(
+        &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
+        frames[0]->y_crop_width, frames[0]->y_crop_height);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   }
 
diff --git a/vp10/encoder/tokenize.c b/vp10/encoder/tokenize.c
index 46f3b6579b9982d52b1abd2ed7a8b22c44e4d1cb..60960c6a60800946c9a91741cf8dfb9ecf76b070 100644
--- a/vp10/encoder/tokenize.c
+++ b/vp10/encoder/tokenize.c
@@ -25,439 +25,456 @@
 #include "vp10/encoder/tokenize.h"
 
 static const TOKENVALUE dct_cat_lt_10_value_tokens[] = {
-  {9, 63}, {9, 61}, {9, 59}, {9, 57}, {9, 55}, {9, 53}, {9, 51}, {9, 49},
-  {9, 47}, {9, 45}, {9, 43}, {9, 41}, {9, 39}, {9, 37}, {9, 35}, {9, 33},
-  {9, 31}, {9, 29}, {9, 27}, {9, 25}, {9, 23}, {9, 21}, {9, 19}, {9, 17},
-  {9, 15}, {9, 13}, {9, 11}, {9, 9}, {9, 7}, {9, 5}, {9, 3}, {9, 1},
-  {8, 31}, {8, 29}, {8, 27}, {8, 25}, {8, 23}, {8, 21},
-  {8, 19}, {8, 17}, {8, 15}, {8, 13}, {8, 11}, {8, 9},
-  {8, 7}, {8, 5}, {8, 3}, {8, 1},
-  {7, 15}, {7, 13}, {7, 11}, {7, 9}, {7, 7}, {7, 5}, {7, 3}, {7, 1},
-  {6, 7}, {6, 5}, {6, 3}, {6, 1}, {5, 3}, {5, 1},
-  {4, 1}, {3, 1}, {2, 1}, {1, 1}, {0, 0},
-  {1, 0},  {2, 0}, {3, 0}, {4, 0},
-  {5, 0}, {5, 2}, {6, 0}, {6, 2}, {6, 4}, {6, 6},
-  {7, 0}, {7, 2}, {7, 4}, {7, 6}, {7, 8}, {7, 10}, {7, 12}, {7, 14},
-  {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {8, 10}, {8, 12},
-  {8, 14}, {8, 16}, {8, 18}, {8, 20}, {8, 22}, {8, 24},
-  {8, 26}, {8, 28}, {8, 30}, {9, 0}, {9, 2},
-  {9, 4}, {9, 6}, {9, 8}, {9, 10}, {9, 12}, {9, 14}, {9, 16},
-  {9, 18}, {9, 20}, {9, 22}, {9, 24}, {9, 26}, {9, 28},
-  {9, 30}, {9, 32}, {9, 34}, {9, 36}, {9, 38}, {9, 40},
-  {9, 42}, {9, 44}, {9, 46}, {9, 48}, {9, 50}, {9, 52},
-  {9, 54}, {9, 56}, {9, 58}, {9, 60}, {9, 62}
+  { 9, 63 }, { 9, 61 }, { 9, 59 }, { 9, 57 }, { 9, 55 }, { 9, 53 }, { 9, 51 },
+  { 9, 49 }, { 9, 47 }, { 9, 45 }, { 9, 43 }, { 9, 41 }, { 9, 39 }, { 9, 37 },
+  { 9, 35 }, { 9, 33 }, { 9, 31 }, { 9, 29 }, { 9, 27 }, { 9, 25 }, { 9, 23 },
+  { 9, 21 }, { 9, 19 }, { 9, 17 }, { 9, 15 }, { 9, 13 }, { 9, 11 }, { 9, 9 },
+  { 9, 7 },  { 9, 5 },  { 9, 3 },  { 9, 1 },  { 8, 31 }, { 8, 29 }, { 8, 27 },
+  { 8, 25 }, { 8, 23 }, { 8, 21 }, { 8, 19 }, { 8, 17 }, { 8, 15 }, { 8, 13 },
+  { 8, 11 }, { 8, 9 },  { 8, 7 },  { 8, 5 },  { 8, 3 },  { 8, 1 },  { 7, 15 },
+  { 7, 13 }, { 7, 11 }, { 7, 9 },  { 7, 7 },  { 7, 5 },  { 7, 3 },  { 7, 1 },
+  { 6, 7 },  { 6, 5 },  { 6, 3 },  { 6, 1 },  { 5, 3 },  { 5, 1 },  { 4, 1 },
+  { 3, 1 },  { 2, 1 },  { 1, 1 },  { 0, 0 },  { 1, 0 },  { 2, 0 },  { 3, 0 },
+  { 4, 0 },  { 5, 0 },  { 5, 2 },  { 6, 0 },  { 6, 2 },  { 6, 4 },  { 6, 6 },
+  { 7, 0 },  { 7, 2 },  { 7, 4 },  { 7, 6 },  { 7, 8 },  { 7, 10 }, { 7, 12 },
+  { 7, 14 }, { 8, 0 },  { 8, 2 },  { 8, 4 },  { 8, 6 },  { 8, 8 },  { 8, 10 },
+  { 8, 12 }, { 8, 14 }, { 8, 16 }, { 8, 18 }, { 8, 20 }, { 8, 22 }, { 8, 24 },
+  { 8, 26 }, { 8, 28 }, { 8, 30 }, { 9, 0 },  { 9, 2 },  { 9, 4 },  { 9, 6 },
+  { 9, 8 },  { 9, 10 }, { 9, 12 }, { 9, 14 }, { 9, 16 }, { 9, 18 }, { 9, 20 },
+  { 9, 22 }, { 9, 24 }, { 9, 26 }, { 9, 28 }, { 9, 30 }, { 9, 32 }, { 9, 34 },
+  { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, { 9, 44 }, { 9, 46 }, { 9, 48 },
+  { 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }
 };
-const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens = dct_cat_lt_10_value_tokens +
-    (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens))
-    / 2;
+const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens =
+    dct_cat_lt_10_value_tokens +
+    (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) /
+        2;
 
 // Array indices are identical to previously-existing CONTEXT_NODE indices
 const vpx_tree_index vp10_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
-  -EOB_TOKEN, 2,                       // 0  = EOB
-  -ZERO_TOKEN, 4,                      // 1  = ZERO
-  -ONE_TOKEN, 6,                       // 2  = ONE
-  8, 12,                               // 3  = LOW_VAL
-  -TWO_TOKEN, 10,                      // 4  = TWO
-  -THREE_TOKEN, -FOUR_TOKEN,           // 5  = THREE
-  14, 16,                              // 6  = HIGH_LOW
-  -CATEGORY1_TOKEN, -CATEGORY2_TOKEN,  // 7  = CAT_ONE
-  18, 20,                              // 8  = CAT_THREEFOUR
-  -CATEGORY3_TOKEN, -CATEGORY4_TOKEN,  // 9  = CAT_THREE
-  -CATEGORY5_TOKEN, -CATEGORY6_TOKEN   // 10 = CAT_FIVE
+  -EOB_TOKEN,
+  2,  // 0  = EOB
+  -ZERO_TOKEN,
+  4,  // 1  = ZERO
+  -ONE_TOKEN,
+  6,  // 2  = ONE
+  8,
+  12,  // 3  = LOW_VAL
+  -TWO_TOKEN,
+  10,  // 4  = TWO
+  -THREE_TOKEN,
+  -FOUR_TOKEN,  // 5  = THREE
+  14,
+  16,  // 6  = HIGH_LOW
+  -CATEGORY1_TOKEN,
+  -CATEGORY2_TOKEN,  // 7  = CAT_ONE
+  18,
+  20,  // 8  = CAT_THREEFOUR
+  -CATEGORY3_TOKEN,
+  -CATEGORY4_TOKEN,  // 9  = CAT_THREE
+  -CATEGORY5_TOKEN,
+  -CATEGORY6_TOKEN  // 10 = CAT_FIVE
 };
 
-static const vpx_tree_index cat1[2] = {0, 0};
-static const vpx_tree_index cat2[4] = {2, 2, 0, 0};
-static const vpx_tree_index cat3[6] = {2, 2, 4, 4, 0, 0};
-static const vpx_tree_index cat4[8] = {2, 2, 4, 4, 6, 6, 0, 0};
-static const vpx_tree_index cat5[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
-static const vpx_tree_index cat6[28] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12,
-    14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 0, 0};
-
-static const int16_t zero_cost[] = {0};
-static const int16_t sign_cost[] = {255, 257};
-static const int16_t cat1_cost[] = {429, 431, 616, 618};
-static const int16_t cat2_cost[] = {624, 626, 727, 729, 848, 850, 951, 953};
-static const int16_t cat3_cost[] = {
-  820, 822, 893, 895, 940, 942, 1013, 1015, 1096, 1098, 1169, 1171, 1216, 1218,
-  1289, 1291
-};
-static const int16_t cat4_cost[] = {
-  1032, 1034, 1075, 1077, 1105, 1107, 1148, 1150, 1194, 1196, 1237, 1239,
-  1267, 1269, 1310, 1312, 1328, 1330, 1371, 1373, 1401, 1403, 1444, 1446,
-  1490, 1492, 1533, 1535, 1563, 1565, 1606, 1608
-};
+static const vpx_tree_index cat1[2] = { 0, 0 };
+static const vpx_tree_index cat2[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6[28] = { 2,  2,  4,  4,  6,  6,  8,  8,  10, 10,
+                                         12, 12, 14, 14, 16, 16, 18, 18, 20, 20,
+                                         22, 22, 24, 24, 26, 26, 0,  0 };
+
+static const int16_t zero_cost[] = { 0 };
+static const int16_t sign_cost[] = { 255, 257 };
+static const int16_t cat1_cost[] = { 429, 431, 616, 618 };
+static const int16_t cat2_cost[] = { 624, 626, 727, 729, 848, 850, 951, 953 };
+static const int16_t cat3_cost[] = { 820,  822,  893,  895,  940,  942,
+                                     1013, 1015, 1096, 1098, 1169, 1171,
+                                     1216, 1218, 1289, 1291 };
+static const int16_t cat4_cost[] = { 1032, 1034, 1075, 1077, 1105, 1107, 1148,
+                                     1150, 1194, 1196, 1237, 1239, 1267, 1269,
+                                     1310, 1312, 1328, 1330, 1371, 1373, 1401,
+                                     1403, 1444, 1446, 1490, 1492, 1533, 1535,
+                                     1563, 1565, 1606, 1608 };
 static const int16_t cat5_cost[] = {
-  1269, 1271, 1283, 1285, 1306, 1308, 1320,
-  1322, 1347, 1349, 1361, 1363, 1384, 1386, 1398, 1400, 1443, 1445, 1457,
-  1459, 1480, 1482, 1494, 1496, 1521, 1523, 1535, 1537, 1558, 1560, 1572,
-  1574, 1592, 1594, 1606, 1608, 1629, 1631, 1643, 1645, 1670, 1672, 1684,
-  1686, 1707, 1709, 1721, 1723, 1766, 1768, 1780, 1782, 1803, 1805, 1817,
-  1819, 1844, 1846, 1858, 1860, 1881, 1883, 1895, 1897
+  1269, 1271, 1283, 1285, 1306, 1308, 1320, 1322, 1347, 1349, 1361, 1363, 1384,
+  1386, 1398, 1400, 1443, 1445, 1457, 1459, 1480, 1482, 1494, 1496, 1521, 1523,
+  1535, 1537, 1558, 1560, 1572, 1574, 1592, 1594, 1606, 1608, 1629, 1631, 1643,
+  1645, 1670, 1672, 1684, 1686, 1707, 1709, 1721, 1723, 1766, 1768, 1780, 1782,
+  1803, 1805, 1817, 1819, 1844, 1846, 1858, 1860, 1881, 1883, 1895, 1897
 };
 const int16_t vp10_cat6_low_cost[256] = {
-  1638, 1640, 1646, 1648, 1652, 1654, 1660, 1662,
-  1670, 1672, 1678, 1680, 1684, 1686, 1692, 1694, 1711, 1713, 1719, 1721,
-  1725, 1727, 1733, 1735, 1743, 1745, 1751, 1753, 1757, 1759, 1765, 1767,
-  1787, 1789, 1795, 1797, 1801, 1803, 1809, 1811, 1819, 1821, 1827, 1829,
-  1833, 1835, 1841, 1843, 1860, 1862, 1868, 1870, 1874, 1876, 1882, 1884,
-  1892, 1894, 1900, 1902, 1906, 1908, 1914, 1916, 1940, 1942, 1948, 1950,
-  1954, 1956, 1962, 1964, 1972, 1974, 1980, 1982, 1986, 1988, 1994, 1996,
-  2013, 2015, 2021, 2023, 2027, 2029, 2035, 2037, 2045, 2047, 2053, 2055,
-  2059, 2061, 2067, 2069, 2089, 2091, 2097, 2099, 2103, 2105, 2111, 2113,
-  2121, 2123, 2129, 2131, 2135, 2137, 2143, 2145, 2162, 2164, 2170, 2172,
-  2176, 2178, 2184, 2186, 2194, 2196, 2202, 2204, 2208, 2210, 2216, 2218,
-  2082, 2084, 2090, 2092, 2096, 2098, 2104, 2106, 2114, 2116, 2122, 2124,
-  2128, 2130, 2136, 2138, 2155, 2157, 2163, 2165, 2169, 2171, 2177, 2179,
-  2187, 2189, 2195, 2197, 2201, 2203, 2209, 2211, 2231, 2233, 2239, 2241,
-  2245, 2247, 2253, 2255, 2263, 2265, 2271, 2273, 2277, 2279, 2285, 2287,
-  2304, 2306, 2312, 2314, 2318, 2320, 2326, 2328, 2336, 2338, 2344, 2346,
-  2350, 2352, 2358, 2360, 2384, 2386, 2392, 2394, 2398, 2400, 2406, 2408,
-  2416, 2418, 2424, 2426, 2430, 2432, 2438, 2440, 2457, 2459, 2465, 2467,
-  2471, 2473, 2479, 2481, 2489, 2491, 2497, 2499, 2503, 2505, 2511, 2513,
-  2533, 2535, 2541, 2543, 2547, 2549, 2555, 2557, 2565, 2567, 2573, 2575,
-  2579, 2581, 2587, 2589, 2606, 2608, 2614, 2616, 2620, 2622, 2628, 2630,
-  2638, 2640, 2646, 2648, 2652, 2654, 2660, 2662
+  1638, 1640, 1646, 1648, 1652, 1654, 1660, 1662, 1670, 1672, 1678, 1680, 1684,
+  1686, 1692, 1694, 1711, 1713, 1719, 1721, 1725, 1727, 1733, 1735, 1743, 1745,
+  1751, 1753, 1757, 1759, 1765, 1767, 1787, 1789, 1795, 1797, 1801, 1803, 1809,
+  1811, 1819, 1821, 1827, 1829, 1833, 1835, 1841, 1843, 1860, 1862, 1868, 1870,
+  1874, 1876, 1882, 1884, 1892, 1894, 1900, 1902, 1906, 1908, 1914, 1916, 1940,
+  1942, 1948, 1950, 1954, 1956, 1962, 1964, 1972, 1974, 1980, 1982, 1986, 1988,
+  1994, 1996, 2013, 2015, 2021, 2023, 2027, 2029, 2035, 2037, 2045, 2047, 2053,
+  2055, 2059, 2061, 2067, 2069, 2089, 2091, 2097, 2099, 2103, 2105, 2111, 2113,
+  2121, 2123, 2129, 2131, 2135, 2137, 2143, 2145, 2162, 2164, 2170, 2172, 2176,
+  2178, 2184, 2186, 2194, 2196, 2202, 2204, 2208, 2210, 2216, 2218, 2082, 2084,
+  2090, 2092, 2096, 2098, 2104, 2106, 2114, 2116, 2122, 2124, 2128, 2130, 2136,
+  2138, 2155, 2157, 2163, 2165, 2169, 2171, 2177, 2179, 2187, 2189, 2195, 2197,
+  2201, 2203, 2209, 2211, 2231, 2233, 2239, 2241, 2245, 2247, 2253, 2255, 2263,
+  2265, 2271, 2273, 2277, 2279, 2285, 2287, 2304, 2306, 2312, 2314, 2318, 2320,
+  2326, 2328, 2336, 2338, 2344, 2346, 2350, 2352, 2358, 2360, 2384, 2386, 2392,
+  2394, 2398, 2400, 2406, 2408, 2416, 2418, 2424, 2426, 2430, 2432, 2438, 2440,
+  2457, 2459, 2465, 2467, 2471, 2473, 2479, 2481, 2489, 2491, 2497, 2499, 2503,
+  2505, 2511, 2513, 2533, 2535, 2541, 2543, 2547, 2549, 2555, 2557, 2565, 2567,
+  2573, 2575, 2579, 2581, 2587, 2589, 2606, 2608, 2614, 2616, 2620, 2622, 2628,
+  2630, 2638, 2640, 2646, 2648, 2652, 2654, 2660, 2662
 };
 const int16_t vp10_cat6_high_cost[128] = {
-  72, 892, 1183, 2003, 1448, 2268, 2559, 3379,
-  1709, 2529, 2820, 3640, 3085, 3905, 4196, 5016, 2118, 2938, 3229, 4049,
-  3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062,
-  2118, 2938, 3229, 4049, 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686,
-  5131, 5951, 6242, 7062, 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471,
-  5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108, 2118, 2938, 3229, 4049,
-  3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062,
-  4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471, 5801, 6621, 6912, 7732,
-  7177, 7997, 8288, 9108, 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471,
-  5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108, 6210, 7030, 7321, 8141,
-  7586, 8406, 8697, 9517, 7847, 8667, 8958, 9778, 9223, 10043, 10334, 11154
+  72,   892,  1183, 2003, 1448, 2268,  2559,  3379, 1709, 2529, 2820, 3640,
+  3085, 3905, 4196, 5016, 2118, 2938,  3229,  4049, 3494, 4314, 4605, 5425,
+  3755, 4575, 4866, 5686, 5131, 5951,  6242,  7062, 2118, 2938, 3229, 4049,
+  3494, 4314, 4605, 5425, 3755, 4575,  4866,  5686, 5131, 5951, 6242, 7062,
+  4164, 4984, 5275, 6095, 5540, 6360,  6651,  7471, 5801, 6621, 6912, 7732,
+  7177, 7997, 8288, 9108, 2118, 2938,  3229,  4049, 3494, 4314, 4605, 5425,
+  3755, 4575, 4866, 5686, 5131, 5951,  6242,  7062, 4164, 4984, 5275, 6095,
+  5540, 6360, 6651, 7471, 5801, 6621,  6912,  7732, 7177, 7997, 8288, 9108,
+  4164, 4984, 5275, 6095, 5540, 6360,  6651,  7471, 5801, 6621, 6912, 7732,
+  7177, 7997, 8288, 9108, 6210, 7030,  7321,  8141, 7586, 8406, 8697, 9517,
+  7847, 8667, 8958, 9778, 9223, 10043, 10334, 11154
 };
 
 #if CONFIG_VPX_HIGHBITDEPTH
 const int16_t vp10_cat6_high10_high_cost[512] = {
-  74, 894, 1185, 2005, 1450, 2270, 2561,
-  3381, 1711, 2531, 2822, 3642, 3087, 3907, 4198, 5018, 2120, 2940, 3231,
-  4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244,
-  7064, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868,
-  5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
-  7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 2120, 2940, 3231,
-  4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244,
-  7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914,
-  7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
-  7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323,
-  8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336,
-  11156, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868,
-  5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
-  7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277,
-  6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290,
-  9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
-  9780, 9225, 10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
-  7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323,
-  8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336,
-  11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
-  9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454,
-  10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 2120,
-  2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133,
-  5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803,
-  6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542,
-  6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212,
-  7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225,
-  10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803,
-  6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323, 8143, 7588,
-  8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 6212,
-  7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225,
-  10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454, 10745, 11565,
-  9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 4166, 4986, 5277,
-  6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290,
-  9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
-  9780, 9225, 10045, 10336, 11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699,
-  9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369,
-  10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091,
-  12382, 13202, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669,
-  8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454,
-  10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 8258,
-  9078, 9369, 10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826,
-  11271, 12091, 12382, 13202, 10304, 11124, 11415, 12235, 11680, 12500, 12791,
-  13611, 11941, 12761, 13052, 13872, 13317, 14137, 14428, 15248,
+  74,    894,   1185,  2005,  1450,  2270,  2561,  3381,  1711,  2531,  2822,
+  3642,  3087,  3907,  4198,  5018,  2120,  2940,  3231,  4051,  3496,  4316,
+  4607,  5427,  3757,  4577,  4868,  5688,  5133,  5953,  6244,  7064,  2120,
+  2940,  3231,  4051,  3496,  4316,  4607,  5427,  3757,  4577,  4868,  5688,
+  5133,  5953,  6244,  7064,  4166,  4986,  5277,  6097,  5542,  6362,  6653,
+  7473,  5803,  6623,  6914,  7734,  7179,  7999,  8290,  9110,  2120,  2940,
+  3231,  4051,  3496,  4316,  4607,  5427,  3757,  4577,  4868,  5688,  5133,
+  5953,  6244,  7064,  4166,  4986,  5277,  6097,  5542,  6362,  6653,  7473,
+  5803,  6623,  6914,  7734,  7179,  7999,  8290,  9110,  4166,  4986,  5277,
+  6097,  5542,  6362,  6653,  7473,  5803,  6623,  6914,  7734,  7179,  7999,
+  8290,  9110,  6212,  7032,  7323,  8143,  7588,  8408,  8699,  9519,  7849,
+  8669,  8960,  9780,  9225,  10045, 10336, 11156, 2120,  2940,  3231,  4051,
+  3496,  4316,  4607,  5427,  3757,  4577,  4868,  5688,  5133,  5953,  6244,
+  7064,  4166,  4986,  5277,  6097,  5542,  6362,  6653,  7473,  5803,  6623,
+  6914,  7734,  7179,  7999,  8290,  9110,  4166,  4986,  5277,  6097,  5542,
+  6362,  6653,  7473,  5803,  6623,  6914,  7734,  7179,  7999,  8290,  9110,
+  6212,  7032,  7323,  8143,  7588,  8408,  8699,  9519,  7849,  8669,  8960,
+  9780,  9225,  10045, 10336, 11156, 4166,  4986,  5277,  6097,  5542,  6362,
+  6653,  7473,  5803,  6623,  6914,  7734,  7179,  7999,  8290,  9110,  6212,
+  7032,  7323,  8143,  7588,  8408,  8699,  9519,  7849,  8669,  8960,  9780,
+  9225,  10045, 10336, 11156, 6212,  7032,  7323,  8143,  7588,  8408,  8699,
+  9519,  7849,  8669,  8960,  9780,  9225,  10045, 10336, 11156, 8258,  9078,
+  9369,  10189, 9634,  10454, 10745, 11565, 9895,  10715, 11006, 11826, 11271,
+  12091, 12382, 13202, 2120,  2940,  3231,  4051,  3496,  4316,  4607,  5427,
+  3757,  4577,  4868,  5688,  5133,  5953,  6244,  7064,  4166,  4986,  5277,
+  6097,  5542,  6362,  6653,  7473,  5803,  6623,  6914,  7734,  7179,  7999,
+  8290,  9110,  4166,  4986,  5277,  6097,  5542,  6362,  6653,  7473,  5803,
+  6623,  6914,  7734,  7179,  7999,  8290,  9110,  6212,  7032,  7323,  8143,
+  7588,  8408,  8699,  9519,  7849,  8669,  8960,  9780,  9225,  10045, 10336,
+  11156, 4166,  4986,  5277,  6097,  5542,  6362,  6653,  7473,  5803,  6623,
+  6914,  7734,  7179,  7999,  8290,  9110,  6212,  7032,  7323,  8143,  7588,
+  8408,  8699,  9519,  7849,  8669,  8960,  9780,  9225,  10045, 10336, 11156,
+  6212,  7032,  7323,  8143,  7588,  8408,  8699,  9519,  7849,  8669,  8960,
+  9780,  9225,  10045, 10336, 11156, 8258,  9078,  9369,  10189, 9634,  10454,
+  10745, 11565, 9895,  10715, 11006, 11826, 11271, 12091, 12382, 13202, 4166,
+  4986,  5277,  6097,  5542,  6362,  6653,  7473,  5803,  6623,  6914,  7734,
+  7179,  7999,  8290,  9110,  6212,  7032,  7323,  8143,  7588,  8408,  8699,
+  9519,  7849,  8669,  8960,  9780,  9225,  10045, 10336, 11156, 6212,  7032,
+  7323,  8143,  7588,  8408,  8699,  9519,  7849,  8669,  8960,  9780,  9225,
+  10045, 10336, 11156, 8258,  9078,  9369,  10189, 9634,  10454, 10745, 11565,
+  9895,  10715, 11006, 11826, 11271, 12091, 12382, 13202, 6212,  7032,  7323,
+  8143,  7588,  8408,  8699,  9519,  7849,  8669,  8960,  9780,  9225,  10045,
+  10336, 11156, 8258,  9078,  9369,  10189, 9634,  10454, 10745, 11565, 9895,
+  10715, 11006, 11826, 11271, 12091, 12382, 13202, 8258,  9078,  9369,  10189,
+  9634,  10454, 10745, 11565, 9895,  10715, 11006, 11826, 11271, 12091, 12382,
+  13202, 10304, 11124, 11415, 12235, 11680, 12500, 12791, 13611, 11941, 12761,
+  13052, 13872, 13317, 14137, 14428, 15248,
 };
 const int16_t vp10_cat6_high12_high_cost[2048] = {
-  76, 896, 1187, 2007, 1452, 2272, 2563,
-  3383, 1713, 2533, 2824, 3644, 3089, 3909, 4200, 5020, 2122, 2942, 3233,
-  4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246,
-  7066, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870,
-  5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
-  7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 2122, 2942, 3233,
-  4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246,
-  7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
-  7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
-  7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325,
-  8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
-  11158, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870,
-  5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
-  7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279,
-  6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
-  9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
-  9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
-  7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325,
-  8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
-  11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
-  9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
-  10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 2122,
-  2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135,
-  5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805,
-  6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544,
-  6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214,
-  7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
-  10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805,
-  6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590,
-  8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214,
-  7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
-  10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
-  9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279,
-  6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
-  9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
-  9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
-  9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
-  10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
-  12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671,
-  8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
-  10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
-  9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
-  11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
-  13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 2122, 2942,
-  3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955,
-  6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
-  6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364,
-  6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034,
-  7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
-  10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
-  6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410,
-  8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034,
-  7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
-  10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
-  10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279, 6099,
-  5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112,
-  6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
-  9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521,
-  7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191,
-  9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
-  13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
-  9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
-  10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
-  9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
-  11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
-  13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 4168, 4988,
-  5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001,
-  8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671,
-  8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410,
-  8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080,
-  9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273,
-  12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
-  8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636,
-  10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
-  8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
-  11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502,
-  12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 6214,
-  7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
-  10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
-  9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371,
-  10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
-  12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
-  12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371, 10191,
-  9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
-  13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
-  13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682,
-  12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250,
-  12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100,
-  15920, 15365, 16185, 16476, 17296, 2122, 2942, 3233, 4053, 3498, 4318, 4609,
-  5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279,
-  6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
-  9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
-  7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
-  9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279,
-  6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
-  9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
-  9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
-  9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
-  10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
-  12384, 13204, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
-  6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410,
-  8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034,
-  7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
-  10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
-  10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145,
-  7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
-  8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
-  11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456,
-  10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306,
-  11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874,
-  13319, 14139, 14430, 15250, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475,
-  5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145,
-  7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
-  6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
-  9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
-  11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034,
-  7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
-  10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
-  10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191,
-  9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
+  76,    896,   1187,  2007,  1452,  2272,  2563,  3383,  1713,  2533,  2824,
+  3644,  3089,  3909,  4200,  5020,  2122,  2942,  3233,  4053,  3498,  4318,
+  4609,  5429,  3759,  4579,  4870,  5690,  5135,  5955,  6246,  7066,  2122,
+  2942,  3233,  4053,  3498,  4318,  4609,  5429,  3759,  4579,  4870,  5690,
+  5135,  5955,  6246,  7066,  4168,  4988,  5279,  6099,  5544,  6364,  6655,
+  7475,  5805,  6625,  6916,  7736,  7181,  8001,  8292,  9112,  2122,  2942,
+  3233,  4053,  3498,  4318,  4609,  5429,  3759,  4579,  4870,  5690,  5135,
+  5955,  6246,  7066,  4168,  4988,  5279,  6099,  5544,  6364,  6655,  7475,
+  5805,  6625,  6916,  7736,  7181,  8001,  8292,  9112,  4168,  4988,  5279,
+  6099,  5544,  6364,  6655,  7475,  5805,  6625,  6916,  7736,  7181,  8001,
+  8292,  9112,  6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,
+  8671,  8962,  9782,  9227,  10047, 10338, 11158, 2122,  2942,  3233,  4053,
+  3498,  4318,  4609,  5429,  3759,  4579,  4870,  5690,  5135,  5955,  6246,
+  7066,  4168,  4988,  5279,  6099,  5544,  6364,  6655,  7475,  5805,  6625,
+  6916,  7736,  7181,  8001,  8292,  9112,  4168,  4988,  5279,  6099,  5544,
+  6364,  6655,  7475,  5805,  6625,  6916,  7736,  7181,  8001,  8292,  9112,
+  6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,
+  9782,  9227,  10047, 10338, 11158, 4168,  4988,  5279,  6099,  5544,  6364,
+  6655,  7475,  5805,  6625,  6916,  7736,  7181,  8001,  8292,  9112,  6214,
+  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,
+  9227,  10047, 10338, 11158, 6214,  7034,  7325,  8145,  7590,  8410,  8701,
+  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 8260,  9080,
+  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273,
+  12093, 12384, 13204, 2122,  2942,  3233,  4053,  3498,  4318,  4609,  5429,
+  3759,  4579,  4870,  5690,  5135,  5955,  6246,  7066,  4168,  4988,  5279,
+  6099,  5544,  6364,  6655,  7475,  5805,  6625,  6916,  7736,  7181,  8001,
+  8292,  9112,  4168,  4988,  5279,  6099,  5544,  6364,  6655,  7475,  5805,
+  6625,  6916,  7736,  7181,  8001,  8292,  9112,  6214,  7034,  7325,  8145,
+  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338,
+  11158, 4168,  4988,  5279,  6099,  5544,  6364,  6655,  7475,  5805,  6625,
+  6916,  7736,  7181,  8001,  8292,  9112,  6214,  7034,  7325,  8145,  7590,
+  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158,
+  6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,
+  9782,  9227,  10047, 10338, 11158, 8260,  9080,  9371,  10191, 9636,  10456,
+  10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168,
+  4988,  5279,  6099,  5544,  6364,  6655,  7475,  5805,  6625,  6916,  7736,
+  7181,  8001,  8292,  9112,  6214,  7034,  7325,  8145,  7590,  8410,  8701,
+  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 6214,  7034,
+  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,
+  10047, 10338, 11158, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567,
+  9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214,  7034,  7325,
+  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047,
+  10338, 11158, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,
+  10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,  9080,  9371,  10191,
+  9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384,
   13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
-  13054, 13874, 13319, 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590,
-  8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260,
-  9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
-  11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
-  11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
+  13054, 13874, 13319, 14139, 14430, 15250, 2122,  2942,  3233,  4053,  3498,
+  4318,  4609,  5429,  3759,  4579,  4870,  5690,  5135,  5955,  6246,  7066,
+  4168,  4988,  5279,  6099,  5544,  6364,  6655,  7475,  5805,  6625,  6916,
+  7736,  7181,  8001,  8292,  9112,  4168,  4988,  5279,  6099,  5544,  6364,
+  6655,  7475,  5805,  6625,  6916,  7736,  7181,  8001,  8292,  9112,  6214,
+  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,
+  9227,  10047, 10338, 11158, 4168,  4988,  5279,  6099,  5544,  6364,  6655,
+  7475,  5805,  6625,  6916,  7736,  7181,  8001,  8292,  9112,  6214,  7034,
+  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,
+  10047, 10338, 11158, 6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,
+  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 8260,  9080,  9371,
+  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093,
+  12384, 13204, 4168,  4988,  5279,  6099,  5544,  6364,  6655,  7475,  5805,
+  6625,  6916,  7736,  7181,  8001,  8292,  9112,  6214,  7034,  7325,  8145,
+  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338,
+  11158, 6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,
+  8962,  9782,  9227,  10047, 10338, 11158, 8260,  9080,  9371,  10191, 9636,
+  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204,
+  6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,
+  9782,  9227,  10047, 10338, 11158, 8260,  9080,  9371,  10191, 9636,  10456,
+  10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
+  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828,
+  11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+  13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 4168,  4988,
+  5279,  6099,  5544,  6364,  6655,  7475,  5805,  6625,  6916,  7736,  7181,
+  8001,  8292,  9112,  6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,
+  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 6214,  7034,  7325,
+  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047,
+  10338, 11158, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,
+  10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214,  7034,  7325,  8145,
+  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338,
+  11158, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717,
+  11008, 11828, 11273, 12093, 12384, 13204, 8260,  9080,  9371,  10191, 9636,
+  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204,
+  10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054,
+  13874, 13319, 14139, 14430, 15250, 6214,  7034,  7325,  8145,  7590,  8410,
+  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 8260,
+  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828,
+  11273, 12093, 12384, 13204, 8260,  9080,  9371,  10191, 9636,  10456, 10747,
+  11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
   11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
-  14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
-  9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417,
+  14139, 14430, 15250, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567,
+  9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417,
   12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139,
   14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
   12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283,
   13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476,
-  17296, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
-  7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
-  9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325,
-  8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
-  11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
-  11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590,
-  8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260,
-  9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
-  11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
-  11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
-  11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
-  14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
-  8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636,
-  10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
-  8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
-  11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502,
-  12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260,
-  9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+  17296, 2122,  2942,  3233,  4053,  3498,  4318,  4609,  5429,  3759,  4579,
+  4870,  5690,  5135,  5955,  6246,  7066,  4168,  4988,  5279,  6099,  5544,
+  6364,  6655,  7475,  5805,  6625,  6916,  7736,  7181,  8001,  8292,  9112,
+  4168,  4988,  5279,  6099,  5544,  6364,  6655,  7475,  5805,  6625,  6916,
+  7736,  7181,  8001,  8292,  9112,  6214,  7034,  7325,  8145,  7590,  8410,
+  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 4168,
+  4988,  5279,  6099,  5544,  6364,  6655,  7475,  5805,  6625,  6916,  7736,
+  7181,  8001,  8292,  9112,  6214,  7034,  7325,  8145,  7590,  8410,  8701,
+  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 6214,  7034,
+  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,
+  10047, 10338, 11158, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567,
+  9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168,  4988,  5279,
+  6099,  5544,  6364,  6655,  7475,  5805,  6625,  6916,  7736,  7181,  8001,
+  8292,  9112,  6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,
+  8671,  8962,  9782,  9227,  10047, 10338, 11158, 6214,  7034,  7325,  8145,
+  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338,
+  11158, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717,
+  11008, 11828, 11273, 12093, 12384, 13204, 6214,  7034,  7325,  8145,  7590,
+  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158,
+  8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008,
+  11828, 11273, 12093, 12384, 13204, 8260,  9080,  9371,  10191, 9636,  10456,
+  10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306,
+  11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874,
+  13319, 14139, 14430, 15250, 4168,  4988,  5279,  6099,  5544,  6364,  6655,
+  7475,  5805,  6625,  6916,  7736,  7181,  8001,  8292,  9112,  6214,  7034,
+  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,
+  10047, 10338, 11158, 6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,
+  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 8260,  9080,  9371,
+  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093,
+  12384, 13204, 6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,
+  8671,  8962,  9782,  9227,  10047, 10338, 11158, 8260,  9080,  9371,  10191,
+  9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384,
+  13204, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717,
+  11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682,
+  12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250,
+  6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,
+  9782,  9227,  10047, 10338, 11158, 8260,  9080,  9371,  10191, 9636,  10456,
+  10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
+  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828,
   11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
-  13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126,
+  13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260,  9080,
+  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273,
+  12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613,
+  11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417,
+  12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139,
+  14430, 15250, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989,
+  14809, 15100, 15920, 15365, 16185, 16476, 17296, 4168,  4988,  5279,  6099,
+  5544,  6364,  6655,  7475,  5805,  6625,  6916,  7736,  7181,  8001,  8292,
+  9112,  6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,
+  8962,  9782,  9227,  10047, 10338, 11158, 6214,  7034,  7325,  8145,  7590,
+  8410,  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158,
+  8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008,
+  11828, 11273, 12093, 12384, 13204, 6214,  7034,  7325,  8145,  7590,  8410,
+  8701,  9521,  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 8260,
+  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828,
+  11273, 12093, 12384, 13204, 8260,  9080,  9371,  10191, 9636,  10456, 10747,
+  11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
   11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
-  14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659,
-  13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, 6214, 7034, 7325,
-  8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
-  11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
-  11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636,
-  10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
+  14139, 14430, 15250, 6214,  7034,  7325,  8145,  7590,  8410,  8701,  9521,
+  7851,  8671,  8962,  9782,  9227,  10047, 10338, 11158, 8260,  9080,  9371,
+  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093,
+  12384, 13204, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,
+  10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237,
+  11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430,
+  15250, 8260,  9080,  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717,
+  11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682,
+  12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250,
   10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054,
-  13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456,
-  10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306,
-  11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874,
-  13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+  13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728, 14548,
+  14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, 6214,
+  7034,  7325,  8145,  7590,  8410,  8701,  9521,  7851,  8671,  8962,  9782,
+  9227,  10047, 10338, 11158, 8260,  9080,  9371,  10191, 9636,  10456, 10747,
+  11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,  9080,
+  9371,  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273,
+  12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613,
+  11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260,  9080,  9371,
+  10191, 9636,  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093,
+  12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
+  12763, 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237,
+  11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430,
+  15250, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809,
+  15100, 15920, 15365, 16185, 16476, 17296, 8260,  9080,  9371,  10191, 9636,
+  10456, 10747, 11567, 9897,  10717, 11008, 11828, 11273, 12093, 12384, 13204,
+  10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054,
+  13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502,
+  12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352,
+  13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920,
+  15365, 16185, 16476, 17296, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
   13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172,
   13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365,
-  16185, 16476, 17296, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
-  9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417,
-  12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139,
-  14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
-  12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283,
-  13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476,
-  17296, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
-  13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728,
-  14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296,
-  12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100,
-  15920, 15365, 16185, 16476, 17296, 14398, 15218, 15509, 16329, 15774, 16594,
-  16885, 17705, 16035, 16855, 17146, 17966, 17411, 18231, 18522, 19342
+  16185, 16476, 17296, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659,
+  13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, 14398, 15218, 15509,
+  16329, 15774, 16594, 16885, 17705, 16035, 16855, 17146, 17966, 17411, 18231,
+  18522, 19342
 };
 #endif
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static const vpx_tree_index cat1_high10[2] = {0, 0};
-static const vpx_tree_index cat2_high10[4] = {2, 2, 0, 0};
-static const vpx_tree_index cat3_high10[6] = {2, 2, 4, 4, 0, 0};
-static const vpx_tree_index cat4_high10[8] = {2, 2, 4, 4, 6, 6, 0, 0};
-static const vpx_tree_index cat5_high10[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
-static const vpx_tree_index cat6_high10[32] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
-  12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28,
-  30, 30, 0, 0};
-static const vpx_tree_index cat1_high12[2] = {0, 0};
-static const vpx_tree_index cat2_high12[4] = {2, 2, 0, 0};
-static const vpx_tree_index cat3_high12[6] = {2, 2, 4, 4, 0, 0};
-static const vpx_tree_index cat4_high12[8] = {2, 2, 4, 4, 6, 6, 0, 0};
-static const vpx_tree_index cat5_high12[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
-static const vpx_tree_index cat6_high12[36] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
-  12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28,
-  30, 30, 32, 32, 34, 34, 0, 0};
+static const vpx_tree_index cat1_high10[2] = { 0, 0 };
+static const vpx_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4_high10[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5_high10[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6_high10[32] = { 2,  2,  4,  4,  6,  6,  8,  8,
+                                                10, 10, 12, 12, 14, 14, 16, 16,
+                                                18, 18, 20, 20, 22, 22, 24, 24,
+                                                26, 26, 28, 28, 30, 30, 0,  0 };
+static const vpx_tree_index cat1_high12[2] = { 0, 0 };
+static const vpx_tree_index cat2_high12[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3_high12[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4_high12[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5_high12[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6_high12[36] = {
+  2,  2,  4,  4,  6,  6,  8,  8,  10, 10, 12, 12, 14, 14, 16, 16, 18, 18,
+  20, 20, 22, 22, 24, 24, 26, 26, 28, 28, 30, 30, 32, 32, 34, 34, 0,  0
+};
 #endif
 
 const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS] = {
-  {0, 0, 0, 0, zero_cost},                              // ZERO_TOKEN
-  {0, 0, 0, 1, sign_cost},                              // ONE_TOKEN
-  {0, 0, 0, 2, sign_cost},                              // TWO_TOKEN
-  {0, 0, 0, 3, sign_cost},                              // THREE_TOKEN
-  {0, 0, 0, 4, sign_cost},                              // FOUR_TOKEN
-  {cat1, vp10_cat1_prob, 1,  CAT1_MIN_VAL, cat1_cost},  // CATEGORY1_TOKEN
-  {cat2, vp10_cat2_prob, 2,  CAT2_MIN_VAL, cat2_cost},  // CATEGORY2_TOKEN
-  {cat3, vp10_cat3_prob, 3,  CAT3_MIN_VAL, cat3_cost},  // CATEGORY3_TOKEN
-  {cat4, vp10_cat4_prob, 4,  CAT4_MIN_VAL, cat4_cost},  // CATEGORY4_TOKEN
-  {cat5, vp10_cat5_prob, 5,  CAT5_MIN_VAL, cat5_cost},  // CATEGORY5_TOKEN
-  {cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0},          // CATEGORY6_TOKEN
-  {0, 0, 0, 0, zero_cost}                               // EOB_TOKEN
+  { 0, 0, 0, 0, zero_cost },                             // ZERO_TOKEN
+  { 0, 0, 0, 1, sign_cost },                             // ONE_TOKEN
+  { 0, 0, 0, 2, sign_cost },                             // TWO_TOKEN
+  { 0, 0, 0, 3, sign_cost },                             // THREE_TOKEN
+  { 0, 0, 0, 4, sign_cost },                             // FOUR_TOKEN
+  { cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost },  // CATEGORY1_TOKEN
+  { cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost },  // CATEGORY2_TOKEN
+  { cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost },  // CATEGORY3_TOKEN
+  { cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost },  // CATEGORY4_TOKEN
+  { cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost },  // CATEGORY5_TOKEN
+  { cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0 },         // CATEGORY6_TOKEN
+  { 0, 0, 0, 0, zero_cost }                              // EOB_TOKEN
 };
 
 #if CONFIG_VPX_HIGHBITDEPTH
 const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS] = {
-  {0, 0, 0, 0, zero_cost},                                            // ZERO
-  {0, 0, 0, 1, sign_cost},                                            // ONE
-  {0, 0, 0, 2, sign_cost},                                            // TWO
-  {0, 0, 0, 3, sign_cost},                                            // THREE
-  {0, 0, 0, 4, sign_cost},                                            // FOUR
-  {cat1_high10, vp10_cat1_prob_high10, 1,  CAT1_MIN_VAL, cat1_cost},  // CAT1
-  {cat2_high10, vp10_cat2_prob_high10, 2,  CAT2_MIN_VAL, cat2_cost},  // CAT2
-  {cat3_high10, vp10_cat3_prob_high10, 3,  CAT3_MIN_VAL, cat3_cost},  // CAT3
-  {cat4_high10, vp10_cat4_prob_high10, 4,  CAT4_MIN_VAL, cat4_cost},  // CAT4
-  {cat5_high10, vp10_cat5_prob_high10, 5,  CAT5_MIN_VAL, cat5_cost},  // CAT5
-  {cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0},          // CAT6
-  {0, 0, 0, 0, zero_cost}                                             // EOB
+  { 0, 0, 0, 0, zero_cost },                                           // ZERO
+  { 0, 0, 0, 1, sign_cost },                                           // ONE
+  { 0, 0, 0, 2, sign_cost },                                           // TWO
+  { 0, 0, 0, 3, sign_cost },                                           // THREE
+  { 0, 0, 0, 4, sign_cost },                                           // FOUR
+  { cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
+  { cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
+  { cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
+  { cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
+  { cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
+  { cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 },         // CAT6
+  { 0, 0, 0, 0, zero_cost }                                            // EOB
 };
 const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = {
-  {0, 0, 0, 0, zero_cost},                                            // ZERO
-  {0, 0, 0, 1, sign_cost},                                            // ONE
-  {0, 0, 0, 2, sign_cost},                                            // TWO
-  {0, 0, 0, 3, sign_cost},                                            // THREE
-  {0, 0, 0, 4, sign_cost},                                            // FOUR
-  {cat1_high12, vp10_cat1_prob_high12, 1,  CAT1_MIN_VAL, cat1_cost},  // CAT1
-  {cat2_high12, vp10_cat2_prob_high12, 2,  CAT2_MIN_VAL, cat2_cost},  // CAT2
-  {cat3_high12, vp10_cat3_prob_high12, 3,  CAT3_MIN_VAL, cat3_cost},  // CAT3
-  {cat4_high12, vp10_cat4_prob_high12, 4,  CAT4_MIN_VAL, cat4_cost},  // CAT4
-  {cat5_high12, vp10_cat5_prob_high12, 5,  CAT5_MIN_VAL, cat5_cost},  // CAT5
-  {cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0},          // CAT6
-  {0, 0, 0, 0, zero_cost}                                             // EOB
+  { 0, 0, 0, 0, zero_cost },                                           // ZERO
+  { 0, 0, 0, 1, sign_cost },                                           // ONE
+  { 0, 0, 0, 2, sign_cost },                                           // TWO
+  { 0, 0, 0, 3, sign_cost },                                           // THREE
+  { 0, 0, 0, 4, sign_cost },                                           // FOUR
+  { cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
+  { cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
+  { cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
+  { cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
+  { cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
+  { cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 },         // CAT6
+  { 0, 0, 0, 0, zero_cost }                                            // EOB
 };
 #endif
 
 const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = {
-  {2, 2}, {6, 3}, {28, 5}, {58, 6}, {59, 6}, {60, 6}, {61, 6}, {124, 7},
-  {125, 7}, {126, 7}, {127, 7}, {0, 1}
+  { 2, 2 },  { 6, 3 },   { 28, 5 },  { 58, 6 },  { 59, 6 },  { 60, 6 },
+  { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
 };
 
-
 struct tokenize_b_args {
   VP10_COMP *cpi;
   ThreadData *td;
   TOKENEXTRA **tp;
 };
 
-static void set_entropy_context_b(int plane, int block,
-                                  int blk_row, int blk_col,
-                                  BLOCK_SIZE plane_bsize,
+static void set_entropy_context_b(int plane, int block, int blk_row,
+                                  int blk_col, BLOCK_SIZE plane_bsize,
                                   TX_SIZE tx_size, void *arg) {
-  struct tokenize_b_args* const args = arg;
+  struct tokenize_b_args *const args = arg;
   ThreadData *const td = args->td;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblock_plane *p = &x->plane[plane];
   struct macroblockd_plane *pd = &xd->plane[plane];
-  vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0,
-                    blk_col, blk_row);
+  vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
+                    blk_row);
 }
 
 static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree,
                              int32_t extra, uint8_t token,
-                             uint8_t skip_eob_node,
-                             unsigned int *counts) {
+                             uint8_t skip_eob_node, unsigned int *counts) {
   (*t)->token = token;
   (*t)->extra = extra;
   (*t)->context_tree = context_tree;
@@ -468,8 +485,7 @@ static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree,
 
 static INLINE void add_token_no_extra(TOKENEXTRA **t,
                                       const vpx_prob *context_tree,
-                                      uint8_t token,
-                                      uint8_t skip_eob_node,
+                                      uint8_t token, uint8_t skip_eob_node,
                                       unsigned int *counts) {
   (*t)->token = token;
   (*t)->context_tree = context_tree;
@@ -485,9 +501,8 @@ static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id,
 }
 
 static void tokenize_b(int plane, int block, int blk_row, int blk_col,
-                       BLOCK_SIZE plane_bsize,
-                       TX_SIZE tx_size, void *arg) {
-  struct tokenize_b_args* const args = arg;
+                       BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
+  struct tokenize_b_args *const args = arg;
   VP10_COMP *cpi = args->cpi;
   ThreadData *const td = args->td;
   MACROBLOCK *const x = &td->mb;
@@ -499,7 +514,7 @@ static void tokenize_b(int plane, int block, int blk_row, int blk_col,
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   int pt; /* near block/prev token context index */
   int c;
-  TOKENEXTRA *t = *tp;        /* store tokens starting here */
+  TOKENEXTRA *t = *tp; /* store tokens starting here */
   int eob = p->eobs[block];
   const PLANE_TYPE type = pd->plane_type;
   const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
@@ -567,8 +582,7 @@ struct is_skippable_args {
   int *skippable;
 };
 static void is_skippable(int plane, int block, int blk_row, int blk_col,
-                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                         void *argv) {
+                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *argv) {
   struct is_skippable_args *args = argv;
   (void)plane;
   (void)plane_bsize;
@@ -582,9 +596,9 @@ static void is_skippable(int plane, int block, int blk_row, int blk_col,
 //              vp10_foreach_transform_block() and simplify is_skippable().
 int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   int result = 1;
-  struct is_skippable_args args = {x->plane[plane].eobs, &result};
+  struct is_skippable_args args = { x->plane[plane].eobs, &result };
   vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
-                                         &args);
+                                          &args);
   return result;
 }
 
@@ -593,35 +607,34 @@ static void has_high_freq_coeff(int plane, int block, int blk_row, int blk_col,
                                 void *argv) {
   struct is_skippable_args *args = argv;
   int eobs = (tx_size == TX_4X4) ? 3 : 10;
-  (void) plane;
-  (void) plane_bsize;
-  (void) blk_row;
-  (void) blk_col;
+  (void)plane;
+  (void)plane_bsize;
+  (void)blk_row;
+  (void)blk_col;
 
   *(args->skippable) |= (args->eobs[block] > eobs);
 }
 
 int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   int result = 0;
-  struct is_skippable_args args = {x->plane[plane].eobs, &result};
+  struct is_skippable_args args = { x->plane[plane].eobs, &result };
   vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
-                                         has_high_freq_coeff, &args);
+                                          has_high_freq_coeff, &args);
   return result;
 }
 
 void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
-                     int dry_run, BLOCK_SIZE bsize) {
+                      int dry_run, BLOCK_SIZE bsize) {
   VP10_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const int ctx = vp10_get_skip_context(xd);
-  const int skip_inc = !segfeature_active(&cm->seg, mbmi->segment_id,
-                                          SEG_LVL_SKIP);
-  struct tokenize_b_args arg = {cpi, td, t};
+  const int skip_inc =
+      !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
+  struct tokenize_b_args arg = { cpi, td, t };
   if (mbmi->skip) {
-    if (!dry_run)
-      td->counts->skip[ctx][1] += skip_inc;
+    if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
     reset_skip_context(xd, bsize);
     return;
   }
diff --git a/vp10/encoder/tokenize.h b/vp10/encoder/tokenize.h
index 536a55c5ec3f0aa4592fbaccff044b59a33f19f2..0a0204831f691d5f23aed89be32a5036a2372f1a 100644
--- a/vp10/encoder/tokenize.h
+++ b/vp10/encoder/tokenize.h
@@ -20,15 +20,14 @@
 extern "C" {
 #endif
 
-#define EOSB_TOKEN 127     // Not signalled, encoder only
+#define EOSB_TOKEN 127  // Not signalled, encoder only
 
 #if CONFIG_VPX_HIGHBITDEPTH
-  typedef int32_t EXTRABIT;
+typedef int32_t EXTRABIT;
 #else
-  typedef int16_t EXTRABIT;
+typedef int16_t EXTRABIT;
 #endif
 
-
 typedef struct {
   int16_t token;
   EXTRABIT extra;
@@ -52,7 +51,7 @@ struct VP10_COMP;
 struct ThreadData;
 
 void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td,
-                     TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
+                      TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
 
 extern const int16_t *vp10_dct_value_cost_ptr;
 /* TODO: The Token field should be broken out into a separate char array to
@@ -66,27 +65,26 @@ extern const int16_t vp10_cat6_high_cost[128];
 extern const int16_t vp10_cat6_high10_high_cost[512];
 extern const int16_t vp10_cat6_high12_high_cost[2048];
 static INLINE int16_t vp10_get_cost(int16_t token, EXTRABIT extrabits,
-                                   const int16_t *cat6_high_table) {
-  if (token != CATEGORY6_TOKEN)
-    return vp10_extra_bits[token].cost[extrabits];
-  return vp10_cat6_low_cost[extrabits & 0xff]
-      + cat6_high_table[extrabits >> 8];
+                                    const int16_t *cat6_high_table) {
+  if (token != CATEGORY6_TOKEN) return vp10_extra_bits[token].cost[extrabits];
+  return vp10_cat6_low_cost[extrabits & 0xff] + cat6_high_table[extrabits >> 8];
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static INLINE const int16_t* vp10_get_high_cost_table(int bit_depth) {
+static INLINE const int16_t *vp10_get_high_cost_table(int bit_depth) {
   return bit_depth == 8 ? vp10_cat6_high_cost
-      : (bit_depth == 10 ? vp10_cat6_high10_high_cost :
-         vp10_cat6_high12_high_cost);
+                        : (bit_depth == 10 ? vp10_cat6_high10_high_cost
+                                           : vp10_cat6_high12_high_cost);
 }
 #else
-static INLINE const int16_t* vp10_get_high_cost_table(int bit_depth) {
-  (void) bit_depth;
+static INLINE const int16_t *vp10_get_high_cost_table(int bit_depth) {
+  (void)bit_depth;
   return vp10_cat6_high_cost;
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-static INLINE void vp10_get_token_extra(int v, int16_t *token, EXTRABIT *extra) {
+static INLINE void vp10_get_token_extra(int v, int16_t *token,
+                                        EXTRABIT *extra) {
   if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
     *token = CATEGORY6_TOKEN;
     if (v >= CAT6_MIN_VAL)
@@ -99,12 +97,10 @@ static INLINE void vp10_get_token_extra(int v, int16_t *token, EXTRABIT *extra)
   *extra = vp10_dct_cat_lt_10_value_tokens[v].extra;
 }
 static INLINE int16_t vp10_get_token(int v) {
-  if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL)
-    return 10;
+  if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10;
   return vp10_dct_cat_lt_10_value_tokens[v].token;
 }
 
-
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/vp10/encoder/treewriter.c b/vp10/encoder/treewriter.c
index 1f42f32a11e6f15911c08357f12213393d6bc774..152bf40f3c3d58bfd0df52eb37549d989dbfed62 100644
--- a/vp10/encoder/treewriter.c
+++ b/vp10/encoder/treewriter.c
@@ -27,7 +27,7 @@ static void tree2tok(struct vp10_token *tokens, const vpx_tree_index *tree,
 }
 
 void vp10_tokens_from_tree(struct vp10_token *tokens,
-                          const vpx_tree_index *tree) {
+                           const vpx_tree_index *tree) {
   tree2tok(tokens, tree, 0, 0, 0);
 }
 
@@ -52,7 +52,7 @@ static unsigned int convert_distribution(unsigned int i, vpx_tree tree,
 }
 
 void vp10_tree_probs_from_distribution(vpx_tree tree,
-                                      unsigned int branch_ct[/* n-1 */][2],
-                                      const unsigned int num_events[/* n */]) {
+                                       unsigned int branch_ct[/* n-1 */][2],
+                                       const unsigned int num_events[/* n */]) {
   convert_distribution(0, tree, branch_ct, num_events);
 }
diff --git a/vp10/encoder/treewriter.h b/vp10/encoder/treewriter.h
index 6b76a03e4704db0f293bf2c200cabc3243e4714e..05de00219b887edbaf6f0df4ac0f3e37a487f1c8 100644
--- a/vp10/encoder/treewriter.h
+++ b/vp10/encoder/treewriter.h
@@ -18,19 +18,19 @@ extern "C" {
 #endif
 
 void vp10_tree_probs_from_distribution(vpx_tree tree,
-                                      unsigned int branch_ct[ /* n - 1 */ ][2],
-                                      const unsigned int num_events[ /* n */ ]);
+                                       unsigned int branch_ct[/* n - 1 */][2],
+                                       const unsigned int num_events[/* n */]);
 
 struct vp10_token {
   int value;
   int len;
 };
 
-void vp10_tokens_from_tree(struct vp10_token*, const vpx_tree_index *);
+void vp10_tokens_from_tree(struct vp10_token *, const vpx_tree_index *);
 
 static INLINE void vp10_write_tree(vpx_writer *w, const vpx_tree_index *tree,
-                                  const vpx_prob *probs, int bits, int len,
-                                  vpx_tree_index i) {
+                                   const vpx_prob *probs, int bits, int len,
+                                   vpx_tree_index i) {
   do {
     const int bit = (bits >> --len) & 1;
     vpx_write(w, bit, probs[i >> 1]);
@@ -39,8 +39,8 @@ static INLINE void vp10_write_tree(vpx_writer *w, const vpx_tree_index *tree,
 }
 
 static INLINE void vp10_write_token(vpx_writer *w, const vpx_tree_index *tree,
-                                   const vpx_prob *probs,
-                                   const struct vp10_token *token) {
+                                    const vpx_prob *probs,
+                                    const struct vp10_token *token) {
   vp10_write_tree(w, tree, probs, token->value, token->len, 0);
 }
 
diff --git a/vp10/encoder/x86/dct_sse2.c b/vp10/encoder/x86/dct_sse2.c
index e1111570a26d1a8afab915c001c41f8b5a7bae78..7a61ada46e61be597a12e29a613dba11b54a2619 100644
--- a/vp10/encoder/x86/dct_sse2.c
+++ b/vp10/encoder/x86/dct_sse2.c
@@ -78,8 +78,8 @@ static void fdct4_sse2(__m128i *in) {
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
 
   __m128i u[4], v[4];
-  u[0]=_mm_unpacklo_epi16(in[0], in[1]);
-  u[1]=_mm_unpacklo_epi16(in[3], in[2]);
+  u[0] = _mm_unpacklo_epi16(in[0], in[1]);
+  u[1] = _mm_unpacklo_epi16(in[3], in[2]);
 
   v[0] = _mm_add_epi16(u[0], u[1]);
   v[1] = _mm_sub_epi16(u[0], u[1]);
@@ -151,14 +151,12 @@ static void fadst4_sse2(__m128i *in) {
   transpose_4x4(in);
 }
 
-void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output,
-                     int stride, int tx_type) {
+void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+                      int tx_type) {
   __m128i in[4];
 
   switch (tx_type) {
-    case DCT_DCT:
-      vpx_fdct4x4_sse2(input, output, stride);
-      break;
+    case DCT_DCT: vpx_fdct4x4_sse2(input, output, stride); break;
     case ADST_DCT:
       load_buffer_4x4(input, in, stride);
       fadst4_sse2(in);
@@ -177,21 +175,16 @@ void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output,
       fadst4_sse2(in);
       write_buffer_4x4(output, in);
       break;
-   default:
-     assert(0);
-     break;
+    default: assert(0); break;
   }
 }
 
-void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
-                            int16_t* coeff_ptr, intptr_t n_coeffs,
-                            int skip_block, const int16_t* zbin_ptr,
-                            const int16_t* round_ptr, const int16_t* quant_ptr,
-                            const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
-                            int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
-                            uint16_t* eob_ptr,
-                            const int16_t* scan_ptr,
-                            const int16_t* iscan_ptr) {
+void vp10_fdct8x8_quant_sse2(
+    const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
+    int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
+    const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
+    int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+    uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
   __m128i zero;
   int pass;
   // Constants
@@ -208,14 +201,14 @@ void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
   const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   // Load input
-  __m128i in0  = _mm_load_si128((const __m128i *)(input + 0 * stride));
-  __m128i in1  = _mm_load_si128((const __m128i *)(input + 1 * stride));
-  __m128i in2  = _mm_load_si128((const __m128i *)(input + 2 * stride));
-  __m128i in3  = _mm_load_si128((const __m128i *)(input + 3 * stride));
-  __m128i in4  = _mm_load_si128((const __m128i *)(input + 4 * stride));
-  __m128i in5  = _mm_load_si128((const __m128i *)(input + 5 * stride));
-  __m128i in6  = _mm_load_si128((const __m128i *)(input + 6 * stride));
-  __m128i in7  = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
   __m128i *in[8];
   int index = 0;
 
@@ -469,9 +462,9 @@ void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
 
       // Setup global values
       {
-        round = _mm_load_si128((const __m128i*)round_ptr);
-        quant = _mm_load_si128((const __m128i*)quant_ptr);
-        dequant = _mm_load_si128((const __m128i*)dequant_ptr);
+        round = _mm_load_si128((const __m128i *)round_ptr);
+        quant = _mm_load_si128((const __m128i *)quant_ptr);
+        dequant = _mm_load_si128((const __m128i *)dequant_ptr);
       }
 
       {
@@ -503,15 +496,15 @@ void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
         qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
         qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
 
-        _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
-        _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
 
         coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
         dequant = _mm_unpackhi_epi64(dequant, dequant);
         coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
 
-        _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
-        _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
       }
 
       {
@@ -524,8 +517,8 @@ void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
         zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
         nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
         nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
-        iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
-        iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+        iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+        iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
         // Add one to convert from indices to counts
         iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
         iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -568,14 +561,14 @@ void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
         qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
         qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
 
-        _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
-        _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
 
         coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
         coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
 
-        _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
-        _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
       }
 
       {
@@ -588,8 +581,8 @@ void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
         zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
         nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
         nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
-        iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
-        iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+        iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+        iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
         // Add one to convert from indices to counts
         iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
         iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -615,10 +608,10 @@ void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
     }
   } else {
     do {
-      _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero);
-      _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero);
-      _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero);
-      _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero);
+      _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+      _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+      _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+      _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
       n_coeffs += 8 * 2;
     } while (n_coeffs < 0);
     *eob_ptr = 0;
@@ -628,14 +621,14 @@ void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
 // load 8x8 array
 static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
                                    int stride) {
-  in[0]  = _mm_load_si128((const __m128i *)(input + 0 * stride));
-  in[1]  = _mm_load_si128((const __m128i *)(input + 1 * stride));
-  in[2]  = _mm_load_si128((const __m128i *)(input + 2 * stride));
-  in[3]  = _mm_load_si128((const __m128i *)(input + 3 * stride));
-  in[4]  = _mm_load_si128((const __m128i *)(input + 4 * stride));
-  in[5]  = _mm_load_si128((const __m128i *)(input + 5 * stride));
-  in[6]  = _mm_load_si128((const __m128i *)(input + 6 * stride));
-  in[7]  = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+  in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+  in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+  in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+  in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+  in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+  in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
 
   in[0] = _mm_slli_epi16(in[0], 2);
   in[1] = _mm_slli_epi16(in[1], 2);
@@ -930,14 +923,14 @@ static void fadst8_sse2(__m128i *in) {
   __m128i in0, in1, in2, in3, in4, in5, in6, in7;
 
   // properly aligned for butterfly input
-  in0  = in[7];
-  in1  = in[0];
-  in2  = in[5];
-  in3  = in[2];
-  in4  = in[3];
-  in5  = in[4];
-  in6  = in[1];
-  in7  = in[6];
+  in0 = in[7];
+  in1 = in[0];
+  in2 = in[5];
+  in3 = in[2];
+  in4 = in[3];
+  in5 = in[4];
+  in6 = in[1];
+  in7 = in[6];
 
   // column transformation
   // stage 1
@@ -1135,14 +1128,12 @@ static void fadst8_sse2(__m128i *in) {
   array_transpose_8x8(in, in);
 }
 
-void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output,
-                     int stride, int tx_type) {
+void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+                      int tx_type) {
   __m128i in[8];
 
   switch (tx_type) {
-    case DCT_DCT:
-      vpx_fdct8x8_sse2(input, output, stride);
-      break;
+    case DCT_DCT: vpx_fdct8x8_sse2(input, output, stride); break;
     case ADST_DCT:
       load_buffer_8x8(input, in, stride);
       fadst8_sse2(in);
@@ -1164,13 +1155,11 @@ void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output,
       right_shift_8x8(in, 1);
       write_buffer_8x8(output, in, 8);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
 
-static INLINE void load_buffer_16x16(const int16_t* input, __m128i *in0,
+static INLINE void load_buffer_16x16(const int16_t *input, __m128i *in0,
                                      __m128i *in1, int stride) {
   // load first 8 columns
   load_buffer_8x8(input, in0, stride);
@@ -1530,13 +1519,13 @@ static void fdct16_8col(__m128i *in) {
   v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
   v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
 
-  in[1]  = _mm_packs_epi32(v[0], v[1]);
-  in[9]  = _mm_packs_epi32(v[2], v[3]);
-  in[5]  = _mm_packs_epi32(v[4], v[5]);
+  in[1] = _mm_packs_epi32(v[0], v[1]);
+  in[9] = _mm_packs_epi32(v[2], v[3]);
+  in[5] = _mm_packs_epi32(v[4], v[5]);
   in[13] = _mm_packs_epi32(v[6], v[7]);
-  in[3]  = _mm_packs_epi32(v[8], v[9]);
+  in[3] = _mm_packs_epi32(v[8], v[9]);
   in[11] = _mm_packs_epi32(v[10], v[11]);
-  in[7]  = _mm_packs_epi32(v[12], v[13]);
+  in[7] = _mm_packs_epi32(v[12], v[13]);
   in[15] = _mm_packs_epi32(v[14], v[15]);
 }
 
@@ -2022,14 +2011,12 @@ static void fadst16_sse2(__m128i *in0, __m128i *in1) {
   array_transpose_16x16(in0, in1);
 }
 
-void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output,
-                       int stride, int tx_type) {
+void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+                        int tx_type) {
   __m128i in0[16], in1[16];
 
   switch (tx_type) {
-    case DCT_DCT:
-      vpx_fdct16x16_sse2(input, output, stride);
-      break;
+    case DCT_DCT: vpx_fdct16x16_sse2(input, output, stride); break;
     case ADST_DCT:
       load_buffer_16x16(input, in0, in1, stride);
       fadst16_sse2(in0, in1);
@@ -2051,8 +2038,6 @@ void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output,
       fadst16_sse2(in0, in1);
       write_buffer_16x16(output, in0, in1, 16);
       break;
-    default:
-      assert(0);
-      break;
+    default: assert(0); break;
   }
 }
diff --git a/vp10/encoder/x86/dct_ssse3.c b/vp10/encoder/x86/dct_ssse3.c
index df298d871104af31f9aaa92d304d1638290ec84b..74794b1d73b43d246a5767f5dee51a51bd1e1f60 100644
--- a/vp10/encoder/x86/dct_ssse3.c
+++ b/vp10/encoder/x86/dct_ssse3.c
@@ -20,16 +20,12 @@
 #include "vpx_dsp/x86/inv_txfm_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"
 
-void vp10_fdct8x8_quant_ssse3(const int16_t *input, int stride,
-                             int16_t* coeff_ptr, intptr_t n_coeffs,
-                             int skip_block, const int16_t* zbin_ptr,
-                             const int16_t* round_ptr, const int16_t* quant_ptr,
-                             const int16_t* quant_shift_ptr,
-                             int16_t* qcoeff_ptr,
-                             int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
-                             uint16_t* eob_ptr,
-                             const int16_t* scan_ptr,
-                             const int16_t* iscan_ptr) {
+void vp10_fdct8x8_quant_ssse3(
+    const int16_t* input, int stride, int16_t* coeff_ptr, intptr_t n_coeffs,
+    int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr,
+    const int16_t* quant_ptr, const int16_t* quant_shift_ptr,
+    int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
+    uint16_t* eob_ptr, const int16_t* scan_ptr, const int16_t* iscan_ptr) {
   __m128i zero;
   int pass;
   // Constants
@@ -47,15 +43,15 @@ void vp10_fdct8x8_quant_ssse3(const int16_t *input, int stride,
   const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   // Load input
-  __m128i in0  = _mm_load_si128((const __m128i *)(input + 0 * stride));
-  __m128i in1  = _mm_load_si128((const __m128i *)(input + 1 * stride));
-  __m128i in2  = _mm_load_si128((const __m128i *)(input + 2 * stride));
-  __m128i in3  = _mm_load_si128((const __m128i *)(input + 3 * stride));
-  __m128i in4  = _mm_load_si128((const __m128i *)(input + 4 * stride));
-  __m128i in5  = _mm_load_si128((const __m128i *)(input + 5 * stride));
-  __m128i in6  = _mm_load_si128((const __m128i *)(input + 6 * stride));
-  __m128i in7  = _mm_load_si128((const __m128i *)(input + 7 * stride));
-  __m128i *in[8];
+  __m128i in0 = _mm_load_si128((const __m128i*)(input + 0 * stride));
+  __m128i in1 = _mm_load_si128((const __m128i*)(input + 1 * stride));
+  __m128i in2 = _mm_load_si128((const __m128i*)(input + 2 * stride));
+  __m128i in3 = _mm_load_si128((const __m128i*)(input + 3 * stride));
+  __m128i in4 = _mm_load_si128((const __m128i*)(input + 4 * stride));
+  __m128i in5 = _mm_load_si128((const __m128i*)(input + 5 * stride));
+  __m128i in6 = _mm_load_si128((const __m128i*)(input + 6 * stride));
+  __m128i in7 = _mm_load_si128((const __m128i*)(input + 7 * stride));
+  __m128i* in[8];
   int index = 0;
 
   (void)scan_ptr;
@@ -393,7 +389,7 @@ void vp10_fdct8x8_quant_ssse3(const int16_t *input, int stride,
         qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
 
         nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) |
-            _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
+                 _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
 
         if (nzflag) {
           qcoeff0 = _mm_adds_epi16(qcoeff0, round);
diff --git a/vp10/encoder/x86/error_intrin_avx2.c b/vp10/encoder/x86/error_intrin_avx2.c
index 9766be27bf9c84a4137141b1f1f657e14205e46a..560cd4a87ea5b73ef9b75d3a474c8212de0529e0 100644
--- a/vp10/encoder/x86/error_intrin_avx2.c
+++ b/vp10/encoder/x86/error_intrin_avx2.c
@@ -13,10 +13,8 @@
 #include "./vp10_rtcd.h"
 #include "vpx/vpx_integer.h"
 
-int64_t vp10_block_error_avx2(const int16_t *coeff,
-                             const int16_t *dqcoeff,
-                             intptr_t block_size,
-                             int64_t *ssz) {
+int64_t vp10_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
+                              intptr_t block_size, int64_t *ssz) {
   __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
   __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
   __m256i sse_reg_64hi, ssz_reg_64hi;
@@ -29,7 +27,7 @@ int64_t vp10_block_error_avx2(const int16_t *coeff,
   sse_reg = _mm256_set1_epi16(0);
   ssz_reg = _mm256_set1_epi16(0);
 
-  for (i = 0 ; i < block_size ; i+= 16) {
+  for (i = 0; i < block_size; i += 16) {
     // load 32 bytes from coeff and dqcoeff
     coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i));
     dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i));
@@ -66,8 +64,8 @@ int64_t vp10_block_error_avx2(const int16_t *coeff,
                              _mm256_extractf128_si256(ssz_reg, 1));
 
   // store the results
-  _mm_storel_epi64((__m128i*)(&sse), sse_reg128);
+  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);
 
-  _mm_storel_epi64((__m128i*)(ssz), ssz_reg128);
+  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
   return sse;
 }
diff --git a/vp10/encoder/x86/highbd_block_error_intrin_sse2.c b/vp10/encoder/x86/highbd_block_error_intrin_sse2.c
index 6b4cf50994cadde4c61bc6bdaf2fc06339b85e73..eefca1ce1c94597f3f04cbc60a315b3cb5730519 100644
--- a/vp10/encoder/x86/highbd_block_error_intrin_sse2.c
+++ b/vp10/encoder/x86/highbd_block_error_intrin_sse2.c
@@ -14,8 +14,8 @@
 #include "vp10/common/common.h"
 
 int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
-                                    intptr_t block_size, int64_t *ssz,
-                                    int bps) {
+                                     intptr_t block_size, int64_t *ssz,
+                                     int bps) {
   int i, j, test;
   uint32_t temp[4];
   __m128i max, min, cmp0, cmp1, cmp2, cmp3;
@@ -23,41 +23,41 @@ int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
   const int shift = 2 * (bps - 8);
   const int rounding = shift > 0 ? 1 << (shift - 1) : 0;
 
-  for (i = 0; i < block_size; i+=8) {
+  for (i = 0; i < block_size; i += 8) {
     // Load the data into xmm registers
-    __m128i mm_coeff = _mm_load_si128((__m128i*) (coeff + i));
-    __m128i mm_coeff2 = _mm_load_si128((__m128i*) (coeff + i + 4));
-    __m128i mm_dqcoeff = _mm_load_si128((__m128i*) (dqcoeff + i));
-    __m128i mm_dqcoeff2 = _mm_load_si128((__m128i*) (dqcoeff + i + 4));
+    __m128i mm_coeff = _mm_load_si128((__m128i *)(coeff + i));
+    __m128i mm_coeff2 = _mm_load_si128((__m128i *)(coeff + i + 4));
+    __m128i mm_dqcoeff = _mm_load_si128((__m128i *)(dqcoeff + i));
+    __m128i mm_dqcoeff2 = _mm_load_si128((__m128i *)(dqcoeff + i + 4));
     // Check if any values require more than 15 bit
     max = _mm_set1_epi32(0x3fff);
     min = _mm_set1_epi32(0xffffc000);
     cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max),
-            _mm_cmplt_epi32(mm_coeff, min));
+                         _mm_cmplt_epi32(mm_coeff, min));
     cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max),
-            _mm_cmplt_epi32(mm_coeff2, min));
+                         _mm_cmplt_epi32(mm_coeff2, min));
     cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max),
-            _mm_cmplt_epi32(mm_dqcoeff, min));
+                         _mm_cmplt_epi32(mm_dqcoeff, min));
     cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max),
-            _mm_cmplt_epi32(mm_dqcoeff2, min));
-    test = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(cmp0, cmp1),
-            _mm_or_si128(cmp2, cmp3)));
+                         _mm_cmplt_epi32(mm_dqcoeff2, min));
+    test = _mm_movemask_epi8(
+        _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)));
 
     if (!test) {
-      __m128i mm_diff, error_sse2, sqcoeff_sse2;;
+      __m128i mm_diff, error_sse2, sqcoeff_sse2;
       mm_coeff = _mm_packs_epi32(mm_coeff, mm_coeff2);
       mm_dqcoeff = _mm_packs_epi32(mm_dqcoeff, mm_dqcoeff2);
       mm_diff = _mm_sub_epi16(mm_coeff, mm_dqcoeff);
       error_sse2 = _mm_madd_epi16(mm_diff, mm_diff);
       sqcoeff_sse2 = _mm_madd_epi16(mm_coeff, mm_coeff);
-      _mm_storeu_si128((__m128i*)temp, error_sse2);
+      _mm_storeu_si128((__m128i *)temp, error_sse2);
       error = error + temp[0] + temp[1] + temp[2] + temp[3];
-      _mm_storeu_si128((__m128i*)temp, sqcoeff_sse2);
+      _mm_storeu_si128((__m128i *)temp, sqcoeff_sse2);
       sqcoeff += temp[0] + temp[1] + temp[2] + temp[3];
     } else {
       for (j = 0; j < 8; j++) {
         const int64_t diff = coeff[i + j] - dqcoeff[i + j];
-        error +=  diff * diff;
+        error += diff * diff;
         sqcoeff += (int64_t)coeff[i + j] * (int64_t)coeff[i + j];
       }
     }
diff --git a/vp10/encoder/x86/quantize_sse2.c b/vp10/encoder/x86/quantize_sse2.c
index dabd3bd1275bc81d8f1900afa31f18b9d94efa8c..78d5952297889378638f71e21f041fea136e5011 100644
--- a/vp10/encoder/x86/quantize_sse2.c
+++ b/vp10/encoder/x86/quantize_sse2.c
@@ -15,13 +15,12 @@
 #include "vpx/vpx_integer.h"
 
 void vp10_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs,
-                          int skip_block, const int16_t* zbin_ptr,
-                          const int16_t* round_ptr, const int16_t* quant_ptr,
-                          const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
-                          int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
-                          uint16_t* eob_ptr,
-                          const int16_t* scan_ptr,
-                          const int16_t* iscan_ptr) {
+                           int skip_block, const int16_t* zbin_ptr,
+                           const int16_t* round_ptr, const int16_t* quant_ptr,
+                           const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
+                           int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
+                           uint16_t* eob_ptr, const int16_t* scan_ptr,
+                           const int16_t* iscan_ptr) {
   __m128i zero;
   __m128i thr;
   int16_t nzflag;
@@ -133,7 +132,7 @@ void vp10_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs,
         qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
 
         nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) |
-            _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
+                 _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
 
         if (nzflag) {
           qcoeff0 = _mm_adds_epi16(qcoeff0, round);
diff --git a/vp10/vp10_cx_iface.c b/vp10/vp10_cx_iface.c
index 8dfe1f9c8d9691859e650ec5b14566fa7f85dcdd..0e737881ea2befe931d355c800966b7be6a113f3 100644
--- a/vp10/vp10_cx_iface.c
+++ b/vp10/vp10_cx_iface.c
@@ -22,103 +22,100 @@
 #include "vp10/vp10_iface_common.h"
 
 struct vp10_extracfg {
-  int                         cpu_used;  // available cpu percentage in 1/16
-  unsigned int                enable_auto_alt_ref;
-  unsigned int                noise_sensitivity;
-  unsigned int                sharpness;
-  unsigned int                static_thresh;
-  unsigned int                tile_columns;
-  unsigned int                tile_rows;
-  unsigned int                arnr_max_frames;
-  unsigned int                arnr_strength;
-  unsigned int                min_gf_interval;
-  unsigned int                max_gf_interval;
-  vpx_tune_metric             tuning;
-  unsigned int                cq_level;  // constrained quality level
-  unsigned int                rc_max_intra_bitrate_pct;
-  unsigned int                rc_max_inter_bitrate_pct;
-  unsigned int                gf_cbr_boost_pct;
-  unsigned int                lossless;
-  unsigned int                frame_parallel_decoding_mode;
-  AQ_MODE                     aq_mode;
-  unsigned int                frame_periodic_boost;
-  vpx_bit_depth_t             bit_depth;
-  vpx_tune_content            content;
-  vpx_color_space_t           color_space;
-  int                         color_range;
-  int                         render_width;
-  int                         render_height;
+  int cpu_used;  // available cpu percentage in 1/16
+  unsigned int enable_auto_alt_ref;
+  unsigned int noise_sensitivity;
+  unsigned int sharpness;
+  unsigned int static_thresh;
+  unsigned int tile_columns;
+  unsigned int tile_rows;
+  unsigned int arnr_max_frames;
+  unsigned int arnr_strength;
+  unsigned int min_gf_interval;
+  unsigned int max_gf_interval;
+  vpx_tune_metric tuning;
+  unsigned int cq_level;  // constrained quality level
+  unsigned int rc_max_intra_bitrate_pct;
+  unsigned int rc_max_inter_bitrate_pct;
+  unsigned int gf_cbr_boost_pct;
+  unsigned int lossless;
+  unsigned int frame_parallel_decoding_mode;
+  AQ_MODE aq_mode;
+  unsigned int frame_periodic_boost;
+  vpx_bit_depth_t bit_depth;
+  vpx_tune_content content;
+  vpx_color_space_t color_space;
+  int color_range;
+  int render_width;
+  int render_height;
 };
 
 static struct vp10_extracfg default_extra_cfg = {
-  0,                          // cpu_used
-  1,                          // enable_auto_alt_ref
-  0,                          // noise_sensitivity
-  0,                          // sharpness
-  0,                          // static_thresh
-  6,                          // tile_columns
-  0,                          // tile_rows
-  7,                          // arnr_max_frames
-  5,                          // arnr_strength
-  0,                          // min_gf_interval; 0 -> default decision
-  0,                          // max_gf_interval; 0 -> default decision
-  VPX_TUNE_PSNR,              // tuning
-  10,                         // cq_level
-  0,                          // rc_max_intra_bitrate_pct
-  0,                          // rc_max_inter_bitrate_pct
-  0,                          // gf_cbr_boost_pct
-  0,                          // lossless
-  1,                          // frame_parallel_decoding_mode
-  NO_AQ,                      // aq_mode
-  0,                          // frame_periodic_delta_q
-  VPX_BITS_8,                 // Bit depth
-  VPX_CONTENT_DEFAULT,       // content
-  VPX_CS_UNKNOWN,             // color space
-  0,                          // color range
-  0,                          // render width
-  0,                          // render height
+  0,                    // cpu_used
+  1,                    // enable_auto_alt_ref
+  0,                    // noise_sensitivity
+  0,                    // sharpness
+  0,                    // static_thresh
+  6,                    // tile_columns
+  0,                    // tile_rows
+  7,                    // arnr_max_frames
+  5,                    // arnr_strength
+  0,                    // min_gf_interval; 0 -> default decision
+  0,                    // max_gf_interval; 0 -> default decision
+  VPX_TUNE_PSNR,        // tuning
+  10,                   // cq_level
+  0,                    // rc_max_intra_bitrate_pct
+  0,                    // rc_max_inter_bitrate_pct
+  0,                    // gf_cbr_boost_pct
+  0,                    // lossless
+  1,                    // frame_parallel_decoding_mode
+  NO_AQ,                // aq_mode
+  0,                    // frame_periodic_boost
+  VPX_BITS_8,           // Bit depth
+  VPX_CONTENT_DEFAULT,  // content
+  VPX_CS_UNKNOWN,       // color space
+  0,                    // color range
+  0,                    // render width
+  0,                    // render height
 };
 
 struct vpx_codec_alg_priv {
-  vpx_codec_priv_t        base;
-  vpx_codec_enc_cfg_t     cfg;
-  struct vp10_extracfg    extra_cfg;
-  VP10EncoderConfig       oxcf;
-  VP10_COMP               *cpi;
-  unsigned char           *cx_data;
-  size_t                  cx_data_sz;
-  unsigned char           *pending_cx_data;
-  size_t                  pending_cx_data_sz;
-  int                     pending_frame_count;
-  size_t                  pending_frame_sizes[8];
+  vpx_codec_priv_t base;
+  vpx_codec_enc_cfg_t cfg;
+  struct vp10_extracfg extra_cfg;
+  VP10EncoderConfig oxcf;
+  VP10_COMP *cpi;
+  unsigned char *cx_data;
+  size_t cx_data_sz;
+  unsigned char *pending_cx_data;
+  size_t pending_cx_data_sz;
+  int pending_frame_count;
+  size_t pending_frame_sizes[8];
 #if !CONFIG_MISC_FIXES
-  size_t                  pending_frame_magnitude;
+  size_t pending_frame_magnitude;
 #endif
-  vpx_image_t             preview_img;
-  vpx_enc_frame_flags_t   next_frame_flags;
-  vp8_postproc_cfg_t      preview_ppcfg;
+  vpx_image_t preview_img;
+  vpx_enc_frame_flags_t next_frame_flags;
+  vp8_postproc_cfg_t preview_ppcfg;
   vpx_codec_pkt_list_decl(256) pkt_list;
-  unsigned int            fixed_kf_cntr;
+  unsigned int fixed_kf_cntr;
   vpx_codec_priv_output_cx_pkt_cb_pair_t output_cx_pkt_cb;
   // BufferPool that holds all reference frames.
-  BufferPool              *buffer_pool;
+  BufferPool *buffer_pool;
 };
 
 static VPX_REFFRAME ref_frame_to_vp10_reframe(vpx_ref_frame_type_t frame) {
   switch (frame) {
-    case VP8_LAST_FRAME:
-      return VPX_LAST_FLAG;
-    case VP8_GOLD_FRAME:
-      return VPX_GOLD_FLAG;
-    case VP8_ALTR_FRAME:
-      return VPX_ALT_FLAG;
+    case VP8_LAST_FRAME: return VPX_LAST_FLAG;
+    case VP8_GOLD_FRAME: return VPX_GOLD_FLAG;
+    case VP8_ALTR_FRAME: return VPX_ALT_FLAG;
   }
   assert(0 && "Invalid Reference Frame");
   return VPX_LAST_FLAG;
 }
 
-static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
-    const struct vpx_internal_error_info *error) {
+static vpx_codec_err_t update_error_state(
+    vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
   const vpx_codec_err_t res = error->error_code;
 
   if (res != VPX_CODEC_OK)
@@ -127,58 +124,60 @@ static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
   return res;
 }
 
-
 #undef ERROR
-#define ERROR(str) do {\
-    ctx->base.err_detail = str;\
-    return VPX_CODEC_INVALID_PARAM;\
+#define ERROR(str)                  \
+  do {                              \
+    ctx->base.err_detail = str;     \
+    return VPX_CODEC_INVALID_PARAM; \
   } while (0)
 
-#define RANGE_CHECK(p, memb, lo, hi) do {\
+#define RANGE_CHECK(p, memb, lo, hi)                                 \
+  do {                                                               \
     if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
-      ERROR(#memb " out of range ["#lo".."#hi"]");\
+      ERROR(#memb " out of range [" #lo ".." #hi "]");               \
   } while (0)
 
-#define RANGE_CHECK_HI(p, memb, hi) do {\
-    if (!((p)->memb <= (hi))) \
-      ERROR(#memb " out of range [.."#hi"]");\
+#define RANGE_CHECK_HI(p, memb, hi)                                     \
+  do {                                                                  \
+    if (!((p)->memb <= (hi))) ERROR(#memb " out of range [.." #hi "]"); \
   } while (0)
 
-#define RANGE_CHECK_LO(p, memb, lo) do {\
-    if (!((p)->memb >= (lo))) \
-      ERROR(#memb " out of range ["#lo"..]");\
+#define RANGE_CHECK_LO(p, memb, lo)                                     \
+  do {                                                                  \
+    if (!((p)->memb >= (lo))) ERROR(#memb " out of range [" #lo "..]"); \
   } while (0)
 
-#define RANGE_CHECK_BOOL(p, memb) do {\
-    if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean");\
+#define RANGE_CHECK_BOOL(p, memb)                                     \
+  do {                                                                \
+    if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean"); \
   } while (0)
 
 static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
                                        const vpx_codec_enc_cfg_t *cfg,
                                        const struct vp10_extracfg *extra_cfg) {
-  RANGE_CHECK(cfg, g_w,                   1, 65535);  // 16 bits available
-  RANGE_CHECK(cfg, g_h,                   1, 65535);  // 16 bits available
-  RANGE_CHECK(cfg, g_timebase.den,        1, 1000000000);
-  RANGE_CHECK(cfg, g_timebase.num,        1, cfg->g_timebase.den);
-  RANGE_CHECK_HI(cfg, g_profile,          3);
-
-  RANGE_CHECK_HI(cfg, rc_max_quantizer,   63);
-  RANGE_CHECK_HI(cfg, rc_min_quantizer,   cfg->rc_max_quantizer);
+  RANGE_CHECK(cfg, g_w, 1, 65535);  // 16 bits available
+  RANGE_CHECK(cfg, g_h, 1, 65535);  // 16 bits available
+  RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
+  RANGE_CHECK(cfg, g_timebase.num, 1, cfg->g_timebase.den);
+  RANGE_CHECK_HI(cfg, g_profile, 3);
+
+  RANGE_CHECK_HI(cfg, rc_max_quantizer, 63);
+  RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer);
   RANGE_CHECK_BOOL(extra_cfg, lossless);
-  RANGE_CHECK(extra_cfg, aq_mode,           0, AQ_MODE_COUNT - 1);
+  RANGE_CHECK(extra_cfg, aq_mode, 0, AQ_MODE_COUNT - 1);
   RANGE_CHECK(extra_cfg, frame_periodic_boost, 0, 1);
-  RANGE_CHECK_HI(cfg, g_threads,          64);
-  RANGE_CHECK_HI(cfg, g_lag_in_frames,    MAX_LAG_BUFFERS);
-  RANGE_CHECK(cfg, rc_end_usage,          VPX_VBR, VPX_Q);
-  RANGE_CHECK_HI(cfg, rc_undershoot_pct,  100);
-  RANGE_CHECK_HI(cfg, rc_overshoot_pct,   100);
+  RANGE_CHECK_HI(cfg, g_threads, 64);
+  RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
+  RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
+  RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100);
+  RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100);
   RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
-  RANGE_CHECK(cfg, kf_mode,               VPX_KF_DISABLED, VPX_KF_AUTO);
-  RANGE_CHECK_BOOL(cfg,                   rc_resize_allowed);
-  RANGE_CHECK_HI(cfg, rc_dropframe_thresh,   100);
-  RANGE_CHECK_HI(cfg, rc_resize_up_thresh,   100);
+  RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
+  RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
+  RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
+  RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
   RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100);
-  RANGE_CHECK(cfg,        g_pass,         VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
+  RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
   RANGE_CHECK(extra_cfg, min_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
   RANGE_CHECK(extra_cfg, max_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
   if (extra_cfg->max_gf_interval > 0) {
@@ -186,7 +185,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
   }
   if (extra_cfg->min_gf_interval > 0 && extra_cfg->max_gf_interval > 0) {
     RANGE_CHECK(extra_cfg, max_gf_interval, extra_cfg->min_gf_interval,
-      (MAX_LAG_BUFFERS - 1));
+                (MAX_LAG_BUFFERS - 1));
   }
 
   if (cfg->rc_resize_allowed == 1) {
@@ -200,11 +199,11 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
   RANGE_CHECK(cfg, ts_number_layers, 1, 1);
   // VP10 does not support a lower bound on the keyframe interval in
   // automatic keyframe placement mode.
-  if (cfg->kf_mode != VPX_KF_DISABLED &&
-      cfg->kf_min_dist != cfg->kf_max_dist &&
+  if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
       cfg->kf_min_dist > 0)
-    ERROR("kf_min_dist not supported in auto mode, use 0 "
-          "or kf_max_dist instead.");
+    ERROR(
+        "kf_min_dist not supported in auto mode, use 0 "
+        "or kf_max_dist instead.");
 
   RANGE_CHECK(extra_cfg, enable_auto_alt_ref, 0, 2);
   RANGE_CHECK(extra_cfg, cpu_used, -8, 8);
@@ -217,12 +216,11 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
   RANGE_CHECK(extra_cfg, cq_level, 0, 63);
   RANGE_CHECK(cfg, g_bit_depth, VPX_BITS_8, VPX_BITS_12);
   RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
-  RANGE_CHECK(extra_cfg, content,
-              VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
+  RANGE_CHECK(extra_cfg, content, VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
 
   // TODO(yaowu): remove this when ssim tuning is implemented for vp10
   if (extra_cfg->tuning == VPX_TUNE_SSIM)
-      ERROR("Option --tune=ssim is not currently supported in VP10.");
+    ERROR("Option --tune=ssim is not currently supported in VP10.");
 
   if (cfg->g_pass == VPX_RC_LAST_PASS) {
     const size_t packet_sz = sizeof(FIRSTPASS_STATS);
@@ -254,8 +252,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
       cfg->g_bit_depth > VPX_BITS_8) {
     ERROR("Codec high bit-depth not supported in profile < 2");
   }
-  if (cfg->g_profile <= (unsigned int)PROFILE_1 &&
-      cfg->g_input_bit_depth > 8) {
+  if (cfg->g_profile <= (unsigned int)PROFILE_1 && cfg->g_input_bit_depth > 8) {
     ERROR("Source high bit-depth not supported in profile < 2");
   }
   if (cfg->g_profile > (unsigned int)PROFILE_1 &&
@@ -272,14 +269,14 @@ static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
   switch (img->fmt) {
     case VPX_IMG_FMT_YV12:
     case VPX_IMG_FMT_I420:
-    case VPX_IMG_FMT_I42016:
-      break;
+    case VPX_IMG_FMT_I42016: break;
     case VPX_IMG_FMT_I422:
     case VPX_IMG_FMT_I444:
     case VPX_IMG_FMT_I440:
       if (ctx->cfg.g_profile != (unsigned int)PROFILE_1) {
-        ERROR("Invalid image format. I422, I444, I440 images are "
-              "not supported in profile.");
+        ERROR(
+            "Invalid image format. I422, I444, I440 images are "
+            "not supported in profile.");
       }
       break;
     case VPX_IMG_FMT_I42216:
@@ -287,13 +284,15 @@ static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
     case VPX_IMG_FMT_I44016:
       if (ctx->cfg.g_profile != (unsigned int)PROFILE_1 &&
           ctx->cfg.g_profile != (unsigned int)PROFILE_3) {
-        ERROR("Invalid image format. 16-bit I422, I444, I440 images are "
-              "not supported in profile.");
+        ERROR(
+            "Invalid image format. 16-bit I422, I444, I440 images are "
+            "not supported in profile.");
       }
       break;
     default:
-      ERROR("Invalid image format. Only YV12, I420, I422, I444 images are "
-            "supported.");
+      ERROR(
+          "Invalid image format. Only YV12, I420, I422, I444 images are "
+          "supported.");
       break;
   }
 
@@ -320,37 +319,29 @@ static int get_image_bps(const vpx_image_t *img) {
 }
 
 static vpx_codec_err_t set_encoder_config(
-  VP10EncoderConfig *oxcf,
-  const vpx_codec_enc_cfg_t *cfg,
-  const struct vp10_extracfg *extra_cfg) {
+    VP10EncoderConfig *oxcf, const vpx_codec_enc_cfg_t *cfg,
+    const struct vp10_extracfg *extra_cfg) {
   const int is_vbr = cfg->rc_end_usage == VPX_VBR;
   oxcf->profile = cfg->g_profile;
   oxcf->max_threads = (int)cfg->g_threads;
-  oxcf->width   = cfg->g_w;
-  oxcf->height  = cfg->g_h;
+  oxcf->width = cfg->g_w;
+  oxcf->height = cfg->g_h;
   oxcf->bit_depth = cfg->g_bit_depth;
   oxcf->input_bit_depth = cfg->g_input_bit_depth;
   // guess a frame rate if out of whack, use 30
   oxcf->init_framerate = (double)cfg->g_timebase.den / cfg->g_timebase.num;
-  if (oxcf->init_framerate > 180)
-    oxcf->init_framerate = 30;
+  if (oxcf->init_framerate > 180) oxcf->init_framerate = 30;
 
   oxcf->mode = GOOD;
 
   switch (cfg->g_pass) {
-    case VPX_RC_ONE_PASS:
-      oxcf->pass = 0;
-      break;
-    case VPX_RC_FIRST_PASS:
-      oxcf->pass = 1;
-      break;
-    case VPX_RC_LAST_PASS:
-      oxcf->pass = 2;
-      break;
+    case VPX_RC_ONE_PASS: oxcf->pass = 0; break;
+    case VPX_RC_FIRST_PASS: oxcf->pass = 1; break;
+    case VPX_RC_LAST_PASS: oxcf->pass = 2; break;
   }
 
-  oxcf->lag_in_frames = cfg->g_pass == VPX_RC_FIRST_PASS ? 0
-                                                         : cfg->g_lag_in_frames;
+  oxcf->lag_in_frames =
+      cfg->g_pass == VPX_RC_FIRST_PASS ? 0 : cfg->g_lag_in_frames;
   oxcf->rc_mode = cfg->rc_end_usage;
 
   // Convert target bandwidth from Kbit/s to Bit/s
@@ -363,55 +354,56 @@ static vpx_codec_err_t set_encoder_config(
       extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_min_quantizer);
   oxcf->worst_allowed_q =
       extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_max_quantizer);
-  oxcf->cq_level        = vp10_quantizer_to_qindex(extra_cfg->cq_level);
+  oxcf->cq_level = vp10_quantizer_to_qindex(extra_cfg->cq_level);
   oxcf->fixed_q = -1;
 
-  oxcf->under_shoot_pct         = cfg->rc_undershoot_pct;
-  oxcf->over_shoot_pct          = cfg->rc_overshoot_pct;
+  oxcf->under_shoot_pct = cfg->rc_undershoot_pct;
+  oxcf->over_shoot_pct = cfg->rc_overshoot_pct;
 
-  oxcf->scaled_frame_width  = cfg->rc_scaled_width;
+  oxcf->scaled_frame_width = cfg->rc_scaled_width;
   oxcf->scaled_frame_height = cfg->rc_scaled_height;
   if (cfg->rc_resize_allowed == 1) {
     oxcf->resize_mode =
-        (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0) ?
-            RESIZE_DYNAMIC : RESIZE_FIXED;
+        (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0)
+            ? RESIZE_DYNAMIC
+            : RESIZE_FIXED;
   } else {
     oxcf->resize_mode = RESIZE_NONE;
   }
 
-  oxcf->maximum_buffer_size_ms   = is_vbr ? 240000 : cfg->rc_buf_sz;
+  oxcf->maximum_buffer_size_ms = is_vbr ? 240000 : cfg->rc_buf_sz;
   oxcf->starting_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_initial_sz;
-  oxcf->optimal_buffer_level_ms  = is_vbr ? 60000 : cfg->rc_buf_optimal_sz;
+  oxcf->optimal_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_optimal_sz;
 
-  oxcf->drop_frames_water_mark   = cfg->rc_dropframe_thresh;
+  oxcf->drop_frames_water_mark = cfg->rc_dropframe_thresh;
 
-  oxcf->two_pass_vbrbias         = cfg->rc_2pass_vbr_bias_pct;
-  oxcf->two_pass_vbrmin_section  = cfg->rc_2pass_vbr_minsection_pct;
-  oxcf->two_pass_vbrmax_section  = cfg->rc_2pass_vbr_maxsection_pct;
+  oxcf->two_pass_vbrbias = cfg->rc_2pass_vbr_bias_pct;
+  oxcf->two_pass_vbrmin_section = cfg->rc_2pass_vbr_minsection_pct;
+  oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct;
 
-  oxcf->auto_key               = cfg->kf_mode == VPX_KF_AUTO &&
-                                 cfg->kf_min_dist != cfg->kf_max_dist;
+  oxcf->auto_key =
+      cfg->kf_mode == VPX_KF_AUTO && cfg->kf_min_dist != cfg->kf_max_dist;
 
-  oxcf->key_freq               = cfg->kf_max_dist;
+  oxcf->key_freq = cfg->kf_max_dist;
 
-  oxcf->speed                  =  abs(extra_cfg->cpu_used);
-  oxcf->encode_breakout        =  extra_cfg->static_thresh;
-  oxcf->enable_auto_arf        =  extra_cfg->enable_auto_alt_ref;
-  oxcf->noise_sensitivity      =  extra_cfg->noise_sensitivity;
-  oxcf->sharpness              =  extra_cfg->sharpness;
+  oxcf->speed = abs(extra_cfg->cpu_used);
+  oxcf->encode_breakout = extra_cfg->static_thresh;
+  oxcf->enable_auto_arf = extra_cfg->enable_auto_alt_ref;
+  oxcf->noise_sensitivity = extra_cfg->noise_sensitivity;
+  oxcf->sharpness = extra_cfg->sharpness;
 
-  oxcf->two_pass_stats_in      =  cfg->rc_twopass_stats_in;
+  oxcf->two_pass_stats_in = cfg->rc_twopass_stats_in;
 
 #if CONFIG_FP_MB_STATS
-  oxcf->firstpass_mb_stats_in  = cfg->rc_firstpass_mb_stats_in;
+  oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in;
 #endif
 
   oxcf->color_space = extra_cfg->color_space;
   oxcf->color_range = extra_cfg->color_range;
-  oxcf->render_width  = extra_cfg->render_width;
+  oxcf->render_width = extra_cfg->render_width;
   oxcf->render_height = extra_cfg->render_height;
   oxcf->arnr_max_frames = extra_cfg->arnr_max_frames;
-  oxcf->arnr_strength   = extra_cfg->arnr_strength;
+  oxcf->arnr_strength = extra_cfg->arnr_strength;
   oxcf->min_gf_interval = extra_cfg->min_gf_interval;
   oxcf->max_gf_interval = extra_cfg->max_gf_interval;
 
@@ -419,14 +411,14 @@ static vpx_codec_err_t set_encoder_config(
   oxcf->content = extra_cfg->content;
 
   oxcf->tile_columns = extra_cfg->tile_columns;
-  oxcf->tile_rows    = extra_cfg->tile_rows;
+  oxcf->tile_rows = extra_cfg->tile_rows;
 
-  oxcf->error_resilient_mode         = cfg->g_error_resilient;
+  oxcf->error_resilient_mode = cfg->g_error_resilient;
   oxcf->frame_parallel_decoding_mode = extra_cfg->frame_parallel_decoding_mode;
 
   oxcf->aq_mode = extra_cfg->aq_mode;
 
-  oxcf->frame_periodic_boost =  extra_cfg->frame_periodic_boost;
+  oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
 
   /*
   printf("Current VP10 Settings: \n");
@@ -464,7 +456,7 @@ static vpx_codec_err_t set_encoder_config(
 }
 
 static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
-                                          const vpx_codec_enc_cfg_t  *cfg) {
+                                          const vpx_codec_enc_cfg_t *cfg) {
   vpx_codec_err_t res;
   int force_key = 0;
 
@@ -494,8 +486,7 @@ static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
     vp10_change_config(ctx->cpi, &ctx->oxcf);
   }
 
-  if (force_key)
-    ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
+  if (force_key) ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
 
   return res;
 }
@@ -503,8 +494,7 @@ static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
 static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx,
                                           va_list args) {
   int *const arg = va_arg(args, int *);
-  if (arg == NULL)
-    return VPX_CODEC_INVALID_PARAM;
+  if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
   *arg = vp10_get_quantizer(ctx->cpi);
   return VPX_CODEC_OK;
 }
@@ -512,8 +502,7 @@ static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx,
 static vpx_codec_err_t ctrl_get_quantizer64(vpx_codec_alg_priv_t *ctx,
                                             va_list args) {
   int *const arg = va_arg(args, int *);
-  if (arg == NULL)
-    return VPX_CODEC_INVALID_PARAM;
+  if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
   *arg = vp10_qindex_to_quantizer(vp10_get_quantizer(ctx->cpi));
   return VPX_CODEC_OK;
 }
@@ -629,11 +618,10 @@ static vpx_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
-static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(
-    vpx_codec_alg_priv_t *ctx, va_list args) {
+static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(vpx_codec_alg_priv_t *ctx,
+                                                    va_list args) {
   struct vp10_extracfg extra_cfg = ctx->extra_cfg;
-  extra_cfg.gf_cbr_boost_pct =
-      CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
+  extra_cfg.gf_cbr_boost_pct = CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
@@ -687,16 +675,13 @@ static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx,
 
   if (ctx->priv == NULL) {
     vpx_codec_alg_priv_t *const priv = vpx_calloc(1, sizeof(*priv));
-    if (priv == NULL)
-      return VPX_CODEC_MEM_ERROR;
+    if (priv == NULL) return VPX_CODEC_MEM_ERROR;
 
     ctx->priv = (vpx_codec_priv_t *)priv;
     ctx->priv->init_flags = ctx->init_flags;
     ctx->priv->enc.total_encoders = 1;
-    priv->buffer_pool =
-        (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
-    if (priv->buffer_pool == NULL)
-      return VPX_CODEC_MEM_ERROR;
+    priv->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
+    if (priv->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
 
 #if CONFIG_MULTITHREAD
     if (pthread_mutex_init(&priv->buffer_pool->pool_mutex, NULL)) {
@@ -755,7 +740,8 @@ static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
 
         // Convert duration parameter from stream timebase to microseconds.
         const uint64_t duration_us = (uint64_t)duration * 1000000 *
-           (uint64_t)cfg->g_timebase.num /(uint64_t)cfg->g_timebase.den;
+                                     (uint64_t)cfg->g_timebase.num /
+                                     (uint64_t)cfg->g_timebase.den;
 
         // If the deadline is more than the duration this frame is to be shown,
         // use good quality mode. Otherwise use realtime mode.
@@ -764,11 +750,8 @@ static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
         new_mode = BEST;
       }
       break;
-    case VPX_RC_FIRST_PASS:
-      break;
-    case VPX_RC_LAST_PASS:
-      new_mode = deadline > 0 ? GOOD : BEST;
-      break;
+    case VPX_RC_FIRST_PASS: break;
+    case VPX_RC_LAST_PASS: new_mode = deadline > 0 ? GOOD : BEST; break;
   }
 
   if (ctx->oxcf.mode != new_mode) {
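
/* The widened arithmetic above converts stream-timebase ticks to
 * microseconds before comparing against the deadline. A hedged restatement
 * of that expression with worked numbers (ticks_to_us is illustrative, not
 * a library helper): */
#include <stdint.h>
static uint64_t ticks_to_us(unsigned long duration, int num, int den) {
  /* Widen before multiplying so duration * 1000000 cannot overflow. */
  return (uint64_t)duration * 1000000 * (uint64_t)num / (uint64_t)den;
}
/* With a 1/30 timebase, ticks_to_us(1, 1, 30) == 33333: a deadline longer
 * than one frame period selects GOOD quality in the logic above. */
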
@@ -795,7 +778,7 @@ static int write_superframe_index(vpx_codec_alg_priv_t *ctx) {
   marker |= ctx->pending_frame_count - 1;
 #if CONFIG_MISC_FIXES
   for (i = 0; i < ctx->pending_frame_count - 1; i++) {
-    const size_t frame_sz = (unsigned int) ctx->pending_frame_sizes[i] - 1;
+    const size_t frame_sz = (unsigned int)ctx->pending_frame_sizes[i] - 1;
     max_frame_sz = frame_sz > max_frame_sz ? frame_sz : max_frame_sz;
   }
 #endif
@@ -803,11 +786,9 @@ static int write_superframe_index(vpx_codec_alg_priv_t *ctx) {
   // Choose the magnitude
   for (mag = 0, mask = 0xff; mag < 4; mag++) {
 #if CONFIG_MISC_FIXES
-    if (max_frame_sz <= mask)
-      break;
+    if (max_frame_sz <= mask) break;
 #else
-    if (ctx->pending_frame_magnitude < mask)
-      break;
+    if (ctx->pending_frame_magnitude < mask) break;
 #endif
     mask <<= 8;
     mask |= 0xff;
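
/* Sketch of where the magnitude chosen above lands: the superframe index
 * stores each frame size in mag + 1 bytes, and the marker byte packs that
 * magnitude with the frame count (VP9-style index layout;
 * make_superframe_marker is an illustrative helper, not library code): */
#include <stdint.h>
static uint8_t make_superframe_marker(int mag, int frame_count) {
  /* bits 7..6 = 0b11 (superframe marker), bits 5..3 = mag,
   * bits 2..0 = frame_count - 1 */
  return (uint8_t)(0xc0 | ((mag & 3) << 3) | ((frame_count - 1) & 7));
}
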
@@ -872,16 +853,14 @@ static vpx_codec_frame_flags_t get_frame_pkt_flags(const VP10_COMP *cpi,
                                                    unsigned int lib_flags) {
   vpx_codec_frame_flags_t flags = lib_flags << 16;
 
-  if (lib_flags & FRAMEFLAGS_KEY)
-    flags |= VPX_FRAME_IS_KEY;
+  if (lib_flags & FRAMEFLAGS_KEY) flags |= VPX_FRAME_IS_KEY;
 
-  if (cpi->droppable)
-    flags |= VPX_FRAME_IS_DROPPABLE;
+  if (cpi->droppable) flags |= VPX_FRAME_IS_DROPPABLE;
 
   return flags;
 }
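
/* Application-side sketch of consuming the flags assembled above, assuming
 * vpx/vpx_encoder.h and an initialized encoder context (drain_packets is
 * an illustrative name, not a library function): */
#include <stdio.h>
static void drain_packets(vpx_codec_ctx_t *codec) {
  vpx_codec_iter_t iter = NULL;
  const vpx_codec_cx_pkt_t *pkt;
  while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT &&
        (pkt->data.frame.flags & VPX_FRAME_IS_KEY))
      printf("keyframe: %zu bytes\n", pkt->data.frame.sz);
  }
}
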
 
-static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
+static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
                                       const vpx_image_t *img,
                                       vpx_codec_pts_t pts,
                                       unsigned long duration,
@@ -901,12 +880,11 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
       // instance for its status to determine the compressed data size.
       data_sz = ctx->cfg.g_w * ctx->cfg.g_h * get_image_bps(img) / 8 *
                 (cpi->multi_arf_allowed ? 8 : 2);
-      if (data_sz < 4096)
-        data_sz = 4096;
+      if (data_sz < 4096) data_sz = 4096;
       if (ctx->cx_data == NULL || ctx->cx_data_sz < data_sz) {
         ctx->cx_data_sz = data_sz;
         free(ctx->cx_data);
-        ctx->cx_data = (unsigned char*)malloc(ctx->cx_data_sz);
+        ctx->cx_data = (unsigned char *)malloc(ctx->cx_data_sz);
         if (ctx->cx_data == NULL) {
           return VPX_CODEC_MEM_ERROR;
         }
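
/* Worked numbers for the worst-case sizing heuristic above (illustrative
 * helper mirroring the expression, not encoder code): */
#include <stddef.h>
static size_t worst_case_cx_buf(unsigned w, unsigned h, int bps,
                                int multi_arf) {
  size_t sz = (size_t)w * h * bps / 8 * (multi_arf ? 8 : 2);
  return sz < 4096 ? 4096 : sz; /* same 4096 floor applied above */
}
/* worst_case_cx_buf(640, 480, 12, 0) == 921600 bytes for I420. */
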
@@ -919,7 +897,7 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
 
   // Handle Flags
   if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) ||
-       ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
+      ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
     ctx->base.err_detail = "Conflicting flags.";
     return VPX_CODEC_INVALID_PARAM;
   }
@@ -946,16 +924,15 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
     unsigned char *cx_data;
 
     // Set up internal flags
-    if (ctx->base.init_flags & VPX_CODEC_USE_PSNR)
-      cpi->b_calculate_psnr = 1;
+    if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;
 
     if (img != NULL) {
       res = image2yuvconfig(img, &sd);
 
       // Store the original flags into the frame buffer. Will extract the
       // key frame flag when we actually encode this frame.
-      if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags,
-                                &sd, dst_time_stamp, dst_end_time_stamp)) {
+      if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
+                                 dst_time_stamp, dst_end_time_stamp)) {
         res = update_error_state(ctx, &cpi->common.error);
       }
       ctx->next_frame_flags = 0;
@@ -981,16 +958,15 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
     }
 
     while (cx_data_sz >= ctx->cx_data_sz / 2 &&
-           -1 != vp10_get_compressed_data(cpi, &lib_flags, &size,
-                                         cx_data, &dst_time_stamp,
-                                         &dst_end_time_stamp, !img)) {
+           -1 != vp10_get_compressed_data(cpi, &lib_flags, &size, cx_data,
+                                          &dst_time_stamp, &dst_end_time_stamp,
+                                          !img)) {
       if (size) {
         vpx_codec_cx_pkt_t pkt;
 
         // Pack invisible frames with the next visible frame
         if (!cpi->common.show_frame) {
-          if (ctx->pending_cx_data == 0)
-            ctx->pending_cx_data = cx_data;
+          if (ctx->pending_cx_data == 0) ctx->pending_cx_data = cx_data;
           ctx->pending_cx_data_sz += size;
           ctx->pending_frame_sizes[ctx->pending_frame_count++] = size;
 #if !CONFIG_MISC_FIXES
@@ -1001,14 +977,13 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
 
           if (ctx->output_cx_pkt_cb.output_cx_pkt) {
             pkt.kind = VPX_CODEC_CX_FRAME_PKT;
-            pkt.data.frame.pts = ticks_to_timebase_units(timebase,
-                                                         dst_time_stamp);
-            pkt.data.frame.duration =
-               (unsigned long)ticks_to_timebase_units(timebase,
-                   dst_end_time_stamp - dst_time_stamp);
+            pkt.data.frame.pts =
+                ticks_to_timebase_units(timebase, dst_time_stamp);
+            pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units(
+                timebase, dst_end_time_stamp - dst_time_stamp);
             pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags);
             pkt.data.frame.buf = ctx->pending_cx_data;
-            pkt.data.frame.sz  = size;
+            pkt.data.frame.sz = size;
             ctx->pending_cx_data = NULL;
             ctx->pending_cx_data_sz = 0;
             ctx->pending_frame_count = 0;
@@ -1024,9 +999,8 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
         // Add the frame packet to the list of returned packets.
         pkt.kind = VPX_CODEC_CX_FRAME_PKT;
         pkt.data.frame.pts = ticks_to_timebase_units(timebase, dst_time_stamp);
-        pkt.data.frame.duration =
-           (unsigned long)ticks_to_timebase_units(timebase,
-               dst_end_time_stamp - dst_time_stamp);
+        pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units(
+            timebase, dst_end_time_stamp - dst_time_stamp);
         pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags);
 
         if (ctx->pending_cx_data) {
@@ -1039,7 +1013,7 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
           if (!ctx->output_cx_pkt_cb.output_cx_pkt)
             size += write_superframe_index(ctx);
           pkt.data.frame.buf = ctx->pending_cx_data;
-          pkt.data.frame.sz  = ctx->pending_cx_data_sz;
+          pkt.data.frame.sz = ctx->pending_cx_data_sz;
           ctx->pending_cx_data = NULL;
           ctx->pending_cx_data_sz = 0;
           ctx->pending_frame_count = 0;
@@ -1048,11 +1022,11 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t  *ctx,
 #endif
         } else {
           pkt.data.frame.buf = cx_data;
-          pkt.data.frame.sz  = size;
+          pkt.data.frame.sz = size;
         }
         pkt.data.frame.partition_id = -1;
 
-        if(ctx->output_cx_pkt_cb.output_cx_pkt)
+        if (ctx->output_cx_pkt_cb.output_cx_pkt)
           ctx->output_cx_pkt_cb.output_cx_pkt(&pkt,
                                               ctx->output_cx_pkt_cb.user_priv);
         else
@@ -1080,8 +1054,8 @@ static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
     YV12_BUFFER_CONFIG sd;
 
     image2yuvconfig(&frame->img, &sd);
-    vp10_set_reference_enc(ctx->cpi, ref_frame_to_vp10_reframe(frame->frame_type),
-                          &sd);
+    vp10_set_reference_enc(ctx->cpi,
+                           ref_frame_to_vp10_reframe(frame->frame_type), &sd);
     return VPX_CODEC_OK;
   } else {
     return VPX_CODEC_INVALID_PARAM;
@@ -1097,7 +1071,7 @@ static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
 
     image2yuvconfig(&frame->img, &sd);
     vp10_copy_reference_enc(ctx->cpi,
-                           ref_frame_to_vp10_reframe(frame->frame_type), &sd);
+                            ref_frame_to_vp10_reframe(frame->frame_type), &sd);
     return VPX_CODEC_OK;
   } else {
     return VPX_CODEC_INVALID_PARAM;
@@ -1126,7 +1100,6 @@ static vpx_codec_err_t ctrl_set_previewpp(vpx_codec_alg_priv_t *ctx,
   return VPX_CODEC_INCAPABLE;
 }
 
-
 static vpx_image_t *encoder_get_preview(vpx_codec_alg_priv_t *ctx) {
   YV12_BUFFER_CONFIG sd;
 
@@ -1147,14 +1120,13 @@ static vpx_codec_err_t ctrl_set_roi_map(vpx_codec_alg_priv_t *ctx,
   return VPX_CODEC_INVALID_PARAM;
 }
 
-
 static vpx_codec_err_t ctrl_set_active_map(vpx_codec_alg_priv_t *ctx,
                                            va_list args) {
   vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
 
   if (map) {
-    if (!vp10_set_active_map(ctx->cpi, map->active_map,
-                            (int)map->rows, (int)map->cols))
+    if (!vp10_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
+                             (int)map->cols))
       return VPX_CODEC_OK;
     else
       return VPX_CODEC_INVALID_PARAM;
@@ -1168,8 +1140,8 @@ static vpx_codec_err_t ctrl_get_active_map(vpx_codec_alg_priv_t *ctx,
   vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
 
   if (map) {
-    if (!vp10_get_active_map(ctx->cpi, map->active_map,
-                            (int)map->rows, (int)map->cols))
+    if (!vp10_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
+                             (int)map->cols))
       return VPX_CODEC_OK;
     else
       return VPX_CODEC_INVALID_PARAM;
@@ -1183,9 +1155,9 @@ static vpx_codec_err_t ctrl_set_scale_mode(vpx_codec_alg_priv_t *ctx,
   vpx_scaling_mode_t *const mode = va_arg(args, vpx_scaling_mode_t *);
 
   if (mode) {
-    const int res = vp10_set_internal_size(ctx->cpi,
-                                          (VPX_SCALING)mode->h_scaling_mode,
-                                          (VPX_SCALING)mode->v_scaling_mode);
+    const int res =
+        vp10_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
+                               (VPX_SCALING)mode->v_scaling_mode);
     return (res == 0) ? VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM;
   } else {
     return VPX_CODEC_INVALID_PARAM;
@@ -1227,121 +1199,120 @@ static vpx_codec_err_t ctrl_set_render_size(vpx_codec_alg_priv_t *ctx,
                                             va_list args) {
   struct vp10_extracfg extra_cfg = ctx->extra_cfg;
   int *const render_size = va_arg(args, int *);
-  extra_cfg.render_width  = render_size[0];
+  extra_cfg.render_width = render_size[0];
   extra_cfg.render_height = render_size[1];
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
-  {VP8_COPY_REFERENCE,                ctrl_copy_reference},
+  { VP8_COPY_REFERENCE, ctrl_copy_reference },
 
   // Setters
-  {VP8_SET_REFERENCE,                 ctrl_set_reference},
-  {VP8_SET_POSTPROC,                  ctrl_set_previewpp},
-  {VP8E_SET_ROI_MAP,                  ctrl_set_roi_map},
-  {VP8E_SET_ACTIVEMAP,                ctrl_set_active_map},
-  {VP8E_SET_SCALEMODE,                ctrl_set_scale_mode},
-  {VP8E_SET_CPUUSED,                  ctrl_set_cpuused},
-  {VP8E_SET_ENABLEAUTOALTREF,         ctrl_set_enable_auto_alt_ref},
-  {VP8E_SET_SHARPNESS,                ctrl_set_sharpness},
-  {VP8E_SET_STATIC_THRESHOLD,         ctrl_set_static_thresh},
-  {VP9E_SET_TILE_COLUMNS,             ctrl_set_tile_columns},
-  {VP9E_SET_TILE_ROWS,                ctrl_set_tile_rows},
-  {VP8E_SET_ARNR_MAXFRAMES,           ctrl_set_arnr_max_frames},
-  {VP8E_SET_ARNR_STRENGTH,            ctrl_set_arnr_strength},
-  {VP8E_SET_ARNR_TYPE,                ctrl_set_arnr_type},
-  {VP8E_SET_TUNING,                   ctrl_set_tuning},
-  {VP8E_SET_CQ_LEVEL,                 ctrl_set_cq_level},
-  {VP8E_SET_MAX_INTRA_BITRATE_PCT,    ctrl_set_rc_max_intra_bitrate_pct},
-  {VP9E_SET_MAX_INTER_BITRATE_PCT,    ctrl_set_rc_max_inter_bitrate_pct},
-  {VP9E_SET_GF_CBR_BOOST_PCT,         ctrl_set_rc_gf_cbr_boost_pct},
-  {VP9E_SET_LOSSLESS,                 ctrl_set_lossless},
-  {VP9E_SET_FRAME_PARALLEL_DECODING,  ctrl_set_frame_parallel_decoding_mode},
-  {VP9E_SET_AQ_MODE,                  ctrl_set_aq_mode},
-  {VP9E_SET_FRAME_PERIODIC_BOOST,     ctrl_set_frame_periodic_boost},
-  {VP9E_REGISTER_CX_CALLBACK,         ctrl_register_cx_callback},
-  {VP9E_SET_TUNE_CONTENT,             ctrl_set_tune_content},
-  {VP9E_SET_COLOR_SPACE,              ctrl_set_color_space},
-  {VP9E_SET_COLOR_RANGE,              ctrl_set_color_range},
-  {VP9E_SET_NOISE_SENSITIVITY,        ctrl_set_noise_sensitivity},
-  {VP9E_SET_MIN_GF_INTERVAL,          ctrl_set_min_gf_interval},
-  {VP9E_SET_MAX_GF_INTERVAL,          ctrl_set_max_gf_interval},
-  {VP9E_SET_RENDER_SIZE,              ctrl_set_render_size},
+  { VP8_SET_REFERENCE, ctrl_set_reference },
+  { VP8_SET_POSTPROC, ctrl_set_previewpp },
+  { VP8E_SET_ROI_MAP, ctrl_set_roi_map },
+  { VP8E_SET_ACTIVEMAP, ctrl_set_active_map },
+  { VP8E_SET_SCALEMODE, ctrl_set_scale_mode },
+  { VP8E_SET_CPUUSED, ctrl_set_cpuused },
+  { VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref },
+  { VP8E_SET_SHARPNESS, ctrl_set_sharpness },
+  { VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh },
+  { VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns },
+  { VP9E_SET_TILE_ROWS, ctrl_set_tile_rows },
+  { VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames },
+  { VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength },
+  { VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type },
+  { VP8E_SET_TUNING, ctrl_set_tuning },
+  { VP8E_SET_CQ_LEVEL, ctrl_set_cq_level },
+  { VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct },
+  { VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct },
+  { VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct },
+  { VP9E_SET_LOSSLESS, ctrl_set_lossless },
+  { VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode },
+  { VP9E_SET_AQ_MODE, ctrl_set_aq_mode },
+  { VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost },
+  { VP9E_REGISTER_CX_CALLBACK, ctrl_register_cx_callback },
+  { VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content },
+  { VP9E_SET_COLOR_SPACE, ctrl_set_color_space },
+  { VP9E_SET_COLOR_RANGE, ctrl_set_color_range },
+  { VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity },
+  { VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval },
+  { VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval },
+  { VP9E_SET_RENDER_SIZE, ctrl_set_render_size },
 
   // Getters
-  {VP8E_GET_LAST_QUANTIZER,           ctrl_get_quantizer},
-  {VP8E_GET_LAST_QUANTIZER_64,        ctrl_get_quantizer64},
-  {VP9_GET_REFERENCE,                 ctrl_get_reference},
-  {VP9E_GET_ACTIVEMAP,                ctrl_get_active_map},
+  { VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer },
+  { VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64 },
+  { VP9_GET_REFERENCE, ctrl_get_reference },
+  { VP9E_GET_ACTIVEMAP, ctrl_get_active_map },
 
-  { -1, NULL},
+  { -1, NULL },
 };
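
/* The { -1, NULL } entry is a sentinel terminating the table: control
 * dispatch (vpx_codec_control_()) scans entries until it. A sketch of that
 * lookup, assuming the ctrl_id/fn field names of vpx_codec_ctrl_fn_map_t: */
static vpx_codec_control_fn_t find_ctrl(vpx_codec_ctrl_fn_map_t *map,
                                        int ctrl_id) {
  for (; map->fn != NULL; ++map)
    if (map->ctrl_id == ctrl_id) return map->fn;
  return NULL; /* unknown control id */
}
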
 
 static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
-  {
-    0,
-    {  // NOLINT
-      0,                  // g_usage
-      8,                  // g_threads
-      0,                  // g_profile
-
-      320,                // g_width
-      240,                // g_height
-      VPX_BITS_8,         // g_bit_depth
-      8,                  // g_input_bit_depth
-
-      {1, 30},            // g_timebase
-
-      0,                  // g_error_resilient
-
-      VPX_RC_ONE_PASS,    // g_pass
-
-      25,                 // g_lag_in_frames
-
-      0,                  // rc_dropframe_thresh
-      0,                  // rc_resize_allowed
-      0,                  // rc_scaled_width
-      0,                  // rc_scaled_height
-      60,                 // rc_resize_down_thresold
-      30,                 // rc_resize_up_thresold
-
-      VPX_VBR,            // rc_end_usage
-      {NULL, 0},          // rc_twopass_stats_in
-      {NULL, 0},          // rc_firstpass_mb_stats_in
-      256,                // rc_target_bandwidth
-      0,                  // rc_min_quantizer
-      63,                 // rc_max_quantizer
-      25,                 // rc_undershoot_pct
-      25,                 // rc_overshoot_pct
-
-      6000,               // rc_max_buffer_size
-      4000,               // rc_buffer_initial_size
-      5000,               // rc_buffer_optimal_size
-
-      50,                 // rc_two_pass_vbrbias
-      0,                  // rc_two_pass_vbrmin_section
-      2000,               // rc_two_pass_vbrmax_section
-
-      // keyframing settings (kf)
-      VPX_KF_AUTO,        // g_kfmode
-      0,                  // kf_min_dist
-      9999,               // kf_max_dist
-
-      // TODO(yunqingwang): Spatial/temporal scalability are not supported
-      // in VP10. The following 10 parameters are not used, which should
-      // be removed later.
-      1,                      // ss_number_layers
-      {0},
-      {0},                    // ss_target_bitrate
-      1,                      // ts_number_layers
-      {0},                    // ts_target_bitrate
-      {0},                    // ts_rate_decimator
-      0,                      // ts_periodicity
-      {0},                    // ts_layer_id
-      {0},                  // layer_taget_bitrate
-      0                     // temporal_layering_mode
-    }
-  },
+  { 0,
+    {
+        // NOLINT
+        0,  // g_usage
+        8,  // g_threads
+        0,  // g_profile
+
+        320,         // g_width
+        240,         // g_height
+        VPX_BITS_8,  // g_bit_depth
+        8,           // g_input_bit_depth
+
+        { 1, 30 },  // g_timebase
+
+        0,  // g_error_resilient
+
+        VPX_RC_ONE_PASS,  // g_pass
+
+        25,  // g_lag_in_frames
+
+        0,   // rc_dropframe_thresh
+        0,   // rc_resize_allowed
+        0,   // rc_scaled_width
+        0,   // rc_scaled_height
+        60,  // rc_resize_down_threshold
+        30,  // rc_resize_up_threshold
+
+        VPX_VBR,      // rc_end_usage
+        { NULL, 0 },  // rc_twopass_stats_in
+        { NULL, 0 },  // rc_firstpass_mb_stats_in
+        256,          // rc_target_bandwidth
+        0,            // rc_min_quantizer
+        63,           // rc_max_quantizer
+        25,           // rc_undershoot_pct
+        25,           // rc_overshoot_pct
+
+        6000,  // rc_max_buffer_size
+        4000,  // rc_buffer_initial_size
+        5000,  // rc_buffer_optimal_size
+
+        50,    // rc_two_pass_vbrbias
+        0,     // rc_two_pass_vbrmin_section
+        2000,  // rc_two_pass_vbrmax_section
+
+        // keyframing settings (kf)
+        VPX_KF_AUTO,  // g_kfmode
+        0,            // kf_min_dist
+        9999,         // kf_max_dist
+
+        // TODO(yunqingwang): Spatial/temporal scalability are not supported
+        // in VP10. The following 10 parameters are not used and should
+        // be removed later.
+        1,  // ss_number_layers
+        { 0 },
+        { 0 },  // ss_target_bitrate
+        1,      // ts_number_layers
+        { 0 },  // ts_target_bitrate
+        { 0 },  // ts_rate_decimator
+        0,      // ts_periodicity
+        { 0 },  // ts_layer_id
+        { 0 },  // layer_target_bitrate
+        0       // temporal_layering_mode
+    } },
 };
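
/* Usage sketch: this table backs vpx_codec_enc_config_default(), so an
 * application starts from the defaults above and overrides fields (values
 * here are illustrative; error handling abbreviated): */
static vpx_codec_err_t default_cfg_720p(vpx_codec_enc_cfg_t *cfg) {
  const vpx_codec_err_t res =
      vpx_codec_enc_config_default(vpx_codec_vp10_cx(), cfg, 0 /* usage */);
  if (res != VPX_CODEC_OK) return res;
  cfg->g_w = 1280; /* replaces the 320x240 default */
  cfg->g_h = 720;
  cfg->rc_target_bitrate = 1000; /* kbit/s, cf. the 256 default above */
  return VPX_CODEC_OK;
}
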
 
 #ifndef VERSION_STRING
@@ -1353,25 +1324,27 @@ CODEC_INTERFACE(vpx_codec_vp10_cx) = {
 #if CONFIG_VPX_HIGHBITDEPTH
   VPX_CODEC_CAP_HIGHBITDEPTH |
 #endif
-  VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR,  // vpx_codec_caps_t
-  encoder_init,       // vpx_codec_init_fn_t
-  encoder_destroy,    // vpx_codec_destroy_fn_t
-  encoder_ctrl_maps,  // vpx_codec_ctrl_fn_map_t
-  {  // NOLINT
-    NULL,  // vpx_codec_peek_si_fn_t
-    NULL,  // vpx_codec_get_si_fn_t
-    NULL,  // vpx_codec_decode_fn_t
-    NULL,  // vpx_codec_frame_get_fn_t
-    NULL   // vpx_codec_set_fb_fn_t
+      VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR,  // vpx_codec_caps_t
+  encoder_init,                                    // vpx_codec_init_fn_t
+  encoder_destroy,                                 // vpx_codec_destroy_fn_t
+  encoder_ctrl_maps,                               // vpx_codec_ctrl_fn_map_t
+  {
+      // NOLINT
+      NULL,  // vpx_codec_peek_si_fn_t
+      NULL,  // vpx_codec_get_si_fn_t
+      NULL,  // vpx_codec_decode_fn_t
+      NULL,  // vpx_codec_frame_get_fn_t
+      NULL   // vpx_codec_set_fb_fn_t
   },
-  {  // NOLINT
-    1,                      // 1 cfg map
-    encoder_usage_cfg_map,  // vpx_codec_enc_cfg_map_t
-    encoder_encode,         // vpx_codec_encode_fn_t
-    encoder_get_cxdata,     // vpx_codec_get_cx_data_fn_t
-    encoder_set_config,     // vpx_codec_enc_config_set_fn_t
-    NULL,        // vpx_codec_get_global_headers_fn_t
-    encoder_get_preview,    // vpx_codec_get_preview_frame_fn_t
-    NULL         // vpx_codec_enc_mr_get_mem_loc_fn_t
+  {
+      // NOLINT
+      1,                      // 1 cfg map
+      encoder_usage_cfg_map,  // vpx_codec_enc_cfg_map_t
+      encoder_encode,         // vpx_codec_encode_fn_t
+      encoder_get_cxdata,     // vpx_codec_get_cx_data_fn_t
+      encoder_set_config,     // vpx_codec_enc_config_set_fn_t
+      NULL,                   // vpx_codec_get_global_headers_fn_t
+      encoder_get_preview,    // vpx_codec_get_preview_frame_fn_t
+      NULL                    // vpx_codec_enc_mr_get_mem_loc_fn_t
   }
 };
diff --git a/vp10/vp10_dx_iface.c b/vp10/vp10_dx_iface.c
index 444c282a8b9a3b58ad8c6bdd7770a238603ff67a..23ba863b12a03d27fa4788167681add4f9d24a5d 100644
--- a/vp10/vp10_dx_iface.c
+++ b/vp10/vp10_dx_iface.c
@@ -33,7 +33,7 @@ typedef vpx_codec_stream_info_t vp10_stream_info_t;
 
 // This limit is due to framebuffer numbers.
 // TODO(hkuang): Remove this limit after implementing ondemand framebuffers.
-#define FRAME_CACHE_SIZE 6   // Cache maximum 6 decoded frames.
+#define FRAME_CACHE_SIZE 6  // Cache at most 6 decoded frames.
 
 typedef struct cache_frame {
   int fb_idx;
@@ -41,36 +41,36 @@ typedef struct cache_frame {
 } cache_frame;
 
 struct vpx_codec_alg_priv {
-  vpx_codec_priv_t        base;
-  vpx_codec_dec_cfg_t     cfg;
-  vp10_stream_info_t       si;
-  int                     postproc_cfg_set;
-  vp8_postproc_cfg_t      postproc_cfg;
-  vpx_decrypt_cb          decrypt_cb;
-  void                    *decrypt_state;
-  vpx_image_t             img;
-  int                     img_avail;
-  int                     flushed;
-  int                     invert_tile_order;
-  int                     last_show_frame;  // Index of last output frame.
-  int                     byte_alignment;
-  int                     skip_loop_filter;
+  vpx_codec_priv_t base;
+  vpx_codec_dec_cfg_t cfg;
+  vp10_stream_info_t si;
+  int postproc_cfg_set;
+  vp8_postproc_cfg_t postproc_cfg;
+  vpx_decrypt_cb decrypt_cb;
+  void *decrypt_state;
+  vpx_image_t img;
+  int img_avail;
+  int flushed;
+  int invert_tile_order;
+  int last_show_frame;  // Index of last output frame.
+  int byte_alignment;
+  int skip_loop_filter;
 
   // Frame parallel related.
-  int                     frame_parallel_decode;  // frame-based threading.
-  VPxWorker               *frame_workers;
-  int                     num_frame_workers;
-  int                     next_submit_worker_id;
-  int                     last_submit_worker_id;
-  int                     next_output_worker_id;
-  int                     available_threads;
-  cache_frame             frame_cache[FRAME_CACHE_SIZE];
-  int                     frame_cache_write;
-  int                     frame_cache_read;
-  int                     num_cache_frames;
-  int                     need_resync;      // wait for key/intra-only frame
+  int frame_parallel_decode;  // frame-based threading.
+  VPxWorker *frame_workers;
+  int num_frame_workers;
+  int next_submit_worker_id;
+  int last_submit_worker_id;
+  int next_output_worker_id;
+  int available_threads;
+  cache_frame frame_cache[FRAME_CACHE_SIZE];
+  int frame_cache_write;
+  int frame_cache_read;
+  int num_cache_frames;
+  int need_resync;  // wait for key/intra-only frame
   // BufferPool that holds all reference frames. Shared by all the FrameWorkers.
-  BufferPool              *buffer_pool;
+  BufferPool *buffer_pool;
 
   // External frame buffer info to save for VP10 common.
   void *ext_priv;  // Private data associated with the external frame buffers.
@@ -88,8 +88,7 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
   if (!ctx->priv) {
     vpx_codec_alg_priv_t *const priv =
         (vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv));
-    if (priv == NULL)
-      return VPX_CODEC_MEM_ERROR;
+    if (priv == NULL) return VPX_CODEC_MEM_ERROR;
 
     ctx->priv = (vpx_codec_priv_t *)priv;
     ctx->priv->init_flags = ctx->init_flags;
@@ -98,7 +97,9 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
     // Only do frame parallel decode when threads > 1.
     priv->frame_parallel_decode =
         (ctx->config.dec && (ctx->config.dec->threads > 1) &&
-         (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 1 : 0;
+         (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING))
+            ? 1
+            : 0;
     if (ctx->config.dec) {
       priv->cfg = *ctx->config.dec;
       ctx->config.dec = &priv->cfg;
@@ -141,11 +142,10 @@ static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
   return VPX_CODEC_OK;
 }
 
-static int parse_bitdepth_colorspace_sampling(
-    BITSTREAM_PROFILE profile, struct vpx_read_bit_buffer *rb) {
+static int parse_bitdepth_colorspace_sampling(BITSTREAM_PROFILE profile,
+                                              struct vpx_read_bit_buffer *rb) {
   vpx_color_space_t color_space;
-  if (profile >= PROFILE_2)
-    rb->bit_offset += 1;  // Bit-depth 10 or 12.
+  if (profile >= PROFILE_2) rb->bit_offset += 1;  // Bit-depth 10 or 12.
   color_space = (vpx_color_space_t)vpx_rb_read_literal(rb, 3);
   if (color_space != VPX_CS_SRGB) {
     rb->bit_offset += 1;  // [16,235] (including xvycc) vs [0,255] range.
@@ -164,17 +164,13 @@ static int parse_bitdepth_colorspace_sampling(
   return 1;
 }
 
-static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
-                                                unsigned int data_sz,
-                                                vpx_codec_stream_info_t *si,
-                                                int *is_intra_only,
-                                                vpx_decrypt_cb decrypt_cb,
-                                                void *decrypt_state) {
+static vpx_codec_err_t decoder_peek_si_internal(
+    const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si,
+    int *is_intra_only, vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
   int intra_only_flag = 0;
   uint8_t clear_buffer[9];
 
-  if (data + data_sz <= data)
-    return VPX_CODEC_INVALID_PARAM;
+  if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM;
 
   si->is_kf = 0;
   si->w = si->h = 0;
@@ -192,30 +188,26 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
     const int frame_marker = vpx_rb_read_literal(&rb, 2);
     const BITSTREAM_PROFILE profile = vp10_read_profile(&rb);
 
-    if (frame_marker != VPX_FRAME_MARKER)
-      return VPX_CODEC_UNSUP_BITSTREAM;
+    if (frame_marker != VPX_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM;
 
-    if (profile >= MAX_PROFILES)
-      return VPX_CODEC_UNSUP_BITSTREAM;
+    if (profile >= MAX_PROFILES) return VPX_CODEC_UNSUP_BITSTREAM;
 
     if ((profile >= 2 && data_sz <= 1) || data_sz < 1)
       return VPX_CODEC_UNSUP_BITSTREAM;
 
-    if (vpx_rb_read_bit(&rb)) {  // show an existing frame
+    if (vpx_rb_read_bit(&rb)) {     // show an existing frame
       vpx_rb_read_literal(&rb, 3);  // Frame buffer to show.
       return VPX_CODEC_OK;
     }
 
-    if (data_sz <= 8)
-      return VPX_CODEC_UNSUP_BITSTREAM;
+    if (data_sz <= 8) return VPX_CODEC_UNSUP_BITSTREAM;
 
     si->is_kf = !vpx_rb_read_bit(&rb);
     show_frame = vpx_rb_read_bit(&rb);
     error_resilient = vpx_rb_read_bit(&rb);
 
     if (si->is_kf) {
-      if (!vp10_read_sync_code(&rb))
-        return VPX_CODEC_UNSUP_BITSTREAM;
+      if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
 
       if (!parse_bitdepth_colorspace_sampling(profile, &rb))
         return VPX_CODEC_UNSUP_BITSTREAM;
@@ -226,8 +218,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
       rb.bit_offset += error_resilient ? 0 : 2;  // reset_frame_context
 
       if (intra_only_flag) {
-        if (!vp10_read_sync_code(&rb))
-          return VPX_CODEC_UNSUP_BITSTREAM;
+        if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
         if (profile > PROFILE_0) {
           if (!parse_bitdepth_colorspace_sampling(profile, &rb))
             return VPX_CODEC_UNSUP_BITSTREAM;
@@ -237,8 +228,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
       }
     }
   }
-  if (is_intra_only != NULL)
-    *is_intra_only = intra_only_flag;
+  if (is_intra_only != NULL) *is_intra_only = intra_only_flag;
   return VPX_CODEC_OK;
 }
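
/* The profile parse done by vp10_read_profile() inside the peek above
 * follows the VP9 header layout: two bits read low-to-high, plus one
 * extension bit when both are set. A standalone sketch under that
 * assumption (peek_profile is illustrative, not the library helper): */
static int peek_profile(struct vpx_read_bit_buffer *rb) {
  int profile = vpx_rb_read_bit(rb);
  profile |= vpx_rb_read_bit(rb) << 1;
  if (profile > 2) profile += vpx_rb_read_bit(rb); /* PROFILE_3 extension */
  return profile;
}
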
 
@@ -251,8 +241,8 @@ static vpx_codec_err_t decoder_peek_si(const uint8_t *data,
 static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx,
                                       vpx_codec_stream_info_t *si) {
   const size_t sz = (si->sz >= sizeof(vp10_stream_info_t))
-                       ? sizeof(vp10_stream_info_t)
-                       : sizeof(vpx_codec_stream_info_t);
+                        ? sizeof(vp10_stream_info_t)
+                        : sizeof(vpx_codec_stream_info_t);
   memcpy(si, &ctx->si, sz);
   si->sz = (unsigned int)sz;
 
@@ -264,8 +254,8 @@ static void set_error_detail(vpx_codec_alg_priv_t *ctx,
   ctx->base.err_detail = error;
 }
 
-static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
-                           const struct vpx_internal_error_info *error) {
+static vpx_codec_err_t update_error_state(
+    vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
   if (error->error_code)
     set_error_detail(ctx, error->has_detail ? error->detail : NULL);
 
@@ -313,10 +303,8 @@ static int frame_worker_hook(void *arg1, void *arg2) {
   const uint8_t *data = frame_worker_data->data;
   (void)arg2;
 
-  frame_worker_data->result =
-      vp10_receive_compressed_data(frame_worker_data->pbi,
-                                  frame_worker_data->data_size,
-                                  &data);
+  frame_worker_data->result = vp10_receive_compressed_data(
+      frame_worker_data->pbi, frame_worker_data->data_size, &data);
   frame_worker_data->data_end = data;
 
   if (frame_worker_data->pbi->common.frame_parallel_decode) {
@@ -358,25 +346,24 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
   ctx->num_cache_frames = 0;
   ctx->need_resync = 1;
   ctx->num_frame_workers =
-      (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1;
+      (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads : 1;
   if (ctx->num_frame_workers > MAX_DECODE_THREADS)
     ctx->num_frame_workers = MAX_DECODE_THREADS;
   ctx->available_threads = ctx->num_frame_workers;
   ctx->flushed = 0;
 
   ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
-  if (ctx->buffer_pool == NULL)
-    return VPX_CODEC_MEM_ERROR;
+  if (ctx->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
 
 #if CONFIG_MULTITHREAD
-    if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
-      set_error_detail(ctx, "Failed to allocate buffer pool mutex");
-      return VPX_CODEC_MEM_ERROR;
-    }
+  if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
+    set_error_detail(ctx, "Failed to allocate buffer pool mutex");
+    return VPX_CODEC_MEM_ERROR;
+  }
 #endif
 
-  ctx->frame_workers = (VPxWorker *)
-      vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers));
+  ctx->frame_workers = (VPxWorker *)vpx_malloc(ctx->num_frame_workers *
+                                               sizeof(*ctx->frame_workers));
   if (ctx->frame_workers == NULL) {
     set_error_detail(ctx, "Failed to allocate frame_workers");
     return VPX_CODEC_MEM_ERROR;
@@ -431,8 +418,7 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
 
   // If postprocessing was enabled by the application and a
   // configuration has not been provided, default it.
-  if (!ctx->postproc_cfg_set &&
-      (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
+  if (!ctx->postproc_cfg_set && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
     set_default_ppflags(&ctx->postproc_cfg);
 
   init_buffer_callbacks(ctx);
@@ -462,11 +448,9 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
     const vpx_codec_err_t res =
         decoder_peek_si_internal(*data, data_sz, &ctx->si, &is_intra_only,
                                  ctx->decrypt_cb, ctx->decrypt_state);
-    if (res != VPX_CODEC_OK)
-      return res;
+    if (res != VPX_CODEC_OK) return res;
 
-    if (!ctx->si.is_kf && !is_intra_only)
-      return VPX_CODEC_ERROR;
+    if (!ctx->si.is_kf && !is_intra_only) return VPX_CODEC_ERROR;
   }
 
   if (!ctx->frame_parallel_decode) {
@@ -560,8 +544,7 @@ static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
                     frame_worker_data->user_priv);
     ctx->frame_cache[ctx->frame_cache_write].img.fb_priv =
         frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
-    ctx->frame_cache_write =
-        (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
+    ctx->frame_cache_write = (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
     ++ctx->num_cache_frames;
   }
 }
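
/* frame_cache_write/frame_cache_read plus num_cache_frames implement a
 * fixed-capacity ring buffer over FRAME_CACHE_SIZE slots. The same scheme
 * in isolation (illustrative type and names): */
#define RING_SIZE 6
typedef struct {
  int slot[RING_SIZE];
  int wr, rd, count;
} ring_t;

static int ring_push(ring_t *r, int v) {
  if (r->count == RING_SIZE) return 0; /* full; caller must drain first */
  r->slot[r->wr] = v;
  r->wr = (r->wr + 1) % RING_SIZE;
  ++r->count;
  return 1;
}

static int ring_pop(ring_t *r, int *v) {
  if (r->count == 0) return 0; /* empty */
  *v = r->slot[r->rd];
  r->rd = (r->rd + 1) % RING_SIZE;
  --r->count;
  return 1;
}
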
@@ -570,7 +553,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
                                       const uint8_t *data, unsigned int data_sz,
                                       void *user_priv, long deadline) {
   const uint8_t *data_start = data;
-  const uint8_t * const data_end = data + data_sz;
+  const uint8_t *const data_end = data + data_sz;
   vpx_codec_err_t res;
   uint32_t frame_sizes[8];
   int frame_count;
@@ -586,14 +569,12 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
   // Initialize the decoder workers on the first frame.
   if (ctx->frame_workers == NULL) {
     const vpx_codec_err_t res = init_decoder(ctx);
-    if (res != VPX_CODEC_OK)
-      return res;
+    if (res != VPX_CODEC_OK) return res;
   }
 
   res = vp10_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
-                                   ctx->decrypt_cb, ctx->decrypt_state);
-  if (res != VPX_CODEC_OK)
-    return res;
+                                    ctx->decrypt_cb, ctx->decrypt_state);
+  if (res != VPX_CODEC_OK) return res;
 
   if (ctx->frame_parallel_decode) {
     // Decode in frame parallel mode. When decoding in this mode, the frame
@@ -606,8 +587,8 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
       for (i = 0; i < frame_count; ++i) {
         const uint8_t *data_start_copy = data_start;
         const uint32_t frame_size = frame_sizes[i];
-        if (data_start < data
-            || frame_size > (uint32_t) (data_end - data_start)) {
+        if (data_start < data ||
+            frame_size > (uint32_t)(data_end - data_start)) {
           set_error_detail(ctx, "Invalid frame size in index");
           return VPX_CODEC_CORRUPT_FRAME;
         }
@@ -624,10 +605,9 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
           }
         }
 
-        res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
-                         deadline);
-        if (res != VPX_CODEC_OK)
-          return res;
+        res =
+            decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
+        if (res != VPX_CODEC_OK) return res;
         data_start += frame_size;
       }
     } else {
@@ -644,8 +624,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
       }
 
       res = decode_one(ctx, &data, data_sz, user_priv, deadline);
-      if (res != VPX_CODEC_OK)
-        return res;
+      if (res != VPX_CODEC_OK) return res;
     }
   } else {
     // Decode in serial mode.
@@ -656,33 +635,30 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
         const uint8_t *data_start_copy = data_start;
         const uint32_t frame_size = frame_sizes[i];
         vpx_codec_err_t res;
-        if (data_start < data
-            || frame_size > (uint32_t) (data_end - data_start)) {
+        if (data_start < data ||
+            frame_size > (uint32_t)(data_end - data_start)) {
           set_error_detail(ctx, "Invalid frame size in index");
           return VPX_CODEC_CORRUPT_FRAME;
         }
 
-        res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
-                         deadline);
-        if (res != VPX_CODEC_OK)
-          return res;
+        res =
+            decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
+        if (res != VPX_CODEC_OK) return res;
 
         data_start += frame_size;
       }
     } else {
       while (data_start < data_end) {
-        const uint32_t frame_size = (uint32_t) (data_end - data_start);
-        const vpx_codec_err_t res = decode_one(ctx, &data_start, frame_size,
-                                               user_priv, deadline);
-        if (res != VPX_CODEC_OK)
-          return res;
+        const uint32_t frame_size = (uint32_t)(data_end - data_start);
+        const vpx_codec_err_t res =
+            decode_one(ctx, &data_start, frame_size, user_priv, deadline);
+        if (res != VPX_CODEC_OK) return res;
 
         // Account for suboptimal termination by the encoder.
         while (data_start < data_end) {
-          const uint8_t marker = read_marker(ctx->decrypt_cb,
-                                             ctx->decrypt_state, data_start);
-          if (marker)
-            break;
+          const uint8_t marker =
+              read_marker(ctx->decrypt_cb, ctx->decrypt_state, data_start);
+          if (marker) break;
           ++data_start;
         }
       }
@@ -717,9 +693,8 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
   // Output the frames in the cache first.
   if (ctx->num_cache_frames > 0) {
     release_last_output_frame(ctx);
-    ctx->last_show_frame  = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
-    if (ctx->need_resync)
-      return NULL;
+    ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
+    if (ctx->need_resync) return NULL;
     img = &ctx->frame_cache[ctx->frame_cache_read].img;
     ctx->frame_cache_read = (ctx->frame_cache_read + 1) % FRAME_CACHE_SIZE;
     --ctx->num_cache_frames;
@@ -732,8 +707,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
     do {
       YV12_BUFFER_CONFIG sd;
       const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
-      VPxWorker *const worker =
-          &ctx->frame_workers[ctx->next_output_worker_id];
+      VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
       ctx->next_output_worker_id =
@@ -751,8 +725,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
           RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
           release_last_output_frame(ctx);
           ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
-          if (ctx->need_resync)
-            return NULL;
+          if (ctx->need_resync) return NULL;
           yuvconfig2image(&ctx->img, &sd, frame_worker_data->user_priv);
           ctx->img.fb_priv = frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
           img = &ctx->img;
@@ -763,8 +736,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
         frame_worker_data->received_frame = 0;
         ++ctx->available_threads;
         ctx->need_resync = 1;
-        if (ctx->flushed != 1)
-          return NULL;
+        if (ctx->flushed != 1) return NULL;
       }
     } while (ctx->next_output_worker_id != ctx->next_submit_worker_id);
   }
@@ -772,8 +744,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
 }
 
 static vpx_codec_err_t decoder_set_fb_fn(
-    vpx_codec_alg_priv_t *ctx,
-    vpx_get_frame_buffer_cb_fn_t cb_get,
+    vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
     vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
   if (cb_get == NULL || cb_release == NULL) {
     return VPX_CODEC_INVALID_PARAM;
@@ -806,7 +777,7 @@ static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     image2yuvconfig(&frame->img, &sd);
     return vp10_set_reference_dec(&frame_worker_data->pbi->common,
-                                 (VPX_REFFRAME)frame->frame_type, &sd);
+                                  (VPX_REFFRAME)frame->frame_type, &sd);
   } else {
     return VPX_CODEC_INVALID_PARAM;
   }
@@ -823,13 +794,13 @@ static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
   }
 
   if (data) {
-    vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data;
+    vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
     YV12_BUFFER_CONFIG sd;
     VPxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     image2yuvconfig(&frame->img, &sd);
     return vp10_copy_reference_dec(frame_worker_data->pbi,
-                                  (VPX_REFFRAME)frame->frame_type, &sd);
+                                   (VPX_REFFRAME)frame->frame_type, &sd);
   } else {
     return VPX_CODEC_INVALID_PARAM;
   }
@@ -846,7 +817,7 @@ static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
   }
 
   if (data) {
-    YV12_BUFFER_CONFIG* fb;
+    YV12_BUFFER_CONFIG *fb;
     VPxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
@@ -1025,8 +996,7 @@ static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx,
   ctx->byte_alignment = byte_alignment;
   if (ctx->frame_workers) {
     VPxWorker *const worker = ctx->frame_workers;
-    FrameWorkerData *const frame_worker_data =
-        (FrameWorkerData *)worker->data1;
+    FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     frame_worker_data->pbi->common.byte_alignment = byte_alignment;
   }
   return VPX_CODEC_OK;
@@ -1046,29 +1016,29 @@ static vpx_codec_err_t ctrl_set_skip_loop_filter(vpx_codec_alg_priv_t *ctx,
 }
 
 static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
-  {VP8_COPY_REFERENCE,            ctrl_copy_reference},
+  { VP8_COPY_REFERENCE, ctrl_copy_reference },
 
   // Setters
-  {VP8_SET_REFERENCE,             ctrl_set_reference},
-  {VP8_SET_POSTPROC,              ctrl_set_postproc},
-  {VP8_SET_DBG_COLOR_REF_FRAME,   ctrl_set_dbg_options},
-  {VP8_SET_DBG_COLOR_MB_MODES,    ctrl_set_dbg_options},
-  {VP8_SET_DBG_COLOR_B_MODES,     ctrl_set_dbg_options},
-  {VP8_SET_DBG_DISPLAY_MV,        ctrl_set_dbg_options},
-  {VP9_INVERT_TILE_DECODE_ORDER,  ctrl_set_invert_tile_order},
-  {VPXD_SET_DECRYPTOR,            ctrl_set_decryptor},
-  {VP9_SET_BYTE_ALIGNMENT,        ctrl_set_byte_alignment},
-  {VP9_SET_SKIP_LOOP_FILTER,      ctrl_set_skip_loop_filter},
+  { VP8_SET_REFERENCE, ctrl_set_reference },
+  { VP8_SET_POSTPROC, ctrl_set_postproc },
+  { VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options },
+  { VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options },
+  { VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options },
+  { VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options },
+  { VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order },
+  { VPXD_SET_DECRYPTOR, ctrl_set_decryptor },
+  { VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment },
+  { VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
 
   // Getters
-  {VP8D_GET_LAST_REF_UPDATES,     ctrl_get_last_ref_updates},
-  {VP8D_GET_FRAME_CORRUPTED,      ctrl_get_frame_corrupted},
-  {VP9_GET_REFERENCE,             ctrl_get_reference},
-  {VP9D_GET_DISPLAY_SIZE,         ctrl_get_render_size},
-  {VP9D_GET_BIT_DEPTH,            ctrl_get_bit_depth},
-  {VP9D_GET_FRAME_SIZE,           ctrl_get_frame_size},
-
-  { -1, NULL},
+  { VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates },
+  { VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted },
+  { VP9_GET_REFERENCE, ctrl_get_reference },
+  { VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size },
+  { VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth },
+  { VP9D_GET_FRAME_SIZE, ctrl_get_frame_size },
+
+  { -1, NULL },
 };
 
 #ifndef VERSION_STRING
@@ -1077,26 +1047,28 @@ static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
 CODEC_INTERFACE(vpx_codec_vp10_dx) = {
   "WebM Project VP10 Decoder" VERSION_STRING,
   VPX_CODEC_INTERNAL_ABI_VERSION,
-  VPX_CODEC_CAP_DECODER|
+  VPX_CODEC_CAP_DECODER |
       VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER,  // vpx_codec_caps_t
-  decoder_init,       // vpx_codec_init_fn_t
-  decoder_destroy,    // vpx_codec_destroy_fn_t
-  decoder_ctrl_maps,  // vpx_codec_ctrl_fn_map_t
-  { // NOLINT
-    decoder_peek_si,    // vpx_codec_peek_si_fn_t
-    decoder_get_si,     // vpx_codec_get_si_fn_t
-    decoder_decode,     // vpx_codec_decode_fn_t
-    decoder_get_frame,  // vpx_codec_frame_get_fn_t
-    decoder_set_fb_fn,  // vpx_codec_set_fb_fn_t
+  decoder_init,                             // vpx_codec_init_fn_t
+  decoder_destroy,                          // vpx_codec_destroy_fn_t
+  decoder_ctrl_maps,                        // vpx_codec_ctrl_fn_map_t
+  {
+      // NOLINT
+      decoder_peek_si,    // vpx_codec_peek_si_fn_t
+      decoder_get_si,     // vpx_codec_get_si_fn_t
+      decoder_decode,     // vpx_codec_decode_fn_t
+      decoder_get_frame,  // vpx_codec_frame_get_fn_t
+      decoder_set_fb_fn,  // vpx_codec_set_fb_fn_t
   },
-  { // NOLINT
-    0,
-    NULL,  // vpx_codec_enc_cfg_map_t
-    NULL,  // vpx_codec_encode_fn_t
-    NULL,  // vpx_codec_get_cx_data_fn_t
-    NULL,  // vpx_codec_enc_config_set_fn_t
-    NULL,  // vpx_codec_get_global_headers_fn_t
-    NULL,  // vpx_codec_get_preview_frame_fn_t
-    NULL   // vpx_codec_enc_mr_get_mem_loc_fn_t
+  {
+      // NOLINT
+      0,
+      NULL,  // vpx_codec_enc_cfg_map_t
+      NULL,  // vpx_codec_encode_fn_t
+      NULL,  // vpx_codec_get_cx_data_fn_t
+      NULL,  // vpx_codec_enc_config_set_fn_t
+      NULL,  // vpx_codec_get_global_headers_fn_t
+      NULL,  // vpx_codec_get_preview_frame_fn_t
+      NULL   // vpx_codec_enc_mr_get_mem_loc_fn_t
   }
 };
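
/* Usage sketch for this interface: decoder_init() only enables
 * frame-parallel decode when threads > 1 and the caller sets
 * VPX_CODEC_USE_FRAME_THREADING. Illustrative application-side setup
 * (open_vp10_decoder is a hypothetical wrapper): */
static vpx_codec_err_t open_vp10_decoder(vpx_codec_ctx_t *decoder) {
  vpx_codec_dec_cfg_t cfg = { 0 };
  cfg.threads = 4; /* > 1 so frame threading can take effect */
  return vpx_codec_dec_init(decoder, vpx_codec_vp10_dx(), &cfg,
                            VPX_CODEC_USE_FRAME_THREADING);
}
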
diff --git a/vp10/vp10_iface_common.h b/vp10/vp10_iface_common.h
index b875275ed30857a60a0c18800018ec1a2997399e..b930a2e15994845bb88374e3438d047c6dbb18b2 100644
--- a/vp10/vp10_iface_common.h
+++ b/vp10/vp10_iface_common.h
@@ -12,7 +12,7 @@
 
 #include "vpx_ports/mem.h"
 
-static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG  *yv12,
+static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
                             void *user_priv) {
   /** vpx_img_wrap() doesn't allow specifying independent strides for
     * the Y, U, and V planes, nor other alignment adjustments that
@@ -61,9 +61,9 @@ static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG  *yv12,
     // of the image.
     img->fmt = (vpx_img_fmt_t)(img->fmt | VPX_IMG_FMT_HIGHBITDEPTH);
     img->bit_depth = yv12->bit_depth;
-    img->planes[VPX_PLANE_Y] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->y_buffer);
-    img->planes[VPX_PLANE_U] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->u_buffer);
-    img->planes[VPX_PLANE_V] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->v_buffer);
+    img->planes[VPX_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer);
+    img->planes[VPX_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer);
+    img->planes[VPX_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer);
     img->planes[VPX_PLANE_ALPHA] = NULL;
     img->stride[VPX_PLANE_Y] = 2 * yv12->y_stride;
     img->stride[VPX_PLANE_U] = 2 * yv12->uv_stride;
@@ -84,17 +84,17 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
   yv12->u_buffer = img->planes[VPX_PLANE_U];
   yv12->v_buffer = img->planes[VPX_PLANE_V];
 
-  yv12->y_crop_width  = img->d_w;
+  yv12->y_crop_width = img->d_w;
   yv12->y_crop_height = img->d_h;
-  yv12->render_width  = img->r_w;
+  yv12->render_width = img->r_w;
   yv12->render_height = img->r_h;
-  yv12->y_width  = img->d_w;
+  yv12->y_width = img->d_w;
   yv12->y_height = img->d_h;
 
-  yv12->uv_width = img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2
-                                            : yv12->y_width;
-  yv12->uv_height = img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2
-                                             : yv12->y_height;
+  yv12->uv_width =
+      img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width;
+  yv12->uv_height =
+      img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height;
   yv12->uv_crop_width = yv12->uv_width;
   yv12->uv_crop_height = yv12->uv_height;
 
@@ -124,9 +124,9 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
   } else {
     yv12->flags = 0;
   }
-  yv12->border  = (yv12->y_stride - img->w) / 2;
+  yv12->border = (yv12->y_stride - img->w) / 2;
 #else
-  yv12->border  = (img->stride[VPX_PLANE_Y] - img->w) / 2;
+  yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
 #endif  // CONFIG_VPX_HIGHBITDEPTH
   yv12->subsampling_x = img->x_chroma_shift;
   yv12->subsampling_y = img->y_chroma_shift;
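
For illustration (not part of the patch): the reflowed ternaries keep the
round-up halving semantics, and the (1 + w) / 2 form matters for odd luma
dimensions. A minimal sketch of the same arithmetic:

    /* 4:2:0 chroma dimension with round-up halving, as in image2yuvconfig() */
    static unsigned int chroma_dim(unsigned int luma_dim, unsigned int shift) {
      return shift == 1 ? (1 + luma_dim) / 2 : luma_dim; /* 99 -> 50, not 49 */
    }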
diff --git a/vpx/internal/vpx_codec_internal.h b/vpx/internal/vpx_codec_internal.h
index c61b836831c18fa990ce21bf0761b6a99673431e..6a253a57e2438238d806ec3c56b661856c2e4574 100644
--- a/vpx/internal/vpx_codec_internal.h
+++ b/vpx/internal/vpx_codec_internal.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /*!\file
  * \brief Describes the decoder algorithm interface for algorithm
  *        implementations.
@@ -61,7 +60,7 @@ extern "C" {
  */
 #define VPX_CODEC_INTERNAL_ABI_VERSION (5) /**<\hideinitializer*/
 
-typedef struct vpx_codec_alg_priv  vpx_codec_alg_priv_t;
+typedef struct vpx_codec_alg_priv vpx_codec_alg_priv_t;
 typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t;
 
 /*!\brief init function pointer prototype
@@ -77,8 +76,8 @@ typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t;
  * \retval #VPX_CODEC_MEM_ERROR
  *     Memory operation failed.
  */
-typedef vpx_codec_err_t (*vpx_codec_init_fn_t)(vpx_codec_ctx_t *ctx,
-                                               vpx_codec_priv_enc_mr_cfg_t *data);
+typedef vpx_codec_err_t (*vpx_codec_init_fn_t)(
+    vpx_codec_ctx_t *ctx, vpx_codec_priv_enc_mr_cfg_t *data);
 
 /*!\brief destroy function pointer prototype
  *
@@ -112,8 +111,8 @@ typedef vpx_codec_err_t (*vpx_codec_destroy_fn_t)(vpx_codec_alg_priv_t *ctx);
  * \retval #VPX_CODEC_OK
  *     Bitstream is parsable and stream information updated
  */
-typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t         *data,
-                                                  unsigned int           data_sz,
+typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data,
+                                                  unsigned int data_sz,
                                                   vpx_codec_stream_info_t *si);
 
 /*!\brief Return information about the current stream.
@@ -129,7 +128,7 @@ typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t         *data,
  * \retval #VPX_CODEC_OK
  *     Bitstream is parsable and stream information updated
  */
-typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t    *ctx,
+typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx,
                                                  vpx_codec_stream_info_t *si);
 
 /*!\brief control function pointer prototype
@@ -193,11 +192,11 @@ typedef const struct vpx_codec_ctrl_fn_map {
  *         see the descriptions of the other error codes in ::vpx_codec_err_t
  *         for recoverability capabilities.
  */
-typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t  *ctx,
-                                                 const uint8_t         *data,
-                                                 unsigned int     data_sz,
-                                                 void        *user_priv,
-                                                 long         deadline);
+typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx,
+                                                 const uint8_t *data,
+                                                 unsigned int data_sz,
+                                                 void *user_priv,
+                                                 long deadline);
 
 /*!\brief Decoded frames iterator
  *
@@ -206,7 +205,8 @@ typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t  *ctx,
  * complete when this function returns NULL.
  *
  * The list of available frames becomes valid upon completion of the
- * vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode.
+ * vpx_codec_decode call, and remains valid until the next call to
+ * vpx_codec_decode.
  *
  * \param[in]     ctx      Pointer to this instance's context
  * \param[in out] iter     Iterator storage, initialized to NULL
@@ -215,7 +215,7 @@ typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t  *ctx,
  *         produced will always be in PTS (presentation time stamp) order.
  */
 typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
-                                                 vpx_codec_iter_t     *iter);
+                                                 vpx_codec_iter_t *iter);
 
 /*!\brief Pass in external frame buffers for the decoder to use.
  *
@@ -244,32 +244,28 @@ typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
  * buffers.
  */
 typedef vpx_codec_err_t (*vpx_codec_set_fb_fn_t)(
-    vpx_codec_alg_priv_t *ctx,
-    vpx_get_frame_buffer_cb_fn_t cb_get,
+    vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
     vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv);
 
+typedef vpx_codec_err_t (*vpx_codec_encode_fn_t)(vpx_codec_alg_priv_t *ctx,
+                                                 const vpx_image_t *img,
+                                                 vpx_codec_pts_t pts,
+                                                 unsigned long duration,
+                                                 vpx_enc_frame_flags_t flags,
+                                                 unsigned long deadline);
+typedef const vpx_codec_cx_pkt_t *(*vpx_codec_get_cx_data_fn_t)(
+    vpx_codec_alg_priv_t *ctx, vpx_codec_iter_t *iter);
 
-typedef vpx_codec_err_t (*vpx_codec_encode_fn_t)(vpx_codec_alg_priv_t  *ctx,
-                                                 const vpx_image_t     *img,
-                                                 vpx_codec_pts_t        pts,
-                                                 unsigned long          duration,
-                                                 vpx_enc_frame_flags_t  flags,
-                                                 unsigned long          deadline);
-typedef const vpx_codec_cx_pkt_t *(*vpx_codec_get_cx_data_fn_t)(vpx_codec_alg_priv_t *ctx,
-                                                                vpx_codec_iter_t     *iter);
-
-typedef vpx_codec_err_t
-(*vpx_codec_enc_config_set_fn_t)(vpx_codec_alg_priv_t       *ctx,
-                                 const vpx_codec_enc_cfg_t  *cfg);
-typedef vpx_fixed_buf_t *
-(*vpx_codec_get_global_headers_fn_t)(vpx_codec_alg_priv_t   *ctx);
+typedef vpx_codec_err_t (*vpx_codec_enc_config_set_fn_t)(
+    vpx_codec_alg_priv_t *ctx, const vpx_codec_enc_cfg_t *cfg);
+typedef vpx_fixed_buf_t *(*vpx_codec_get_global_headers_fn_t)(
+    vpx_codec_alg_priv_t *ctx);
 
-typedef vpx_image_t *
-(*vpx_codec_get_preview_frame_fn_t)(vpx_codec_alg_priv_t   *ctx);
+typedef vpx_image_t *(*vpx_codec_get_preview_frame_fn_t)(
+    vpx_codec_alg_priv_t *ctx);
 
-typedef vpx_codec_err_t
-(*vpx_codec_enc_mr_get_mem_loc_fn_t)(const vpx_codec_enc_cfg_t     *cfg,
-                                     void **mem_loc);
+typedef vpx_codec_err_t (*vpx_codec_enc_mr_get_mem_loc_fn_t)(
+    const vpx_codec_enc_cfg_t *cfg, void **mem_loc);
 
 /*!\brief usage configuration mapping
  *
@@ -282,7 +278,7 @@ typedef vpx_codec_err_t
  *
  */
 typedef const struct vpx_codec_enc_cfg_map {
-  int                 usage;
+  int usage;
   vpx_codec_enc_cfg_t cfg;
 } vpx_codec_enc_cfg_map_t;
 
@@ -291,41 +287,47 @@ typedef const struct vpx_codec_enc_cfg_map {
  * All decoders \ref MUST expose a variable of this type.
  */
 struct vpx_codec_iface {
-  const char               *name;        /**< Identification String  */
-  int                       abi_version; /**< Implemented ABI version */
-  vpx_codec_caps_t          caps;    /**< Decoder capabilities */
-  vpx_codec_init_fn_t       init;    /**< \copydoc ::vpx_codec_init_fn_t */
-  vpx_codec_destroy_fn_t    destroy;     /**< \copydoc ::vpx_codec_destroy_fn_t */
-  vpx_codec_ctrl_fn_map_t  *ctrl_maps;   /**< \copydoc ::vpx_codec_ctrl_fn_map_t */
+  const char *name;                   /**< Identification String  */
+  int abi_version;                    /**< Implemented ABI version */
+  vpx_codec_caps_t caps;              /**< Decoder capabilities */
+  vpx_codec_init_fn_t init;           /**< \copydoc ::vpx_codec_init_fn_t */
+  vpx_codec_destroy_fn_t destroy;     /**< \copydoc ::vpx_codec_destroy_fn_t */
+  vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */
   struct vpx_codec_dec_iface {
-    vpx_codec_peek_si_fn_t    peek_si;     /**< \copydoc ::vpx_codec_peek_si_fn_t */
-    vpx_codec_get_si_fn_t     get_si;      /**< \copydoc ::vpx_codec_get_si_fn_t */
-    vpx_codec_decode_fn_t     decode;      /**< \copydoc ::vpx_codec_decode_fn_t */
-    vpx_codec_get_frame_fn_t  get_frame;   /**< \copydoc ::vpx_codec_get_frame_fn_t */
-    vpx_codec_set_fb_fn_t     set_fb_fn;   /**< \copydoc ::vpx_codec_set_fb_fn_t */
+    vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */
+    vpx_codec_get_si_fn_t get_si;   /**< \copydoc ::vpx_codec_get_si_fn_t */
+    vpx_codec_decode_fn_t decode;   /**< \copydoc ::vpx_codec_decode_fn_t */
+    vpx_codec_get_frame_fn_t
+        get_frame;                   /**< \copydoc ::vpx_codec_get_frame_fn_t */
+    vpx_codec_set_fb_fn_t set_fb_fn; /**< \copydoc ::vpx_codec_set_fb_fn_t */
   } dec;
   struct vpx_codec_enc_iface {
-    int                                cfg_map_count;
-    vpx_codec_enc_cfg_map_t           *cfg_maps;      /**< \copydoc ::vpx_codec_enc_cfg_map_t */
-    vpx_codec_encode_fn_t              encode;        /**< \copydoc ::vpx_codec_encode_fn_t */
-    vpx_codec_get_cx_data_fn_t         get_cx_data;   /**< \copydoc ::vpx_codec_get_cx_data_fn_t */
-    vpx_codec_enc_config_set_fn_t      cfg_set;       /**< \copydoc ::vpx_codec_enc_config_set_fn_t */
-    vpx_codec_get_global_headers_fn_t  get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */
-    vpx_codec_get_preview_frame_fn_t   get_preview;   /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */
-    vpx_codec_enc_mr_get_mem_loc_fn_t  mr_get_mem_loc;   /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */
+    int cfg_map_count;
+    vpx_codec_enc_cfg_map_t
+        *cfg_maps;                /**< \copydoc ::vpx_codec_enc_cfg_map_t */
+    vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */
+    vpx_codec_get_cx_data_fn_t
+        get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */
+    vpx_codec_enc_config_set_fn_t
+        cfg_set; /**< \copydoc ::vpx_codec_enc_config_set_fn_t */
+    vpx_codec_get_global_headers_fn_t
+        get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */
+    vpx_codec_get_preview_frame_fn_t
+        get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */
+    vpx_codec_enc_mr_get_mem_loc_fn_t
+        mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */
   } enc;
 };
 
 /*!\brief Callback function pointer / user data pair storage */
 typedef struct vpx_codec_priv_cb_pair {
   union {
-    vpx_codec_put_frame_cb_fn_t    put_frame;
-    vpx_codec_put_slice_cb_fn_t    put_slice;
+    vpx_codec_put_frame_cb_fn_t put_frame;
+    vpx_codec_put_slice_cb_fn_t put_slice;
   } u;
-  void                            *user_priv;
+  void *user_priv;
 } vpx_codec_priv_cb_pair_t;
 
-
 /*!\brief Instance private storage
  *
  * This structure is allocated by the algorithm's init function. It can be
@@ -335,39 +337,38 @@ typedef struct vpx_codec_priv_cb_pair {
  * and the pointer cast to the proper type.
  */
 struct vpx_codec_priv {
-  const char                     *err_detail;
-  vpx_codec_flags_t               init_flags;
+  const char *err_detail;
+  vpx_codec_flags_t init_flags;
   struct {
-    vpx_codec_priv_cb_pair_t    put_frame_cb;
-    vpx_codec_priv_cb_pair_t    put_slice_cb;
+    vpx_codec_priv_cb_pair_t put_frame_cb;
+    vpx_codec_priv_cb_pair_t put_slice_cb;
   } dec;
   struct {
-    vpx_fixed_buf_t             cx_data_dst_buf;
-    unsigned int                cx_data_pad_before;
-    unsigned int                cx_data_pad_after;
-    vpx_codec_cx_pkt_t          cx_data_pkt;
-    unsigned int                total_encoders;
+    vpx_fixed_buf_t cx_data_dst_buf;
+    unsigned int cx_data_pad_before;
+    unsigned int cx_data_pad_after;
+    vpx_codec_cx_pkt_t cx_data_pkt;
+    unsigned int total_encoders;
   } enc;
 };
 
 /*
  * Multi-resolution encoding internal configuration
  */
-struct vpx_codec_priv_enc_mr_cfg
-{
-    unsigned int           mr_total_resolutions;
-    unsigned int           mr_encoder_id;
-    struct vpx_rational    mr_down_sampling_factor;
-    void*                  mr_low_res_mode_info;
+struct vpx_codec_priv_enc_mr_cfg {
+  unsigned int mr_total_resolutions;
+  unsigned int mr_encoder_id;
+  struct vpx_rational mr_down_sampling_factor;
+  void *mr_low_res_mode_info;
 };
 
 #undef VPX_CTRL_USE_TYPE
 #define VPX_CTRL_USE_TYPE(id, typ) \
-  static VPX_INLINE typ id##__value(va_list args) {return va_arg(args, typ);}
+  static VPX_INLINE typ id##__value(va_list args) { return va_arg(args, typ); }
 
 #undef VPX_CTRL_USE_TYPE_DEPRECATED
 #define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \
-  static VPX_INLINE typ id##__value(va_list args) {return va_arg(args, typ);}
+  static VPX_INLINE typ id##__value(va_list args) { return va_arg(args, typ); }
 
 #define CAST(id, arg) id##__value(arg)
 
@@ -380,10 +381,9 @@ struct vpx_codec_priv_enc_mr_cfg
  * the same name as the struct, less the _algo suffix. The CODEC_INTERFACE
  * macro is provided to define this getter function automatically.
  */
-#define CODEC_INTERFACE(id)\
-  vpx_codec_iface_t* id(void) { return &id##_algo; }\
-  vpx_codec_iface_t  id##_algo
-
+#define CODEC_INTERFACE(id)                          \
+  vpx_codec_iface_t *id(void) { return &id##_algo; } \
+  vpx_codec_iface_t id##_algo
 
 /* Internal Utility Functions
  *
@@ -391,38 +391,39 @@ struct vpx_codec_priv_enc_mr_cfg
  * utilities for manipulating vpx_codec_* data structures.
  */
 struct vpx_codec_pkt_list {
-  unsigned int            cnt;
-  unsigned int            max;
+  unsigned int cnt;
+  unsigned int max;
   struct vpx_codec_cx_pkt pkts[1];
 };
 
-#define vpx_codec_pkt_list_decl(n)\
-  union {struct vpx_codec_pkt_list head;\
-    struct {struct vpx_codec_pkt_list head;\
-      struct vpx_codec_cx_pkt    pkts[n];} alloc;}
-
-#define vpx_codec_pkt_list_init(m)\
-  (m)->alloc.head.cnt = 0,\
-                        (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0])
+#define vpx_codec_pkt_list_decl(n)     \
+  union {                              \
+    struct vpx_codec_pkt_list head;    \
+    struct {                           \
+      struct vpx_codec_pkt_list head;  \
+      struct vpx_codec_cx_pkt pkts[n]; \
+    } alloc;                           \
+  }
 
-int
-vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *,
-                       const struct vpx_codec_cx_pkt *);
+#define vpx_codec_pkt_list_init(m) \
+  (m)->alloc.head.cnt = 0,         \
+  (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0])
 
-const vpx_codec_cx_pkt_t *
-vpx_codec_pkt_list_get(struct vpx_codec_pkt_list *list,
-                       vpx_codec_iter_t           *iter);
+int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *,
+                           const struct vpx_codec_cx_pkt *);
 
+const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get(
+    struct vpx_codec_pkt_list *list, vpx_codec_iter_t *iter);
 
 #include <stdio.h>
 #include <setjmp.h>
 
 struct vpx_internal_error_info {
-  vpx_codec_err_t  error_code;
-  int              has_detail;
-  char             detail[80];
-  int              setjmp;
-  jmp_buf          jmp;
+  vpx_codec_err_t error_code;
+  int has_detail;
+  char detail[80];
+  int setjmp;
+  jmp_buf jmp;
 };
 
 #define CLANG_ANALYZER_NORETURN
@@ -434,8 +435,7 @@ struct vpx_internal_error_info {
 #endif
 
 void vpx_internal_error(struct vpx_internal_error_info *info,
-                        vpx_codec_err_t                 error,
-                        const char                     *fmt,
+                        vpx_codec_err_t error, const char *fmt,
                         ...) CLANG_ANALYZER_NORETURN;
 
 #ifdef __cplusplus
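
A usage note on the rewritten packet-list macros: vpx_codec_pkt_list_decl
still declares a fixed-capacity union sharing a common header, and
vpx_codec_pkt_list_init still resets it via the comma operator. A sketch,
assuming vpx/vpx_encoder.h provides vpx_codec_cx_pkt_t:

    #include "vpx/vpx_encoder.h"
    #include "vpx/internal/vpx_codec_internal.h"

    static vpx_codec_pkt_list_decl(8) pkt_list; /* room for 8 packets */

    static void reset_pkt_list(void) {
      vpx_codec_pkt_list_init(&pkt_list); /* cnt = 0, max = 8 */
    }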
diff --git a/vpx/src/svc_encodeframe.c b/vpx/src/svc_encodeframe.c
index a615a8aae74718b5d52064360b7a094ad71fceef..7f86b71619c256af530a29a102ae53278a0f6e13 100644
--- a/vpx/src/svc_encodeframe.c
+++ b/vpx/src/svc_encodeframe.c
@@ -33,8 +33,8 @@
 #ifndef MINGW_HAS_SECURE_API
 // proto from /usr/x86_64-w64-mingw32/include/sec_api/string_s.h
 _CRTIMP char *__cdecl strtok_s(char *str, const char *delim, char **context);
-#endif  /* MINGW_HAS_SECURE_API */
-#endif  /* __MINGW32__ */
+#endif /* MINGW_HAS_SECURE_API */
+#endif /* __MINGW32__ */
 
 #ifdef _MSC_VER
 #define strdup _strdup
@@ -47,13 +47,11 @@ _CRTIMP char *__cdecl strtok_s(char *str, const char *delim, char **context);
 
 #define MAX_QUANTIZER 63
 
-static const int DEFAULT_SCALE_FACTORS_NUM[VPX_SS_MAX_LAYERS] = {
-  4, 5, 7, 11, 16
-};
+static const int DEFAULT_SCALE_FACTORS_NUM[VPX_SS_MAX_LAYERS] = { 4, 5, 7, 11,
+                                                                  16 };
 
-static const int DEFAULT_SCALE_FACTORS_DEN[VPX_SS_MAX_LAYERS] = {
-  16, 16, 16, 16, 16
-};
+static const int DEFAULT_SCALE_FACTORS_DEN[VPX_SS_MAX_LAYERS] = { 16, 16, 16,
+                                                                  16, 16 };
 
 typedef enum {
   QUANTIZER = 0,
@@ -63,20 +61,17 @@ typedef enum {
   ALL_OPTION_TYPES
 } LAYER_OPTION_TYPE;
 
-static const int option_max_values[ALL_OPTION_TYPES] = {
-  63, INT_MAX, INT_MAX, 1
-};
+static const int option_max_values[ALL_OPTION_TYPES] = { 63, INT_MAX, INT_MAX,
+                                                         1 };
 
-static const int option_min_values[ALL_OPTION_TYPES] = {
-  0, 0, 1, 0
-};
+static const int option_min_values[ALL_OPTION_TYPES] = { 0, 0, 1, 0 };
 
 // One encoded frame
 typedef struct FrameData {
-  void                     *buf;    // compressed data buffer
-  size_t                    size;  // length of compressed data
-  vpx_codec_frame_flags_t   flags;    /**< flags for this frame */
-  struct FrameData         *next;
+  void *buf;                     // compressed data buffer
+  size_t size;                   // length of compressed data
+  vpx_codec_frame_flags_t flags; /**< flags for this frame */
+  struct FrameData *next;
 } FrameData;
 
 static SvcInternal_t *get_svc_internal(SvcContext *svc_ctx) {
@@ -91,8 +86,7 @@ static SvcInternal_t *get_svc_internal(SvcContext *svc_ctx) {
   return (SvcInternal_t *)svc_ctx->internal;
 }
 
-static const SvcInternal_t *get_const_svc_internal(
-    const SvcContext *svc_ctx) {
+static const SvcInternal_t *get_const_svc_internal(const SvcContext *svc_ctx) {
   if (svc_ctx == NULL) return NULL;
   return (const SvcInternal_t *)svc_ctx->internal;
 }
@@ -102,8 +96,8 @@ static void svc_log_reset(SvcContext *svc_ctx) {
   si->message_buffer[0] = '\0';
 }
 
-static int svc_log(SvcContext *svc_ctx, SVC_LOG_LEVEL level,
-                   const char *fmt, ...) {
+static int svc_log(SvcContext *svc_ctx, SVC_LOG_LEVEL level, const char *fmt,
+                   ...) {
   char buf[512];
   int retval = 0;
   va_list ap;
@@ -130,14 +124,11 @@ static int svc_log(SvcContext *svc_ctx, SVC_LOG_LEVEL level,
   return retval;
 }
 
-static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type,
-                                      char *input,
-                                      int *value0,
-                                      int *value1) {
+static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type, char *input,
+                                      int *value0, int *value1) {
   if (type == SCALE_FACTOR) {
     *value0 = strtol(input, &input, 10);
-    if (*input++ != '/')
-      return VPX_CODEC_INVALID_PARAM;
+    if (*input++ != '/') return VPX_CODEC_INVALID_PARAM;
     *value1 = strtol(input, &input, 10);
 
     if (*value0 < option_min_values[SCALE_FACTOR] ||
@@ -148,8 +139,7 @@ static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type,
       return VPX_CODEC_INVALID_PARAM;
   } else {
     *value0 = atoi(input);
-    if (*value0 < option_min_values[type] ||
-        *value0 > option_max_values[type])
+    if (*value0 < option_min_values[type] || *value0 > option_max_values[type])
       return VPX_CODEC_INVALID_PARAM;
   }
   return VPX_CODEC_OK;
@@ -176,8 +166,7 @@ static vpx_codec_err_t parse_layer_options_from_string(SvcContext *svc_ctx,
   for (i = 0; i < svc_ctx->spatial_layers; ++i) {
     if (token != NULL) {
       res = extract_option(type, token, option0 + i, option1 + i);
-      if (res != VPX_CODEC_OK)
-        break;
+      if (res != VPX_CODEC_OK) break;
       token = strtok_r(NULL, delim, &save_ptr);
     } else {
       break;
@@ -186,7 +175,8 @@ static vpx_codec_err_t parse_layer_options_from_string(SvcContext *svc_ctx,
   if (res == VPX_CODEC_OK && i != svc_ctx->spatial_layers) {
     svc_log(svc_ctx, SVC_LOG_ERROR,
             "svc: layer params type: %d    %d values required, "
-            "but only %d specified\n", type, svc_ctx->spatial_layers, i);
+            "but only %d specified\n",
+            type, svc_ctx->spatial_layers, i);
     res = VPX_CODEC_INVALID_PARAM;
   }
   free(input_string);
@@ -233,14 +223,14 @@ static vpx_codec_err_t parse_options(SvcContext *svc_ctx, const char *options) {
                                             si->svc_params.scaling_factor_den);
       if (res != VPX_CODEC_OK) break;
     } else if (strcmp("max-quantizers", option_name) == 0) {
-      res = parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value,
-                                            si->svc_params.max_quantizers,
-                                            NULL);
+      res =
+          parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value,
+                                          si->svc_params.max_quantizers, NULL);
       if (res != VPX_CODEC_OK) break;
     } else if (strcmp("min-quantizers", option_name) == 0) {
-      res = parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value,
-                                            si->svc_params.min_quantizers,
-                                            NULL);
+      res =
+          parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value,
+                                          si->svc_params.min_quantizers, NULL);
       if (res != VPX_CODEC_OK) break;
     } else if (strcmp("auto-alt-refs", option_name) == 0) {
       res = parse_layer_options_from_string(svc_ctx, AUTO_ALT_REF, option_value,
@@ -287,8 +277,7 @@ static vpx_codec_err_t parse_options(SvcContext *svc_ctx, const char *options) {
   return res;
 }
 
-vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx,
-                                    const char *options) {
+vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options) {
   SvcInternal_t *const si = get_svc_internal(svc_ctx);
   if (svc_ctx == NULL || options == NULL || si == NULL) {
     return VPX_CODEC_INVALID_PARAM;
@@ -308,30 +297,31 @@ void assign_layer_bitrates(const SvcContext *svc_ctx,
     if (si->bitrates[0] != 0) {
       enc_cfg->rc_target_bitrate = 0;
       for (sl = 0; sl < svc_ctx->spatial_layers; ++sl) {
-        enc_cfg->ss_target_bitrate[sl*svc_ctx->temporal_layers] = 0;
+        enc_cfg->ss_target_bitrate[sl * svc_ctx->temporal_layers] = 0;
         for (tl = 0; tl < svc_ctx->temporal_layers; ++tl) {
-          enc_cfg->ss_target_bitrate[sl*svc_ctx->temporal_layers]
-              += (unsigned int)si->bitrates[sl * svc_ctx->temporal_layers + tl];
-          enc_cfg->layer_target_bitrate[sl*svc_ctx->temporal_layers + tl]
-              = si->bitrates[sl * svc_ctx->temporal_layers + tl];
+          enc_cfg->ss_target_bitrate[sl * svc_ctx->temporal_layers] +=
+              (unsigned int)si->bitrates[sl * svc_ctx->temporal_layers + tl];
+          enc_cfg->layer_target_bitrate[sl * svc_ctx->temporal_layers + tl] =
+              si->bitrates[sl * svc_ctx->temporal_layers + tl];
         }
       }
     } else {
       float total = 0;
-      float alloc_ratio[VPX_MAX_LAYERS] = {0};
+      float alloc_ratio[VPX_MAX_LAYERS] = { 0 };
 
       for (sl = 0; sl < svc_ctx->spatial_layers; ++sl) {
         if (si->svc_params.scaling_factor_den[sl] > 0) {
-          alloc_ratio[sl] = (float)(si->svc_params.scaling_factor_num[sl] *
-              1.0 / si->svc_params.scaling_factor_den[sl]);
+          alloc_ratio[sl] =
+              (float)(si->svc_params.scaling_factor_num[sl] * 1.0 /
+                      si->svc_params.scaling_factor_den[sl]);
           total += alloc_ratio[sl];
         }
       }
 
       for (sl = 0; sl < svc_ctx->spatial_layers; ++sl) {
         enc_cfg->ss_target_bitrate[sl] = spatial_layer_target =
-            (unsigned int)(enc_cfg->rc_target_bitrate *
-                alloc_ratio[sl] / total);
+            (unsigned int)(enc_cfg->rc_target_bitrate * alloc_ratio[sl] /
+                           total);
         if (svc_ctx->temporal_layering_mode == 3) {
           enc_cfg->layer_target_bitrate[sl * svc_ctx->temporal_layers] =
               spatial_layer_target >> 1;
@@ -360,7 +350,7 @@ void assign_layer_bitrates(const SvcContext *svc_ctx,
       }
     } else {
       float total = 0;
-      float alloc_ratio[VPX_MAX_LAYERS] = {0};
+      float alloc_ratio[VPX_MAX_LAYERS] = { 0 };
 
       for (i = 0; i < svc_ctx->spatial_layers; ++i) {
         if (si->svc_params.scaling_factor_den[i] > 0) {
@@ -373,8 +363,9 @@ void assign_layer_bitrates(const SvcContext *svc_ctx,
       }
       for (i = 0; i < VPX_SS_MAX_LAYERS; ++i) {
         if (total > 0) {
-          enc_cfg->layer_target_bitrate[i] = (unsigned int)
-              (enc_cfg->rc_target_bitrate * alloc_ratio[i] / total);
+          enc_cfg->layer_target_bitrate[i] =
+              (unsigned int)(enc_cfg->rc_target_bitrate * alloc_ratio[i] /
+                             total);
         }
       }
     }
@@ -385,7 +376,7 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
                              vpx_codec_iface_t *iface,
                              vpx_codec_enc_cfg_t *enc_cfg) {
   vpx_codec_err_t res;
-  int i, sl , tl;
+  int i, sl, tl;
   SvcInternal_t *const si = get_svc_internal(svc_ctx);
   if (svc_ctx == NULL || codec_ctx == NULL || iface == NULL ||
       enc_cfg == NULL) {
@@ -440,23 +431,21 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
   res = parse_options(svc_ctx, si->options);
   if (res != VPX_CODEC_OK) return res;
 
-  if (svc_ctx->spatial_layers < 1)
-    svc_ctx->spatial_layers = 1;
+  if (svc_ctx->spatial_layers < 1) svc_ctx->spatial_layers = 1;
   if (svc_ctx->spatial_layers > VPX_SS_MAX_LAYERS)
     svc_ctx->spatial_layers = VPX_SS_MAX_LAYERS;
 
-  if (svc_ctx->temporal_layers < 1)
-    svc_ctx->temporal_layers = 1;
+  if (svc_ctx->temporal_layers < 1) svc_ctx->temporal_layers = 1;
   if (svc_ctx->temporal_layers > VPX_TS_MAX_LAYERS)
     svc_ctx->temporal_layers = VPX_TS_MAX_LAYERS;
 
   if (svc_ctx->temporal_layers * svc_ctx->spatial_layers > VPX_MAX_LAYERS) {
-      svc_log(svc_ctx, SVC_LOG_ERROR,
-          "spatial layers * temporal layers exceeds the maximum number of "
-          "allowed layers of %d\n",
-          svc_ctx->spatial_layers * svc_ctx->temporal_layers,
-          (int) VPX_MAX_LAYERS);
-      return VPX_CODEC_INVALID_PARAM;
+    svc_log(svc_ctx, SVC_LOG_ERROR,
+            "spatial layers * temporal layers exceeds the maximum number of "
+            "allowed layers of %d\n",
+            svc_ctx->spatial_layers * svc_ctx->temporal_layers,
+            (int)VPX_MAX_LAYERS);
+    return VPX_CODEC_INVALID_PARAM;
   }
   assign_layer_bitrates(svc_ctx, enc_cfg);
 
@@ -468,14 +457,13 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
   if (svc_ctx->temporal_layers > 1) {
     int i;
     for (i = 0; i < svc_ctx->temporal_layers; ++i) {
-      enc_cfg->ts_target_bitrate[i] = enc_cfg->rc_target_bitrate /
-                                      svc_ctx->temporal_layers;
+      enc_cfg->ts_target_bitrate[i] =
+          enc_cfg->rc_target_bitrate / svc_ctx->temporal_layers;
       enc_cfg->ts_rate_decimator[i] = 1 << (svc_ctx->temporal_layers - 1 - i);
     }
   }
 
-  if (svc_ctx->threads)
-    enc_cfg->g_threads = svc_ctx->threads;
+  if (svc_ctx->threads) enc_cfg->g_threads = svc_ctx->threads;
 
   // Modify encoder configuration
   enc_cfg->ss_number_layers = svc_ctx->spatial_layers;
@@ -513,10 +501,8 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
  * Encode a frame into multiple layers
  * Create a superframe containing the individual layers
  */
-vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx,
-                               vpx_codec_ctx_t *codec_ctx,
-                               struct vpx_image *rawimg,
-                               vpx_codec_pts_t pts,
+vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
+                               struct vpx_image *rawimg, vpx_codec_pts_t pts,
                                int64_t duration, int deadline) {
   vpx_codec_err_t res;
   vpx_codec_iter_t iter;
@@ -528,8 +514,8 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx,
 
   svc_log_reset(svc_ctx);
 
-  res = vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration, 0,
-                         deadline);
+  res =
+      vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration, 0, deadline);
   if (res != VPX_CODEC_OK) {
     return res;
   }
@@ -546,23 +532,20 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx,
           svc_log(svc_ctx, SVC_LOG_DEBUG,
                   "SVC frame: %d, layer: %d, PSNR(Total/Y/U/V): "
                   "%2.3f  %2.3f  %2.3f  %2.3f \n",
-                  si->psnr_pkt_received, i,
-                  cx_pkt->data.layer_psnr[i].psnr[0],
+                  si->psnr_pkt_received, i, cx_pkt->data.layer_psnr[i].psnr[0],
                   cx_pkt->data.layer_psnr[i].psnr[1],
                   cx_pkt->data.layer_psnr[i].psnr[2],
                   cx_pkt->data.layer_psnr[i].psnr[3]);
           svc_log(svc_ctx, SVC_LOG_DEBUG,
                   "SVC frame: %d, layer: %d, SSE(Total/Y/U/V): "
                   "%2.3f  %2.3f  %2.3f  %2.3f \n",
-                  si->psnr_pkt_received, i,
-                  cx_pkt->data.layer_psnr[i].sse[0],
+                  si->psnr_pkt_received, i, cx_pkt->data.layer_psnr[i].sse[0],
                   cx_pkt->data.layer_psnr[i].sse[1],
                   cx_pkt->data.layer_psnr[i].sse[2],
                   cx_pkt->data.layer_psnr[i].sse[3]);
 
           for (j = 0; j < COMPONENTS; ++j) {
-            si->psnr_sum[i][j] +=
-                cx_pkt->data.layer_psnr[i].psnr[j];
+            si->psnr_sum[i][j] += cx_pkt->data.layer_psnr[i].psnr[j];
             si->sse_sum[i][j] += cx_pkt->data.layer_psnr[i].sse[j];
           }
         }
@@ -577,9 +560,7 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx,
       }
 #endif
 #endif
-      default: {
-        break;
-      }
+      default: { break; }
     }
   }
 
@@ -617,7 +598,6 @@ const char *vpx_svc_dump_statistics(SvcContext *svc_ctx) {
 
   svc_log(svc_ctx, SVC_LOG_INFO, "\n");
   for (i = 0; i < svc_ctx->spatial_layers; ++i) {
-
     svc_log(svc_ctx, SVC_LOG_INFO,
             "Layer %d Average PSNR=[%2.3f, %2.3f, %2.3f, %2.3f], Bytes=[%u]\n",
             i, (double)si->psnr_sum[i][0] / number_of_frames,
@@ -668,4 +648,3 @@ void vpx_svc_release(SvcContext *svc_ctx) {
     svc_ctx->internal = NULL;
   }
 }
-
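
For illustration: extract_option's SCALE_FACTOR branch, reflowed above, still
parses "num/den" pairs with strtol and rejects values outside the
option_min_values/option_max_values bounds. A hypothetical call (names and
values illustrative only):

    int num = 0, den = 0;
    char spec[] = "7/16"; /* mutable buffer: strtol advances through it */
    if (extract_option(SCALE_FACTOR, spec, &num, &den) == VPX_CODEC_OK) {
      /* num == 7, den == 16; "7:16" would fail the '/' check instead */
    }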
diff --git a/vpx/src/vpx_codec.c b/vpx/src/vpx_codec.c
index 5a495ce814b814fe3fef105fb76894e754fda420..f222b9e5cb6f5073f551fec9f332bca7d01a2793 100644
--- a/vpx/src/vpx_codec.c
+++ b/vpx/src/vpx_codec.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /*!\file
  * \brief Provides the high level interface to wrap decoder algorithms.
  *
@@ -19,67 +18,50 @@
 #include "vpx/internal/vpx_codec_internal.h"
 #include "vpx_version.h"
 
-#define SAVE_STATUS(ctx,var) (ctx?(ctx->err = var):var)
-
-int vpx_codec_version(void) {
-  return VERSION_PACKED;
-}
-
-
-const char *vpx_codec_version_str(void) {
-  return VERSION_STRING_NOSP;
-}
+#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
 
+int vpx_codec_version(void) { return VERSION_PACKED; }
 
-const char *vpx_codec_version_extra_str(void) {
-  return VERSION_EXTRA;
-}
+const char *vpx_codec_version_str(void) { return VERSION_STRING_NOSP; }
 
+const char *vpx_codec_version_extra_str(void) { return VERSION_EXTRA; }
 
 const char *vpx_codec_iface_name(vpx_codec_iface_t *iface) {
   return iface ? iface->name : "<invalid interface>";
 }
 
-const char *vpx_codec_err_to_string(vpx_codec_err_t  err) {
+const char *vpx_codec_err_to_string(vpx_codec_err_t err) {
   switch (err) {
-    case VPX_CODEC_OK:
-      return "Success";
-    case VPX_CODEC_ERROR:
-      return "Unspecified internal error";
-    case VPX_CODEC_MEM_ERROR:
-      return "Memory allocation error";
-    case VPX_CODEC_ABI_MISMATCH:
-      return "ABI version mismatch";
+    case VPX_CODEC_OK: return "Success";
+    case VPX_CODEC_ERROR: return "Unspecified internal error";
+    case VPX_CODEC_MEM_ERROR: return "Memory allocation error";
+    case VPX_CODEC_ABI_MISMATCH: return "ABI version mismatch";
     case VPX_CODEC_INCAPABLE:
       return "Codec does not implement requested capability";
     case VPX_CODEC_UNSUP_BITSTREAM:
       return "Bitstream not supported by this decoder";
     case VPX_CODEC_UNSUP_FEATURE:
       return "Bitstream required feature not supported by this decoder";
-    case VPX_CODEC_CORRUPT_FRAME:
-      return "Corrupt frame detected";
-    case  VPX_CODEC_INVALID_PARAM:
-      return "Invalid parameter";
-    case VPX_CODEC_LIST_END:
-      return "End of iterated list";
+    case VPX_CODEC_CORRUPT_FRAME: return "Corrupt frame detected";
+    case VPX_CODEC_INVALID_PARAM: return "Invalid parameter";
+    case VPX_CODEC_LIST_END: return "End of iterated list";
   }
 
   return "Unrecognized error code";
 }
 
-const char *vpx_codec_error(vpx_codec_ctx_t  *ctx) {
+const char *vpx_codec_error(vpx_codec_ctx_t *ctx) {
   return (ctx) ? vpx_codec_err_to_string(ctx->err)
-         : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM);
+               : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM);
 }
 
-const char *vpx_codec_error_detail(vpx_codec_ctx_t  *ctx) {
+const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx) {
   if (ctx && ctx->err)
     return ctx->priv ? ctx->priv->err_detail : ctx->err_detail;
 
   return NULL;
 }
 
-
 vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) {
   vpx_codec_err_t res;
 
@@ -99,15 +81,11 @@ vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) {
   return SAVE_STATUS(ctx, res);
 }
 
-
 vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface) {
   return (iface) ? iface->caps : 0;
 }
 
-
-vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t  *ctx,
-                                   int               ctrl_id,
-                                   ...) {
+vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...) {
   vpx_codec_err_t res;
 
   if (!ctx || !ctrl_id)
@@ -121,7 +99,7 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t  *ctx,
 
     for (entry = ctx->iface->ctrl_maps; entry && entry->fn; entry++) {
       if (!entry->ctrl_id || entry->ctrl_id == ctrl_id) {
-        va_list  ap;
+        va_list ap;
 
         va_start(ap, ctrl_id);
         res = entry->fn((vpx_codec_alg_priv_t *)ctx->priv, ap);
@@ -135,16 +113,14 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t  *ctx,
 }
 
 void vpx_internal_error(struct vpx_internal_error_info *info,
-                        vpx_codec_err_t                 error,
-                        const char                     *fmt,
-                        ...) {
+                        vpx_codec_err_t error, const char *fmt, ...) {
   va_list ap;
 
   info->error_code = error;
   info->has_detail = 0;
 
   if (fmt) {
-    size_t  sz = sizeof(info->detail);
+    size_t sz = sizeof(info->detail);
 
     info->has_detail = 1;
     va_start(ap, fmt);
@@ -153,6 +129,5 @@ void vpx_internal_error(struct vpx_internal_error_info *info,
     info->detail[sz - 1] = '\0';
   }
 
-  if (info->setjmp)
-    longjmp(info->jmp, info->error_code);
+  if (info->setjmp) longjmp(info->jmp, info->error_code);
 }
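
A note on SAVE_STATUS: the reformatted macro is behavior-identical; it records
the status on the context when one exists and evaluates to that same status,
which is what lets each public entry point end in a single return. Longhand
equivalent (sketch only):

    static vpx_codec_err_t save_status(vpx_codec_ctx_t *ctx,
                                       vpx_codec_err_t res) {
      if (ctx) ctx->err = res; /* remember the latest error on the context */
      return res;
    }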
diff --git a/vpx/src/vpx_decoder.c b/vpx/src/vpx_decoder.c
index 802d8edd8a437a3e424941adb3273485ff00b23d..fc1c2bccae77f5067c26bd44af66e316d8908fbd 100644
--- a/vpx/src/vpx_decoder.c
+++ b/vpx/src/vpx_decoder.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /*!\file
  * \brief Provides the high level interface to wrap decoder algorithms.
  *
@@ -16,17 +15,16 @@
 #include <string.h>
 #include "vpx/internal/vpx_codec_internal.h"
 
-#define SAVE_STATUS(ctx,var) (ctx?(ctx->err = var):var)
+#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
 
 static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) {
   return (vpx_codec_alg_priv_t *)ctx->priv;
 }
 
-vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t      *ctx,
-                                       vpx_codec_iface_t    *iface,
+vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
+                                       vpx_codec_iface_t *iface,
                                        const vpx_codec_dec_cfg_t *cfg,
-                                       vpx_codec_flags_t     flags,
-                                       int                   ver) {
+                                       vpx_codec_flags_t flags, int ver) {
   vpx_codec_err_t res;
 
   if (ver != VPX_DECODER_ABI_VERSION)
@@ -35,7 +33,8 @@ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t      *ctx,
     res = VPX_CODEC_INVALID_PARAM;
   else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
     res = VPX_CODEC_ABI_MISMATCH;
-  else if ((flags & VPX_CODEC_USE_POSTPROC) && !(iface->caps & VPX_CODEC_CAP_POSTPROC))
+  else if ((flags & VPX_CODEC_USE_POSTPROC) &&
+           !(iface->caps & VPX_CODEC_CAP_POSTPROC))
     res = VPX_CODEC_INCAPABLE;
   else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) &&
            !(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT))
@@ -63,15 +62,14 @@ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t      *ctx,
   return SAVE_STATUS(ctx, res);
 }
 
-
-vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t       *iface,
-                                           const uint8_t         *data,
-                                           unsigned int           data_sz,
+vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
+                                           const uint8_t *data,
+                                           unsigned int data_sz,
                                            vpx_codec_stream_info_t *si) {
   vpx_codec_err_t res;
 
-  if (!iface || !data || !data_sz || !si
-      || si->sz < sizeof(vpx_codec_stream_info_t))
+  if (!iface || !data || !data_sz || !si ||
+      si->sz < sizeof(vpx_codec_stream_info_t))
     res = VPX_CODEC_INVALID_PARAM;
   else {
     /* Set default/unknown values */
@@ -84,8 +82,7 @@ vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t       *iface,
   return res;
 }
 
-
-vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t         *ctx,
+vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
                                           vpx_codec_stream_info_t *si) {
   vpx_codec_err_t res;
 
@@ -104,12 +101,9 @@ vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t         *ctx,
   return SAVE_STATUS(ctx, res);
 }
 
-
-vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t    *ctx,
-                                 const uint8_t        *data,
-                                 unsigned int    data_sz,
-                                 void       *user_priv,
-                                 long        deadline) {
+vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data,
+                                 unsigned int data_sz, void *user_priv,
+                                 long deadline) {
   vpx_codec_err_t res;
 
   /* Sanity checks */
@@ -126,8 +120,7 @@ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t    *ctx,
   return SAVE_STATUS(ctx, res);
 }
 
-vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t  *ctx,
-                                 vpx_codec_iter_t *iter) {
+vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter) {
   vpx_image_t *img;
 
   if (!ctx || !iter || !ctx->iface || !ctx->priv)
@@ -138,16 +131,15 @@ vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t  *ctx,
   return img;
 }
 
-
-vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t             *ctx,
-                                                vpx_codec_put_frame_cb_fn_t  cb,
-                                                void                      *user_priv) {
+vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
+                                                vpx_codec_put_frame_cb_fn_t cb,
+                                                void *user_priv) {
   vpx_codec_err_t res;
 
   if (!ctx || !cb)
     res = VPX_CODEC_INVALID_PARAM;
-  else if (!ctx->iface || !ctx->priv
-           || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
+  else if (!ctx->iface || !ctx->priv ||
+           !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
     res = VPX_CODEC_ERROR;
   else {
     ctx->priv->dec.put_frame_cb.u.put_frame = cb;
@@ -158,16 +150,15 @@ vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t             *ctx
   return SAVE_STATUS(ctx, res);
 }
 
-
-vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t             *ctx,
-                                                vpx_codec_put_slice_cb_fn_t  cb,
-                                                void                      *user_priv) {
+vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
+                                                vpx_codec_put_slice_cb_fn_t cb,
+                                                void *user_priv) {
   vpx_codec_err_t res;
 
   if (!ctx || !cb)
     res = VPX_CODEC_INVALID_PARAM;
-  else if (!ctx->iface || !ctx->priv
-           || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_SLICE))
+  else if (!ctx->iface || !ctx->priv ||
+           !(ctx->iface->caps & VPX_CODEC_CAP_PUT_SLICE))
     res = VPX_CODEC_ERROR;
   else {
     ctx->priv->dec.put_slice_cb.u.put_slice = cb;
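
For illustration, the iterator contract spelled out in vpx_codec_internal.h
(decoded frames stay valid only until the next vpx_codec_decode call) shows up
in caller code as the usual drain loop. Sketch, assuming an initialized ctx
and a just-decoded packet:

    vpx_codec_iter_t iter = NULL;
    vpx_image_t *img;
    while ((img = vpx_codec_get_frame(&ctx, &iter)) != NULL) {
      /* consume img here; the next decode call may invalidate it */
    }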
diff --git a/vpx/src/vpx_encoder.c b/vpx/src/vpx_encoder.c
index cd10c411ceaf5ec0e33e58da7bbbf61c4cc2f08e..4390cf7c8f1f5ecf8dd356b97a722a384f071113 100644
--- a/vpx/src/vpx_encoder.c
+++ b/vpx/src/vpx_encoder.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /*!\file
  * \brief Provides the high level interface to wrap encoder algorithms.
  *
@@ -18,17 +17,16 @@
 #include "vpx_config.h"
 #include "vpx/internal/vpx_codec_internal.h"
 
-#define SAVE_STATUS(ctx,var) (ctx?(ctx->err = var):var)
+#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
 
 static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) {
   return (vpx_codec_alg_priv_t *)ctx->priv;
 }
 
-vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t      *ctx,
-                                       vpx_codec_iface_t    *iface,
+vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
+                                       vpx_codec_iface_t *iface,
                                        const vpx_codec_enc_cfg_t *cfg,
-                                       vpx_codec_flags_t     flags,
-                                       int                   ver) {
+                                       vpx_codec_flags_t flags, int ver) {
   vpx_codec_err_t res;
 
   if (ver != VPX_ENCODER_ABI_VERSION)
@@ -39,11 +37,10 @@ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t      *ctx,
     res = VPX_CODEC_ABI_MISMATCH;
   else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
     res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_PSNR)
-           && !(iface->caps & VPX_CODEC_CAP_PSNR))
+  else if ((flags & VPX_CODEC_USE_PSNR) && !(iface->caps & VPX_CODEC_CAP_PSNR))
     res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
-           && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
+  else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION) &&
+           !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
     res = VPX_CODEC_INCAPABLE;
   else {
     ctx->iface = iface;
@@ -62,13 +59,9 @@ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t      *ctx,
   return SAVE_STATUS(ctx, res);
 }
 
-vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t      *ctx,
-                                             vpx_codec_iface_t    *iface,
-                                             vpx_codec_enc_cfg_t  *cfg,
-                                             int                   num_enc,
-                                             vpx_codec_flags_t     flags,
-                                             vpx_rational_t       *dsf,
-                                             int                   ver) {
+vpx_codec_err_t vpx_codec_enc_init_multi_ver(
+    vpx_codec_ctx_t *ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg,
+    int num_enc, vpx_codec_flags_t flags, vpx_rational_t *dsf, int ver) {
   vpx_codec_err_t res = VPX_CODEC_OK;
 
   if (ver != VPX_ENCODER_ABI_VERSION)
@@ -79,11 +72,10 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t      *ctx,
     res = VPX_CODEC_ABI_MISMATCH;
   else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
     res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_PSNR)
-           && !(iface->caps & VPX_CODEC_CAP_PSNR))
+  else if ((flags & VPX_CODEC_USE_PSNR) && !(iface->caps & VPX_CODEC_CAP_PSNR))
     res = VPX_CODEC_INCAPABLE;
-  else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
-           && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
+  else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION) &&
+           !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
     res = VPX_CODEC_INCAPABLE;
   else {
     int i;
@@ -110,8 +102,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t      *ctx,
          * resolution always use the same frame_type chosen by the
          * lowest-resolution encoder.
          */
-        if (mr_cfg.mr_encoder_id)
-          cfg->kf_mode = VPX_KF_DISABLED;
+        if (mr_cfg.mr_encoder_id) cfg->kf_mode = VPX_KF_DISABLED;
 
         ctx->iface = iface;
         ctx->name = iface->name;
@@ -121,8 +112,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t      *ctx,
         res = ctx->iface->init(ctx, &mr_cfg);
 
         if (res) {
-          const char *error_detail =
-            ctx->priv ? ctx->priv->err_detail : NULL;
+          const char *error_detail = ctx->priv ? ctx->priv->err_detail : NULL;
           /* Destroy current ctx */
           ctx->err_detail = error_detail;
           vpx_codec_destroy(ctx);
@@ -136,8 +126,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t      *ctx,
           }
         }
 
-        if (res)
-          break;
+        if (res) break;
 
         ctx++;
         cfg++;
@@ -150,10 +139,9 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t      *ctx,
   return SAVE_STATUS(ctx, res);
 }
 
-
-vpx_codec_err_t  vpx_codec_enc_config_default(vpx_codec_iface_t    *iface,
-                                              vpx_codec_enc_cfg_t  *cfg,
-                                              unsigned int          usage) {
+vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
+                                             vpx_codec_enc_cfg_t *cfg,
+                                             unsigned int usage) {
   vpx_codec_err_t res;
   vpx_codec_enc_cfg_map_t *map;
   int i;
@@ -179,30 +167,28 @@ vpx_codec_err_t  vpx_codec_enc_config_default(vpx_codec_iface_t    *iface,
   return res;
 }
 
-
 #if ARCH_X86 || ARCH_X86_64
 /* On X86, disable the x87 unit's internal 80 bit precision for better
  * consistency with the SSE unit's 64 bit precision.
  */
 #include "vpx_ports/x86.h"
-#define FLOATING_POINT_INIT() do {\
+#define FLOATING_POINT_INIT() \
+  do {                        \
     unsigned short x87_orig_mode = x87_set_double_precision();
-#define FLOATING_POINT_RESTORE() \
-  x87_set_control_word(x87_orig_mode); }while(0)
-
+#define FLOATING_POINT_RESTORE()       \
+  x87_set_control_word(x87_orig_mode); \
+  }                                    \
+  while (0)
 
 #else
 static void FLOATING_POINT_INIT() {}
 static void FLOATING_POINT_RESTORE() {}
 #endif
 
-
-vpx_codec_err_t  vpx_codec_encode(vpx_codec_ctx_t            *ctx,
-                                  const vpx_image_t          *img,
-                                  vpx_codec_pts_t             pts,
-                                  unsigned long               duration,
-                                  vpx_enc_frame_flags_t       flags,
-                                  unsigned long               deadline) {
+vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img,
+                                 vpx_codec_pts_t pts, unsigned long duration,
+                                 vpx_enc_frame_flags_t flags,
+                                 unsigned long deadline) {
   vpx_codec_err_t res = VPX_CODEC_OK;
 
   if (!ctx || (img && !duration))
@@ -220,8 +206,8 @@ vpx_codec_err_t  vpx_codec_encode(vpx_codec_ctx_t            *ctx,
     FLOATING_POINT_INIT();
 
     if (num_enc == 1)
-      res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts,
-                                   duration, flags, deadline);
+      res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts, duration, flags,
+                                   deadline);
     else {
       /* Multi-resolution encoding:
        * Encode multi-levels in reverse order. For example,
@@ -234,8 +220,8 @@ vpx_codec_err_t  vpx_codec_encode(vpx_codec_ctx_t            *ctx,
       if (img) img += num_enc - 1;
 
       for (i = num_enc - 1; i >= 0; i--) {
-        if ((res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts,
-                                          duration, flags, deadline)))
+        if ((res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts, duration,
+                                          flags, deadline)))
           break;
 
         ctx--;
@@ -250,7 +236,6 @@ vpx_codec_err_t  vpx_codec_encode(vpx_codec_ctx_t            *ctx,
   return SAVE_STATUS(ctx, res);
 }
 
-
 const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
                                                 vpx_codec_iter_t *iter) {
   const vpx_codec_cx_pkt_t *pkt = NULL;
@@ -273,18 +258,18 @@ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
     vpx_codec_priv_t *const priv = ctx->priv;
     char *const dst_buf = (char *)priv->enc.cx_data_dst_buf.buf;
 
-    if (dst_buf &&
-        pkt->data.raw.buf != dst_buf &&
+    if (dst_buf && pkt->data.raw.buf != dst_buf &&
         pkt->data.raw.sz + priv->enc.cx_data_pad_before +
-            priv->enc.cx_data_pad_after <= priv->enc.cx_data_dst_buf.sz) {
+                priv->enc.cx_data_pad_after <=
+            priv->enc.cx_data_dst_buf.sz) {
       vpx_codec_cx_pkt_t *modified_pkt = &priv->enc.cx_data_pkt;
 
       memcpy(dst_buf + priv->enc.cx_data_pad_before, pkt->data.raw.buf,
              pkt->data.raw.sz);
       *modified_pkt = *pkt;
       modified_pkt->data.raw.buf = dst_buf;
-      modified_pkt->data.raw.sz += priv->enc.cx_data_pad_before +
-                                       priv->enc.cx_data_pad_after;
+      modified_pkt->data.raw.sz +=
+          priv->enc.cx_data_pad_before + priv->enc.cx_data_pad_after;
       pkt = modified_pkt;
     }
 
@@ -297,13 +282,11 @@ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
   return pkt;
 }
 
-
-vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t       *ctx,
+vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
                                           const vpx_fixed_buf_t *buf,
-                                          unsigned int           pad_before,
-                                          unsigned int           pad_after) {
-  if (!ctx || !ctx->priv)
-    return VPX_CODEC_INVALID_PARAM;
+                                          unsigned int pad_before,
+                                          unsigned int pad_after) {
+  if (!ctx || !ctx->priv) return VPX_CODEC_INVALID_PARAM;
 
   if (buf) {
     ctx->priv->enc.cx_data_dst_buf = *buf;
@@ -319,8 +302,7 @@ vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t       *ctx,
   return VPX_CODEC_OK;
 }
 
-
-const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t   *ctx) {
+const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) {
   vpx_image_t *img = NULL;
 
   if (ctx) {
@@ -337,8 +319,7 @@ const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t   *ctx) {
   return img;
 }
 
-
-vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t   *ctx) {
+vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) {
   vpx_fixed_buf_t *buf = NULL;
 
   if (ctx) {
@@ -355,9 +336,8 @@ vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t   *ctx) {
   return buf;
 }
 
-
-vpx_codec_err_t  vpx_codec_enc_config_set(vpx_codec_ctx_t            *ctx,
-                                          const vpx_codec_enc_cfg_t  *cfg) {
+vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
+                                         const vpx_codec_enc_cfg_t *cfg) {
   vpx_codec_err_t res;
 
   if (!ctx || !ctx->iface || !ctx->priv || !cfg)
@@ -370,7 +350,6 @@ vpx_codec_err_t  vpx_codec_enc_config_set(vpx_codec_ctx_t            *ctx,
   return SAVE_STATUS(ctx, res);
 }
 
-
 int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list,
                            const struct vpx_codec_cx_pkt *pkt) {
   if (list->cnt < list->max) {
@@ -381,9 +360,8 @@ int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list,
   return 1;
 }
 
-
-const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get(struct vpx_codec_pkt_list *list,
-                                                 vpx_codec_iter_t           *iter) {
+const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get(
+    struct vpx_codec_pkt_list *list, vpx_codec_iter_t *iter) {
   const vpx_codec_cx_pkt_t *pkt;
 
   if (!(*iter)) {
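
A note on the x87 macros: FLOATING_POINT_INIT opens a do { scope that holds
the saved control word and FLOATING_POINT_RESTORE closes it with } while (0),
so the two must bracket a single statement block, exactly as vpx_codec_encode
uses them. Sketch (do_encode_work is hypothetical):

    FLOATING_POINT_INIT();    /* opens the scope, saves the x87 control word */
    do_encode_work();         /* hypothetical encode at 64-bit precision */
    FLOATING_POINT_RESTORE(); /* restores the control word, closes the scope */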
diff --git a/vpx/src/vpx_image.c b/vpx/src/vpx_image.c
index 9aae12c794ba225ed78a74eb6c36108c431910ef..dba439c10a8490f82e1a7dc03f2f90f7f66309ca 100644
--- a/vpx/src/vpx_image.c
+++ b/vpx/src/vpx_image.c
@@ -15,10 +15,8 @@
 #include "vpx/vpx_integer.h"
 #include "vpx_mem/vpx_mem.h"
 
-static vpx_image_t *img_alloc_helper(vpx_image_t *img,
-                                     vpx_img_fmt_t fmt,
-                                     unsigned int d_w,
-                                     unsigned int d_h,
+static vpx_image_t *img_alloc_helper(vpx_image_t *img, vpx_img_fmt_t fmt,
+                                     unsigned int d_w, unsigned int d_h,
                                      unsigned int buf_align,
                                      unsigned int stride_align,
                                      unsigned char *img_data) {
@@ -27,68 +25,44 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
   int align;
 
   /* Treat align==0 like align==1 */
-  if (!buf_align)
-    buf_align = 1;
+  if (!buf_align) buf_align = 1;
 
   /* Validate alignment (must be power of 2) */
-  if (buf_align & (buf_align - 1))
-    goto fail;
+  if (buf_align & (buf_align - 1)) goto fail;
 
   /* Treat align==0 like align==1 */
-  if (!stride_align)
-    stride_align = 1;
+  if (!stride_align) stride_align = 1;
 
   /* Validate alignment (must be power of 2) */
-  if (stride_align & (stride_align - 1))
-    goto fail;
+  if (stride_align & (stride_align - 1)) goto fail;
 
   /* Get sample size for this format */
   switch (fmt) {
     case VPX_IMG_FMT_RGB32:
     case VPX_IMG_FMT_RGB32_LE:
     case VPX_IMG_FMT_ARGB:
-    case VPX_IMG_FMT_ARGB_LE:
-      bps = 32;
-      break;
+    case VPX_IMG_FMT_ARGB_LE: bps = 32; break;
     case VPX_IMG_FMT_RGB24:
-    case VPX_IMG_FMT_BGR24:
-      bps = 24;
-      break;
+    case VPX_IMG_FMT_BGR24: bps = 24; break;
     case VPX_IMG_FMT_RGB565:
     case VPX_IMG_FMT_RGB565_LE:
     case VPX_IMG_FMT_RGB555:
     case VPX_IMG_FMT_RGB555_LE:
     case VPX_IMG_FMT_UYVY:
     case VPX_IMG_FMT_YUY2:
-    case VPX_IMG_FMT_YVYU:
-      bps = 16;
-      break;
+    case VPX_IMG_FMT_YVYU: bps = 16; break;
     case VPX_IMG_FMT_I420:
     case VPX_IMG_FMT_YV12:
     case VPX_IMG_FMT_VPXI420:
-    case VPX_IMG_FMT_VPXYV12:
-      bps = 12;
-      break;
+    case VPX_IMG_FMT_VPXYV12: bps = 12; break;
     case VPX_IMG_FMT_I422:
-    case VPX_IMG_FMT_I440:
-      bps = 16;
-      break;
-    case VPX_IMG_FMT_I444:
-      bps = 24;
-      break;
-    case VPX_IMG_FMT_I42016:
-      bps = 24;
-      break;
+    case VPX_IMG_FMT_I440: bps = 16; break;
+    case VPX_IMG_FMT_I444: bps = 24; break;
+    case VPX_IMG_FMT_I42016: bps = 24; break;
     case VPX_IMG_FMT_I42216:
-    case VPX_IMG_FMT_I44016:
-      bps = 32;
-      break;
-    case VPX_IMG_FMT_I44416:
-      bps = 48;
-      break;
-    default:
-      bps = 16;
-      break;
+    case VPX_IMG_FMT_I44016: bps = 32; break;
+    case VPX_IMG_FMT_I44416: bps = 48; break;
+    default: bps = 16; break;
   }
 
   /* Get chroma shift values for this format */
@@ -99,12 +73,8 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
     case VPX_IMG_FMT_VPXYV12:
     case VPX_IMG_FMT_I422:
     case VPX_IMG_FMT_I42016:
-    case VPX_IMG_FMT_I42216:
-      xcs = 1;
-      break;
-    default:
-      xcs = 0;
-      break;
+    case VPX_IMG_FMT_I42216: xcs = 1; break;
+    default: xcs = 0; break;
   }
 
   switch (fmt) {
@@ -114,12 +84,8 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
     case VPX_IMG_FMT_VPXI420:
     case VPX_IMG_FMT_VPXYV12:
     case VPX_IMG_FMT_I42016:
-    case VPX_IMG_FMT_I44016:
-      ycs = 1;
-      break;
-    default:
-      ycs = 0;
-      break;
+    case VPX_IMG_FMT_I44016: ycs = 1; break;
+    default: ycs = 0; break;
   }
 
   /* Calculate storage sizes given the chroma subsampling */
@@ -135,8 +101,7 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
   if (!img) {
     img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));
 
-    if (!img)
-      goto fail;
+    if (!img) goto fail;
 
     img->self_allocd = 1;
   } else {
@@ -146,18 +111,17 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
   img->img_data = img_data;
 
   if (!img_data) {
-    const uint64_t alloc_size = (fmt & VPX_IMG_FMT_PLANAR) ?
-                                (uint64_t)h * s * bps / 8 : (uint64_t)h * s;
+    const uint64_t alloc_size = (fmt & VPX_IMG_FMT_PLANAR)
+                                    ? (uint64_t)h * s * bps / 8
+                                    : (uint64_t)h * s;
 
-    if (alloc_size != (size_t)alloc_size)
-      goto fail;
+    if (alloc_size != (size_t)alloc_size) goto fail;
 
     img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size);
     img->img_data_owner = 1;
   }
 
-  if (!img->img_data)
-    goto fail;
+  if (!img->img_data) goto fail;
 
   img->fmt = fmt;
   img->bit_depth = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 16 : 8;
@@ -172,39 +136,30 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
   img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = stride_in_bytes >> xcs;
 
   /* Default viewport to entire image */
-  if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
-    return img;
+  if (!vpx_img_set_rect(img, 0, 0, d_w, d_h)) return img;
 
 fail:
   vpx_img_free(img);
   return NULL;
 }
 
-vpx_image_t *vpx_img_alloc(vpx_image_t  *img,
-                           vpx_img_fmt_t fmt,
-                           unsigned int  d_w,
-                           unsigned int  d_h,
-                           unsigned int  align) {
+vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt,
+                           unsigned int d_w, unsigned int d_h,
+                           unsigned int align) {
   return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
 }
 
-vpx_image_t *vpx_img_wrap(vpx_image_t  *img,
-                          vpx_img_fmt_t fmt,
-                          unsigned int  d_w,
-                          unsigned int  d_h,
-                          unsigned int  stride_align,
-                          unsigned char       *img_data) {
+vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w,
+                          unsigned int d_h, unsigned int stride_align,
+                          unsigned char *img_data) {
   /* By setting buf_align = 1, we don't change buffer alignment in this
    * function. */
   return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
 }
 
-int vpx_img_set_rect(vpx_image_t  *img,
-                     unsigned int  x,
-                     unsigned int  y,
-                     unsigned int  w,
-                     unsigned int  h) {
-  unsigned char      *data;
+int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y,
+                     unsigned int w, unsigned int h) {
+  unsigned char *data;
 
   if (x + w <= img->w && y + h <= img->h) {
     img->d_w = w;
@@ -213,7 +168,7 @@ int vpx_img_set_rect(vpx_image_t  *img,
     /* Calculate plane pointers */
     if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
       img->planes[VPX_PLANE_PACKED] =
-        img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
+          img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
     } else {
       const int bytes_per_sample =
           (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
@@ -225,8 +180,8 @@ int vpx_img_set_rect(vpx_image_t  *img,
         data += img->h * img->stride[VPX_PLANE_ALPHA];
       }
 
-      img->planes[VPX_PLANE_Y] = data + x * bytes_per_sample +
-          y * img->stride[VPX_PLANE_Y];
+      img->planes[VPX_PLANE_Y] =
+          data + x * bytes_per_sample + y * img->stride[VPX_PLANE_Y];
       data += img->h * img->stride[VPX_PLANE_Y];
 
       if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
@@ -262,24 +217,23 @@ void vpx_img_flip(vpx_image_t *img) {
   img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
   img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];
 
-  img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
-                              * img->stride[VPX_PLANE_U];
+  img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
+                              img->stride[VPX_PLANE_U];
   img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];
 
-  img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
-                              * img->stride[VPX_PLANE_V];
+  img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
+                              img->stride[VPX_PLANE_V];
   img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];
 
-  img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
+  img->planes[VPX_PLANE_ALPHA] +=
+      (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
   img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
 }
 
 void vpx_img_free(vpx_image_t *img) {
   if (img) {
-    if (img->img_data && img->img_data_owner)
-      vpx_free(img->img_data);
+    if (img->img_data && img->img_data_owner) vpx_free(img->img_data);
 
-    if (img->self_allocd)
-      free(img);
+    if (img->self_allocd) free(img);
   }
 }
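img_alloc_helper() above validates alignments with the usual power-of-two test, x & (x - 1) == 0, after promoting 0 to 1, and vpx_img_set_rect() returns 0 on success. A minimal allocation round trip under those rules:

int image_roundtrip(void) {
  vpx_image_t img;

  /* 640x480 I420 with buffer and strides aligned to 32 bytes. */
  if (!vpx_img_alloc(&img, VPX_IMG_FMT_I420, 640, 480, 32))
    return -1; /* non-power-of-two alignment or allocation failure */

  /* Crop a centered 320x240 viewport; nonzero means the rect was invalid. */
  if (vpx_img_set_rect(&img, 160, 120, 320, 240)) {
    vpx_img_free(&img);
    return -1;
  }

  vpx_img_free(&img);
  return 0;
}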
diff --git a/vpx/svc_context.h b/vpx/svc_context.h
index 5bc25189bac9ab0fcd322b128c8f3b14b67b46aa..c8bde5832a5b17d0b6406d4a9ffc99b51f04eb95 100644
--- a/vpx/svc_context.h
+++ b/vpx/svc_context.h
@@ -31,14 +31,14 @@ typedef enum SVC_LOG_LEVEL {
 
 typedef struct {
   // public interface to svc_command options
-  int spatial_layers;               // number of spatial layers
-  int temporal_layers;               // number of temporal layers
+  int spatial_layers;   // number of spatial layers
+  int temporal_layers;  // number of temporal layers
   int temporal_layering_mode;
   SVC_LOG_LEVEL log_level;  // amount of information to display
-  int log_print;  // when set, printf log messages instead of returning the
-                  // message with svc_get_message
+  int log_print;       // when set, printf log messages instead of returning the
+                       // message with svc_get_message
   int output_rc_stat;  // for outputting rc stats
-  int speed;  // speed setting for codec
+  int speed;           // speed setting for codec
   int threads;
   int aqmode;  // turns on aq-mode=3 (cyclic_refresh): 0=off, 1=on.
   // private storage for vpx_svc_encode
@@ -49,7 +49,7 @@ typedef struct {
 #define COMPONENTS 4  // psnr & sse statistics maintained for total, y, u, v
 
 typedef struct SvcInternal {
-  char options[OPTION_BUFFER_SIZE];        // set by vpx_svc_set_options
+  char options[OPTION_BUFFER_SIZE];  // set by vpx_svc_set_options
 
   // values extracted from option, quantizers
   vpx_svc_extra_cfg_t svc_params;
@@ -57,7 +57,7 @@ typedef struct SvcInternal {
   int bitrates[VPX_SS_MAX_LAYERS];
 
   // accumulated statistics
-  double psnr_sum[VPX_SS_MAX_LAYERS][COMPONENTS];   // total/Y/U/V
+  double psnr_sum[VPX_SS_MAX_LAYERS][COMPONENTS];  // total/Y/U/V
   uint64_t sse_sum[VPX_SS_MAX_LAYERS][COMPONENTS];
   uint32_t bytes_sum[VPX_SS_MAX_LAYERS];
 
@@ -88,17 +88,14 @@ vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options);
 /**
  * initialize SVC encoding
  */
-vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx,
-                             vpx_codec_ctx_t *codec_ctx,
+vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
                              vpx_codec_iface_t *iface,
                              vpx_codec_enc_cfg_t *cfg);
 /**
  * encode a frame of video with multiple layers
  */
-vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx,
-                               vpx_codec_ctx_t *codec_ctx,
-                               struct vpx_image *rawimg,
-                               vpx_codec_pts_t pts,
+vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
+                               struct vpx_image *rawimg, vpx_codec_pts_t pts,
                                int64_t duration, int deadline);
 
 /**
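For orientation, a sketch of the call sequence these declarations imply. The option-string syntax and the SVC_LOG_INFO enumerator are assumptions not visible in this patch; codec, cfg, raw, pts, and duration are presumed set up by the caller (standard headers omitted):

int svc_flow(vpx_codec_ctx_t *codec, vpx_codec_enc_cfg_t *cfg,
             struct vpx_image *raw, vpx_codec_pts_t pts, int64_t duration) {
  SvcContext svc;
  memset(&svc, 0, sizeof(svc)); /* needs <string.h> */
  svc.spatial_layers = 2;
  svc.temporal_layers = 1;
  svc.log_level = SVC_LOG_INFO;

  if (vpx_svc_set_options(&svc, "scale-factors=1/2,1/1") != VPX_CODEC_OK ||
      vpx_svc_init(&svc, codec, vpx_codec_vp9_cx(), cfg) != VPX_CODEC_OK)
    return -1;

  /* Then, once per input frame: */
  return vpx_svc_encode(&svc, codec, raw, pts, duration, VPX_DL_GOOD_QUALITY);
}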
diff --git a/vpx/vp8.h b/vpx/vp8.h
index 8a035f9770735d21c0e7dc3aeda0b40262015139..1cc3d5f8d60ac26f5aadc007d7b7c70cc672fc4d 100644
--- a/vpx/vp8.h
+++ b/vpx/vp8.h
@@ -42,21 +42,24 @@ extern "C" {
  * The set of macros define the control functions of VP8 interface
  */
 enum vp8_com_control_id {
-  VP8_SET_REFERENCE           = 1,    /**< pass in an external frame into decoder to be used as reference frame */
-  VP8_COPY_REFERENCE          = 2,    /**< get a copy of reference frame from the decoder */
-  VP8_SET_POSTPROC            = 3,    /**< set the decoder's post processing settings  */
-  VP8_SET_DBG_COLOR_REF_FRAME = 4,    /**< set the reference frames to color for each macroblock */
-  VP8_SET_DBG_COLOR_MB_MODES  = 5,    /**< set which macro block modes to color */
-  VP8_SET_DBG_COLOR_B_MODES   = 6,    /**< set which blocks modes to color */
-  VP8_SET_DBG_DISPLAY_MV      = 7,    /**< set which motion vector modes to draw */
+  VP8_SET_REFERENCE =
+      1, /**< pass an external frame into the decoder to be used as a
+            reference frame */
+  VP8_COPY_REFERENCE =
+      2, /**< get a copy of the reference frame from the decoder */
+  VP8_SET_POSTPROC = 3,   /**< set the decoder's post processing settings  */
+  VP8_SET_DBG_COLOR_REF_FRAME =
+      4, /**< set the reference frames to color for each macroblock */
+  VP8_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */
+  VP8_SET_DBG_COLOR_B_MODES = 6,  /**< set which block modes to color */
+  VP8_SET_DBG_DISPLAY_MV = 7,     /**< set which motion vector modes to draw */
 
   /* TODO(jkoleszar): The encoder incorrectly reuses some of these values (5+)
    * for its control ids. These should be migrated to something like the
    * VP8_DECODER_CTRL_ID_START range next time we're ready to break the ABI.
    */
-  VP9_GET_REFERENCE           = 128,  /**< get a pointer to a reference frame */
+  VP9_GET_REFERENCE = 128, /**< get a pointer to a reference frame */
   VP8_COMMON_CTRL_ID_MAX,
-  VP8_DECODER_CTRL_ID_START   = 256
+  VP8_DECODER_CTRL_ID_START = 256
 };
 
 /*!\brief post process flags
@@ -64,15 +67,16 @@ enum vp8_com_control_id {
  * The set of macros define VP8 decoder post processing flags
  */
 enum vp8_postproc_level {
-  VP8_NOFILTERING             = 0,
-  VP8_DEBLOCK                 = 1 << 0,
-  VP8_DEMACROBLOCK            = 1 << 1,
-  VP8_ADDNOISE                = 1 << 2,
-  VP8_DEBUG_TXT_FRAME_INFO    = 1 << 3, /**< print frame information */
-  VP8_DEBUG_TXT_MBLK_MODES    = 1 << 4, /**< print macro block modes over each macro block */
-  VP8_DEBUG_TXT_DC_DIFF       = 1 << 5, /**< print dc diff for each macro block */
-  VP8_DEBUG_TXT_RATE_INFO     = 1 << 6, /**< print video rate info (encoder only) */
-  VP8_MFQE                    = 1 << 10
+  VP8_NOFILTERING = 0,
+  VP8_DEBLOCK = 1 << 0,
+  VP8_DEMACROBLOCK = 1 << 1,
+  VP8_ADDNOISE = 1 << 2,
+  VP8_DEBUG_TXT_FRAME_INFO = 1 << 3, /**< print frame information */
+  VP8_DEBUG_TXT_MBLK_MODES =
+      1 << 4, /**< print macro block modes over each macro block */
+  VP8_DEBUG_TXT_DC_DIFF = 1 << 5,   /**< print dc diff for each macro block */
+  VP8_DEBUG_TXT_RATE_INFO = 1 << 6, /**< print video rate info (encoder only) */
+  VP8_MFQE = 1 << 10
 };
 
 /*!\brief post process flags
@@ -83,9 +87,10 @@ enum vp8_postproc_level {
  */
 
 typedef struct vp8_postproc_cfg {
-  int post_proc_flag;         /**< the types of post processing to be done, should be combination of "vp8_postproc_level" */
-  int deblocking_level;       /**< the strength of deblocking, valid range [0, 16] */
-  int noise_level;            /**< the strength of additive noise, valid range [0, 16] */
+  int post_proc_flag;   /**< the types of post processing to be done; should be
+                           a combination of "vp8_postproc_level" flags */
+  int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */
+  int noise_level; /**< the strength of additive noise, valid range [0, 16] */
 } vp8_postproc_cfg_t;
 
 /*!\brief reference frame type
@@ -103,8 +108,8 @@ typedef enum vpx_ref_frame_type {
  * Define the data struct to access vp8 reference frames.
  */
 typedef struct vpx_ref_frame {
-  vpx_ref_frame_type_t  frame_type;   /**< which reference frame */
-  vpx_image_t           img;          /**< reference frame data in image format */
+  vpx_ref_frame_type_t frame_type; /**< which reference frame */
+  vpx_image_t img;                 /**< reference frame data in image format */
 } vpx_ref_frame_t;
 
 /*!\brief VP9 specific reference frame data struct
@@ -112,8 +117,8 @@ typedef struct vpx_ref_frame {
  * Define the data struct to access vp9 reference frames.
  */
 typedef struct vp9_ref_frame {
-  int idx; /**< frame index to get (input) */
-  vpx_image_t  img; /**< img structure to populate (output) */
+  int idx;         /**< frame index to get (input) */
+  vpx_image_t img; /**< img structure to populate (output) */
 } vp9_ref_frame_t;
 
 /*!\cond */
@@ -121,21 +126,21 @@ typedef struct vp9_ref_frame {
  *
  * defines the data type for each of VP8 decoder control function requires
  */
-VPX_CTRL_USE_TYPE(VP8_SET_REFERENCE,           vpx_ref_frame_t *)
+VPX_CTRL_USE_TYPE(VP8_SET_REFERENCE, vpx_ref_frame_t *)
 #define VPX_CTRL_VP8_SET_REFERENCE
-VPX_CTRL_USE_TYPE(VP8_COPY_REFERENCE,          vpx_ref_frame_t *)
+VPX_CTRL_USE_TYPE(VP8_COPY_REFERENCE, vpx_ref_frame_t *)
 #define VPX_CTRL_VP8_COPY_REFERENCE
-VPX_CTRL_USE_TYPE(VP8_SET_POSTPROC,            vp8_postproc_cfg_t *)
+VPX_CTRL_USE_TYPE(VP8_SET_POSTPROC, vp8_postproc_cfg_t *)
 #define VPX_CTRL_VP8_SET_POSTPROC
 VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_REF_FRAME, int)
 #define VPX_CTRL_VP8_SET_DBG_COLOR_REF_FRAME
-VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_MB_MODES,  int)
+VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_MB_MODES, int)
 #define VPX_CTRL_VP8_SET_DBG_COLOR_MB_MODES
-VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_B_MODES,   int)
+VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_B_MODES, int)
 #define VPX_CTRL_VP8_SET_DBG_COLOR_B_MODES
-VPX_CTRL_USE_TYPE(VP8_SET_DBG_DISPLAY_MV,      int)
+VPX_CTRL_USE_TYPE(VP8_SET_DBG_DISPLAY_MV, int)
 #define VPX_CTRL_VP8_SET_DBG_DISPLAY_MV
-VPX_CTRL_USE_TYPE(VP9_GET_REFERENCE,           vp9_ref_frame_t *)
+VPX_CTRL_USE_TYPE(VP9_GET_REFERENCE, vp9_ref_frame_t *)
 #define VPX_CTRL_VP9_GET_REFERENCE
 
 /*!\endcond */
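Concretely, the control id and config struct above combine like this on a decoder context, keeping both levels inside the documented [0, 16] ranges:

void enable_postproc(vpx_codec_ctx_t *decoder) {
  vp8_postproc_cfg_t pp;

  pp.post_proc_flag = VP8_DEBLOCK | VP8_DEMACROBLOCK;
  pp.deblocking_level = 8; /* valid range [0, 16] */
  pp.noise_level = 0;      /* valid range [0, 16] */
  vpx_codec_control(decoder, VP8_SET_POSTPROC, &pp);
}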
diff --git a/vpx/vp8cx.h b/vpx/vp8cx.h
index 3344307bdff1bbb2a643651442ff7709027717d2..835ea78dee329d8df94cccdbb8098bcb043681e8 100644
--- a/vpx/vp8cx.h
+++ b/vpx/vp8cx.h
@@ -32,7 +32,7 @@ extern "C" {
  * This interface provides the capability to encode raw VP10 streams.
  * @{
  */
-extern vpx_codec_iface_t  vpx_codec_vp10_cx_algo;
+extern vpx_codec_iface_t vpx_codec_vp10_cx_algo;
 extern vpx_codec_iface_t *vpx_codec_vp10_cx(void);
 /*!@} - end algorithm interface member group*/
 
@@ -46,8 +46,7 @@ extern vpx_codec_iface_t *vpx_codec_vp10_cx(void);
  * predictor. When not set, the encoder will choose whether to use the
  * last frame or not automatically.
  */
-#define VP8_EFLAG_NO_REF_LAST      (1<<16)
-
+#define VP8_EFLAG_NO_REF_LAST (1 << 16)
 
 /*!\brief Don't reference the golden frame
  *
@@ -55,8 +54,7 @@ extern vpx_codec_iface_t *vpx_codec_vp10_cx(void);
  * predictor. When not set, the encoder will choose whether to use the
  * golden frame or not automatically.
  */
-#define VP8_EFLAG_NO_REF_GF        (1<<17)
-
+#define VP8_EFLAG_NO_REF_GF (1 << 17)
 
 /*!\brief Don't reference the alternate reference frame
  *
@@ -64,56 +62,49 @@ extern vpx_codec_iface_t *vpx_codec_vp10_cx(void);
  * predictor. When not set, the encoder will choose whether to use the
  * alt ref frame or not automatically.
  */
-#define VP8_EFLAG_NO_REF_ARF       (1<<21)
-
+#define VP8_EFLAG_NO_REF_ARF (1 << 21)
 
 /*!\brief Don't update the last frame
  *
  * When this flag is set, the encoder will not update the last frame with
  * the contents of the current frame.
  */
-#define VP8_EFLAG_NO_UPD_LAST      (1<<18)
-
+#define VP8_EFLAG_NO_UPD_LAST (1 << 18)
 
 /*!\brief Don't update the golden frame
  *
  * When this flag is set, the encoder will not update the golden frame with
  * the contents of the current frame.
  */
-#define VP8_EFLAG_NO_UPD_GF        (1<<22)
-
+#define VP8_EFLAG_NO_UPD_GF (1 << 22)
 
 /*!\brief Don't update the alternate reference frame
  *
  * When this flag is set, the encoder will not update the alt ref frame with
  * the contents of the current frame.
  */
-#define VP8_EFLAG_NO_UPD_ARF       (1<<23)
-
+#define VP8_EFLAG_NO_UPD_ARF (1 << 23)
 
 /*!\brief Force golden frame update
  *
  * When this flag is set, the encoder copies the contents of the current frame
  * to the golden frame buffer.
  */
-#define VP8_EFLAG_FORCE_GF         (1<<19)
-
+#define VP8_EFLAG_FORCE_GF (1 << 19)
 
 /*!\brief Force alternate reference frame update
  *
  * When this flag is set, the encoder copies the contents of the current frame
  * to the alternate reference frame buffer.
  */
-#define VP8_EFLAG_FORCE_ARF        (1<<24)
-
+#define VP8_EFLAG_FORCE_ARF (1 << 24)
 
 /*!\brief Disable entropy update
  *
  * When this flag is set, the encoder will not update its internal entropy
  * model based on the entropy of this frame.
  */
-#define VP8_EFLAG_NO_UPD_ENTROPY   (1<<20)
-
+#define VP8_EFLAG_NO_UPD_ENTROPY (1 << 20)
 
 /*!\brief VPx encoder control functions
  *
@@ -127,7 +118,7 @@ enum vp8e_enc_control_id {
    *
    * Supported in codecs: VP8, VP9
    */
-  VP8E_SET_ROI_MAP           = 8,
+  VP8E_SET_ROI_MAP = 8,
 
   /*!\brief Codec control function to pass an Active map to encoder.
    *
@@ -139,7 +130,7 @@ enum vp8e_enc_control_id {
    *
    * Supported in codecs: VP8, VP9
    */
-  VP8E_SET_SCALEMODE         = 11,
+  VP8E_SET_SCALEMODE = 11,
 
   /*!\brief Codec control function to set encoder internal speed settings.
    *
@@ -152,7 +143,7 @@ enum vp8e_enc_control_id {
    *
    * Supported in codecs: VP8, VP9
    */
-  VP8E_SET_CPUUSED           = 13,
+  VP8E_SET_CPUUSED = 13,
 
   /*!\brief Codec control function to enable automatic set and use alf frames.
    *
@@ -480,7 +471,8 @@ enum vp8e_enc_control_id {
   VP9E_SET_COLOR_SPACE,
 
   /*!\brief Codec control function to set temporal layering mode.
-   * \note Valid ranges: 0..3, default is "0" (VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING).
+   * \note Valid ranges: 0..3, default is "0"
+   * (VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING).
    *                     0 = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING
    *                     1 = VP9E_TEMPORAL_LAYERING_MODE_BYPASS
    *                     2 = VP9E_TEMPORAL_LAYERING_MODE_0101
@@ -543,10 +535,10 @@ enum vp8e_enc_control_id {
  * This set of constants define 1-D vpx scaling modes
  */
 typedef enum vpx_scaling_mode_1d {
-  VP8E_NORMAL      = 0,
-  VP8E_FOURFIVE    = 1,
-  VP8E_THREEFIVE   = 2,
-  VP8E_ONETWO      = 3
+  VP8E_NORMAL = 0,
+  VP8E_FOURFIVE = 1,
+  VP8E_THREEFIVE = 2,
+  VP8E_ONETWO = 3
 } VPX_SCALING_MODE;
 
 /*!\brief Temporal layering mode enum for VP9 SVC.
@@ -559,21 +551,21 @@ typedef enum vp9e_temporal_layering_mode {
   /*!\brief No temporal layering.
    * Used when only spatial layering is used.
    */
-  VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING   = 0,
+  VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING = 0,
 
   /*!\brief Bypass mode.
    * Used when application needs to control temporal layering.
    * This will only work when the number of spatial layers equals 1.
    */
-  VP9E_TEMPORAL_LAYERING_MODE_BYPASS       = 1,
+  VP9E_TEMPORAL_LAYERING_MODE_BYPASS = 1,
 
   /*!\brief 0-1-0-1... temporal layering scheme with two temporal layers.
    */
-  VP9E_TEMPORAL_LAYERING_MODE_0101         = 2,
+  VP9E_TEMPORAL_LAYERING_MODE_0101 = 2,
 
   /*!\brief 0-2-1-2... temporal layering scheme with three temporal layers.
    */
-  VP9E_TEMPORAL_LAYERING_MODE_0212         = 3
+  VP9E_TEMPORAL_LAYERING_MODE_0212 = 3
 } VP9E_TEMPORAL_LAYERING_MODE;
 
 /*!\brief  vpx region of interest map
@@ -585,13 +577,13 @@ typedef enum vp9e_temporal_layering_mode {
 typedef struct vpx_roi_map {
   /*! An id between 0 and 3 for each 16x16 region within a frame. */
   unsigned char *roi_map;
-  unsigned int rows;       /**< Number of rows. */
-  unsigned int cols;       /**< Number of columns. */
+  unsigned int rows; /**< Number of rows. */
+  unsigned int cols; /**< Number of columns. */
   // TODO(paulwilkins): broken for VP9 which has 8 segments
   // q and loop filter deltas for each segment
   // (see MAX_MB_SEGMENTS)
-  int delta_q[4];          /**< Quantizer deltas. */
-  int delta_lf[4];         /**< Loop filter deltas. */
+  int delta_q[4];  /**< Quantizer deltas. */
+  int delta_lf[4]; /**< Loop filter deltas. */
   /*! Static breakout threshold for each segment. */
   unsigned int static_threshold[4];
 } vpx_roi_map_t;
@@ -602,11 +594,12 @@ typedef struct vpx_roi_map {
  *
  */
 
-
 typedef struct vpx_active_map {
-  unsigned char  *active_map; /**< specify an on (1) or off (0) each 16x16 region within a frame */
-  unsigned int    rows;       /**< number of rows */
-  unsigned int    cols;       /**< number of cols */
+  unsigned char
+      *active_map; /**< specify on (1) or off (0) for each 16x16 region within
+                      a frame */
+  unsigned int rows; /**< number of rows */
+  unsigned int cols; /**< number of cols */
 } vpx_active_map_t;
 
 /*!\brief  vpx image scaling mode
@@ -615,8 +608,8 @@ typedef struct vpx_active_map {
  *
  */
 typedef struct vpx_scaling_mode {
-  VPX_SCALING_MODE    h_scaling_mode;  /**< horizontal scaling mode */
-  VPX_SCALING_MODE    v_scaling_mode;  /**< vertical scaling mode   */
+  VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */
+  VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode   */
 } vpx_scaling_mode_t;
 
 /*!\brief VP8 token partition mode
@@ -627,9 +620,9 @@ typedef struct vpx_scaling_mode {
  */
 
 typedef enum {
-  VP8_ONE_TOKENPARTITION   = 0,
-  VP8_TWO_TOKENPARTITION   = 1,
-  VP8_FOUR_TOKENPARTITION  = 2,
+  VP8_ONE_TOKENPARTITION = 0,
+  VP8_TWO_TOKENPARTITION = 1,
+  VP8_FOUR_TOKENPARTITION = 2,
   VP8_EIGHT_TOKENPARTITION = 3
 } vp8e_token_partitions;
 
@@ -645,10 +638,7 @@ typedef enum {
  * Changes the encoder to tune for certain types of input material.
  *
  */
-typedef enum {
-  VPX_TUNE_PSNR,
-  VPX_TUNE_SSIM
-} vpx_tune_metric;
+typedef enum { VPX_TUNE_PSNR, VPX_TUNE_SSIM } vpx_tune_metric;
 
 /*!\brief  vp9 svc layer parameters
  *
@@ -658,8 +648,8 @@ typedef enum {
  *
  */
 typedef struct vpx_svc_layer_id {
-  int spatial_layer_id;       /**< Spatial layer id number. */
-  int temporal_layer_id;      /**< Temporal layer id number. */
+  int spatial_layer_id;  /**< Spatial layer id number. */
+  int temporal_layer_id; /**< Temporal layer id number. */
 } vpx_svc_layer_id_t;
 
 /*!\brief  vp9 svc frame flag parameters.
@@ -671,7 +661,7 @@ typedef struct vpx_svc_layer_id {
  *
  */
 typedef struct vpx_svc_ref_frame_config {
-  int frame_flags[VPX_TS_MAX_LAYERS];  /**< Frame flags. */
+  int frame_flags[VPX_TS_MAX_LAYERS]; /**< Frame flags. */
   int lst_fb_idx[VPX_TS_MAX_LAYERS];  /**< Last buffer index. */
   int gld_fb_idx[VPX_TS_MAX_LAYERS];  /**< Golden buffer index. */
   int alt_fb_idx[VPX_TS_MAX_LAYERS];  /**< Altref buffer index. */
@@ -685,60 +675,60 @@ typedef struct vpx_svc_ref_frame_config {
  *
  */
 
-VPX_CTRL_USE_TYPE(VP8E_SET_FRAME_FLAGS,        int)
+VPX_CTRL_USE_TYPE(VP8E_SET_FRAME_FLAGS, int)
 #define VPX_CTRL_VP8E_SET_FRAME_FLAGS
-VPX_CTRL_USE_TYPE(VP8E_SET_TEMPORAL_LAYER_ID,  int)
+VPX_CTRL_USE_TYPE(VP8E_SET_TEMPORAL_LAYER_ID, int)
 #define VPX_CTRL_VP8E_SET_TEMPORAL_LAYER_ID
-VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP,            vpx_roi_map_t *)
+VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, vpx_roi_map_t *)
 #define VPX_CTRL_VP8E_SET_ROI_MAP
-VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP,          vpx_active_map_t *)
+VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, vpx_active_map_t *)
 #define VPX_CTRL_VP8E_SET_ACTIVEMAP
-VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE,          vpx_scaling_mode_t *)
+VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, vpx_scaling_mode_t *)
 #define VPX_CTRL_VP8E_SET_SCALEMODE
 
-VPX_CTRL_USE_TYPE(VP9E_SET_SVC,                int)
+VPX_CTRL_USE_TYPE(VP9E_SET_SVC, int)
 #define VPX_CTRL_VP9E_SET_SVC
-VPX_CTRL_USE_TYPE(VP9E_SET_SVC_PARAMETERS,     void *)
+VPX_CTRL_USE_TYPE(VP9E_SET_SVC_PARAMETERS, void *)
 #define VPX_CTRL_VP9E_SET_SVC_PARAMETERS
-VPX_CTRL_USE_TYPE(VP9E_REGISTER_CX_CALLBACK,   void *)
+VPX_CTRL_USE_TYPE(VP9E_REGISTER_CX_CALLBACK, void *)
 #define VPX_CTRL_VP9E_REGISTER_CX_CALLBACK
-VPX_CTRL_USE_TYPE(VP9E_SET_SVC_LAYER_ID,       vpx_svc_layer_id_t *)
+VPX_CTRL_USE_TYPE(VP9E_SET_SVC_LAYER_ID, vpx_svc_layer_id_t *)
 #define VPX_CTRL_VP9E_SET_SVC_LAYER_ID
 
-VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED,            int)
+VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int)
 #define VPX_CTRL_VP8E_SET_CPUUSED
-VPX_CTRL_USE_TYPE(VP8E_SET_ENABLEAUTOALTREF,   unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_ENABLEAUTOALTREF, unsigned int)
 #define VPX_CTRL_VP8E_SET_ENABLEAUTOALTREF
-VPX_CTRL_USE_TYPE(VP8E_SET_NOISE_SENSITIVITY,  unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_NOISE_SENSITIVITY, unsigned int)
 #define VPX_CTRL_VP8E_SET_NOISE_SENSITIVITY
-VPX_CTRL_USE_TYPE(VP8E_SET_SHARPNESS,          unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_SHARPNESS, unsigned int)
 #define VPX_CTRL_VP8E_SET_SHARPNESS
-VPX_CTRL_USE_TYPE(VP8E_SET_STATIC_THRESHOLD,   unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_STATIC_THRESHOLD, unsigned int)
 #define VPX_CTRL_VP8E_SET_STATIC_THRESHOLD
-VPX_CTRL_USE_TYPE(VP8E_SET_TOKEN_PARTITIONS,   int) /* vp8e_token_partitions */
+VPX_CTRL_USE_TYPE(VP8E_SET_TOKEN_PARTITIONS, int) /* vp8e_token_partitions */
 #define VPX_CTRL_VP8E_SET_TOKEN_PARTITIONS
 
-VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_MAXFRAMES,     unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_MAXFRAMES, unsigned int)
 #define VPX_CTRL_VP8E_SET_ARNR_MAXFRAMES
-VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH,     unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH, unsigned int)
 #define VPX_CTRL_VP8E_SET_ARNR_STRENGTH
-VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_SET_ARNR_TYPE,     unsigned int)
+VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_SET_ARNR_TYPE, unsigned int)
 #define VPX_CTRL_VP8E_SET_ARNR_TYPE
-VPX_CTRL_USE_TYPE(VP8E_SET_TUNING,             int) /* vpx_tune_metric */
+VPX_CTRL_USE_TYPE(VP8E_SET_TUNING, int) /* vpx_tune_metric */
 #define VPX_CTRL_VP8E_SET_TUNING
-VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL,      unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL, unsigned int)
 #define VPX_CTRL_VP8E_SET_CQ_LEVEL
 
-VPX_CTRL_USE_TYPE(VP9E_SET_TILE_COLUMNS,  int)
+VPX_CTRL_USE_TYPE(VP9E_SET_TILE_COLUMNS, int)
 #define VPX_CTRL_VP9E_SET_TILE_COLUMNS
-VPX_CTRL_USE_TYPE(VP9E_SET_TILE_ROWS,  int)
+VPX_CTRL_USE_TYPE(VP9E_SET_TILE_ROWS, int)
 #define VPX_CTRL_VP9E_SET_TILE_ROWS
 
-VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER,     int *)
+VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER, int *)
 #define VPX_CTRL_VP8E_GET_LAST_QUANTIZER
-VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER_64,  int *)
+VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER_64, int *)
 #define VPX_CTRL_VP8E_GET_LAST_QUANTIZER_64
-VPX_CTRL_USE_TYPE(VP9E_GET_SVC_LAYER_ID,  vpx_svc_layer_id_t *)
+VPX_CTRL_USE_TYPE(VP9E_GET_SVC_LAYER_ID, vpx_svc_layer_id_t *)
 #define VPX_CTRL_VP9E_GET_SVC_LAYER_ID
 
 VPX_CTRL_USE_TYPE(VP8E_SET_MAX_INTRA_BITRATE_PCT, unsigned int)
@@ -764,7 +754,7 @@ VPX_CTRL_USE_TYPE(VP9E_SET_AQ_MODE, unsigned int)
 VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PERIODIC_BOOST, unsigned int)
 #define VPX_CTRL_VP9E_SET_FRAME_PERIODIC_BOOST
 
-VPX_CTRL_USE_TYPE(VP9E_SET_NOISE_SENSITIVITY,  unsigned int)
+VPX_CTRL_USE_TYPE(VP9E_SET_NOISE_SENSITIVITY, unsigned int)
 #define VPX_CTRL_VP9E_SET_NOISE_SENSITIVITY
 
 VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vpx_tune_content */
@@ -773,10 +763,10 @@ VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vpx_tune_content */
 VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_SPACE, int)
 #define VPX_CTRL_VP9E_SET_COLOR_SPACE
 
-VPX_CTRL_USE_TYPE(VP9E_SET_MIN_GF_INTERVAL,  unsigned int)
+VPX_CTRL_USE_TYPE(VP9E_SET_MIN_GF_INTERVAL, unsigned int)
 #define VPX_CTRL_VP9E_SET_MIN_GF_INTERVAL
 
-VPX_CTRL_USE_TYPE(VP9E_SET_MAX_GF_INTERVAL,  unsigned int)
+VPX_CTRL_USE_TYPE(VP9E_SET_MAX_GF_INTERVAL, unsigned int)
 #define VPX_CTRL_VP9E_SET_MAX_GF_INTERVAL
 
 VPX_CTRL_USE_TYPE(VP9E_GET_ACTIVEMAP, vpx_active_map_t *)
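As a usage sketch for the definitions above: the per-frame VP8_EFLAG_* bits are OR-ed together and handed to vpx_codec_encode(), while settings such as VP8E_SET_CPUUSED go through the type-checked vpx_codec_control() macro. encoder and img are assumed initialized:

void encode_without_gf_arf(vpx_codec_ctx_t *encoder, vpx_image_t *img,
                           vpx_codec_pts_t pts) {
  /* Speed/quality trade-off; the exact valid range is codec-specific. */
  vpx_codec_control(encoder, VP8E_SET_CPUUSED, 8);

  /* Neither reference nor update the golden and alt-ref buffers. */
  const vpx_enc_frame_flags_t flags =
      VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF |
      VP8_EFLAG_NO_UPD_ARF;

  vpx_codec_encode(encoder, img, pts, 1 /* duration */, flags,
                   VPX_DL_GOOD_QUALITY);
}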
diff --git a/vpx/vp8dx.h b/vpx/vp8dx.h
index 4c6c19de2af33ab7d8439a5e2b53ef0881667508..fd5195a7b30994e6239246475e07c50dadc0e1b1 100644
--- a/vpx/vp8dx.h
+++ b/vpx/vp8dx.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /*!\defgroup vp8_decoder WebM VP8/VP9 Decoder
  * \ingroup vp8
  *
@@ -33,7 +32,7 @@ extern "C" {
  * This interface provides the capability to decode VP10 streams.
  * @{
  */
-extern vpx_codec_iface_t  vpx_codec_vp10_dx_algo;
+extern vpx_codec_iface_t vpx_codec_vp10_dx_algo;
 extern vpx_codec_iface_t *vpx_codec_vp10_dx(void);
 /*!@} - end algorithm interface member group*/
 
@@ -117,18 +116,17 @@ typedef void (*vpx_decrypt_cb)(void *decrypt_state, const unsigned char *input,
  * Defines a structure to hold the decryption state and access function.
  */
 typedef struct vpx_decrypt_init {
-    /*! Decrypt callback. */
-    vpx_decrypt_cb decrypt_cb;
+  /*! Decrypt callback. */
+  vpx_decrypt_cb decrypt_cb;
 
-    /*! Decryption state. */
-    void *decrypt_state;
+  /*! Decryption state. */
+  void *decrypt_state;
 } vpx_decrypt_init;
 
 /*!\brief A deprecated alias for vpx_decrypt_init.
  */
 typedef vpx_decrypt_init vp8_decrypt_init;
 
-
 /*!\cond */
 /*!\brief VP8 decoder control function parameter type
  *
@@ -137,22 +135,21 @@ typedef vpx_decrypt_init vp8_decrypt_init;
  *
  */
 
-
-VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_UPDATES,    int *)
+VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_UPDATES, int *)
 #define VPX_CTRL_VP8D_GET_LAST_REF_UPDATES
-VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED,     int *)
+VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED, int *)
 #define VPX_CTRL_VP8D_GET_FRAME_CORRUPTED
-VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED,       int *)
+VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED, int *)
 #define VPX_CTRL_VP8D_GET_LAST_REF_USED
-VPX_CTRL_USE_TYPE(VPXD_SET_DECRYPTOR,           vpx_decrypt_init *)
+VPX_CTRL_USE_TYPE(VPXD_SET_DECRYPTOR, vpx_decrypt_init *)
 #define VPX_CTRL_VPXD_SET_DECRYPTOR
-VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR,           vpx_decrypt_init *)
+VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, vpx_decrypt_init *)
 #define VPX_CTRL_VP8D_SET_DECRYPTOR
-VPX_CTRL_USE_TYPE(VP9D_GET_DISPLAY_SIZE,        int *)
+VPX_CTRL_USE_TYPE(VP9D_GET_DISPLAY_SIZE, int *)
 #define VPX_CTRL_VP9D_GET_DISPLAY_SIZE
-VPX_CTRL_USE_TYPE(VP9D_GET_BIT_DEPTH,           unsigned int *)
+VPX_CTRL_USE_TYPE(VP9D_GET_BIT_DEPTH, unsigned int *)
 #define VPX_CTRL_VP9D_GET_BIT_DEPTH
-VPX_CTRL_USE_TYPE(VP9D_GET_FRAME_SIZE,          int *)
+VPX_CTRL_USE_TYPE(VP9D_GET_FRAME_SIZE, int *)
 #define VPX_CTRL_VP9D_GET_FRAME_SIZE
 VPX_CTRL_USE_TYPE(VP9_INVERT_TILE_DECODE_ORDER, int)
 #define VPX_CTRL_VP9_INVERT_TILE_DECODE_ORDER
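A sketch of wiring the decryption hook declared above into a decoder. my_decrypt is a hypothetical callback matching the vpx_decrypt_cb signature; a pass-through body stands in for real decryption:

#include <string.h>

static void my_decrypt(void *state, const unsigned char *in,
                       unsigned char *out, int count) {
  (void)state;
  memcpy(out, in, count); /* a real callback would decrypt here */
}

void install_decryptor(vpx_codec_ctx_t *decoder) {
  vpx_decrypt_init di = { my_decrypt, NULL /* decrypt_state */ };
  vpx_codec_control(decoder, VPXD_SET_DECRYPTOR, &di);
}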
diff --git a/vpx/vpx_codec.h b/vpx/vpx_codec.h
index b6037bb4d7a91a46c0aaf31448a5130fc31a7401..a8d4e54e7c054ec7988112745edf380208d43a1c 100644
--- a/vpx/vpx_codec.h
+++ b/vpx/vpx_codec.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /*!\defgroup codec Common Algorithm Interface
  * This abstraction allows applications to easily support multiple video
  * formats with minimal code duplication. This section describes the interface
@@ -46,434 +45,419 @@ extern "C" {
 #include "./vpx_integer.h"
 #include "./vpx_image.h"
 
-  /*!\brief Decorator indicating a function is deprecated */
+/*!\brief Decorator indicating a function is deprecated */
 #ifndef DEPRECATED
 #if defined(__GNUC__) && __GNUC__
-#define DEPRECATED          __attribute__ ((deprecated))
+#define DEPRECATED __attribute__((deprecated))
 #elif defined(_MSC_VER)
 #define DEPRECATED
 #else
 #define DEPRECATED
 #endif
-#endif  /* DEPRECATED */
+#endif /* DEPRECATED */
 
 #ifndef DECLSPEC_DEPRECATED
 #if defined(__GNUC__) && __GNUC__
 #define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
 #elif defined(_MSC_VER)
-#define DECLSPEC_DEPRECATED __declspec(deprecated) /**< \copydoc #DEPRECATED */
+#define DECLSPEC_DEPRECATED __declspec(deprecated)
+/**< \copydoc #DEPRECATED */
 #else
 #define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
 #endif
-#endif  /* DECLSPEC_DEPRECATED */
+#endif /* DECLSPEC_DEPRECATED */
 
-  /*!\brief Decorator indicating a function is potentially unused */
+/*!\brief Decorator indicating a function is potentially unused */
 #ifdef UNUSED
 #elif defined(__GNUC__) || defined(__clang__)
-#define UNUSED __attribute__ ((unused))
+#define UNUSED __attribute__((unused))
 #else
 #define UNUSED
 #endif
 
-  /*!\brief Current ABI version number
-   *
-   * \internal
-   * If this file is altered in any way that changes the ABI, this value
-   * must be bumped.  Examples include, but are not limited to, changing
-   * types, removing or reassigning enums, adding/removing/rearranging
-   * fields to structures
-   */
+/*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped.  Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields in structures
+ */
 #define VPX_CODEC_ABI_VERSION (3 + VPX_IMAGE_ABI_VERSION) /**<\hideinitializer*/
 
-  /*!\brief Algorithm return codes */
-  typedef enum {
-    /*!\brief Operation completed without error */
-    VPX_CODEC_OK,
-
-    /*!\brief Unspecified error */
-    VPX_CODEC_ERROR,
-
-    /*!\brief Memory operation failed */
-    VPX_CODEC_MEM_ERROR,
-
-    /*!\brief ABI version mismatch */
-    VPX_CODEC_ABI_MISMATCH,
-
-    /*!\brief Algorithm does not have required capability */
-    VPX_CODEC_INCAPABLE,
-
-    /*!\brief The given bitstream is not supported.
-     *
-     * The bitstream was unable to be parsed at the highest level. The decoder
-     * is unable to proceed. This error \ref SHOULD be treated as fatal to the
-     * stream. */
-    VPX_CODEC_UNSUP_BITSTREAM,
-
-    /*!\brief Encoded bitstream uses an unsupported feature
-     *
-     * The decoder does not implement a feature required by the encoder. This
-     * return code should only be used for features that prevent future
-     * pictures from being properly decoded. This error \ref MAY be treated as
-     * fatal to the stream or \ref MAY be treated as fatal to the current GOP.
-     */
-    VPX_CODEC_UNSUP_FEATURE,
-
-    /*!\brief The coded data for this stream is corrupt or incomplete
-     *
-     * There was a problem decoding the current frame.  This return code
-     * should only be used for failures that prevent future pictures from
-     * being properly decoded. This error \ref MAY be treated as fatal to the
-     * stream or \ref MAY be treated as fatal to the current GOP. If decoding
-     * is continued for the current GOP, artifacts may be present.
-     */
-    VPX_CODEC_CORRUPT_FRAME,
-
-    /*!\brief An application-supplied parameter is not valid.
-     *
-     */
-    VPX_CODEC_INVALID_PARAM,
-
-    /*!\brief An iterator reached the end of list.
-     *
-     */
-    VPX_CODEC_LIST_END
-
-  }
-  vpx_codec_err_t;
-
-
-  /*! \brief Codec capabilities bitfield
-   *
-   *  Each codec advertises the capabilities it supports as part of its
-   *  ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
-   *  or functionality, and are not required to be supported.
-   *
-   *  The available flags are specified by VPX_CODEC_CAP_* defines.
-   */
-  typedef long vpx_codec_caps_t;
-#define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
-#define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
+/*!\brief Algorithm return codes */
+typedef enum {
+  /*!\brief Operation completed without error */
+  VPX_CODEC_OK,
 
+  /*!\brief Unspecified error */
+  VPX_CODEC_ERROR,
 
-  /*! \brief Initialization-time Feature Enabling
-   *
-   *  Certain codec features must be known at initialization time, to allow for
-   *  proper memory allocation.
-   *
-   *  The available flags are specified by VPX_CODEC_USE_* defines.
-   */
-  typedef long vpx_codec_flags_t;
-
+  /*!\brief Memory operation failed */
+  VPX_CODEC_MEM_ERROR,
 
-  /*!\brief Codec interface structure.
-   *
-   * Contains function pointers and other data private to the codec
-   * implementation. This structure is opaque to the application.
-   */
-  typedef const struct vpx_codec_iface vpx_codec_iface_t;
+  /*!\brief ABI version mismatch */
+  VPX_CODEC_ABI_MISMATCH,
 
+  /*!\brief Algorithm does not have required capability */
+  VPX_CODEC_INCAPABLE,
 
-  /*!\brief Codec private data structure.
+  /*!\brief The given bitstream is not supported.
    *
-   * Contains data private to the codec implementation. This structure is opaque
-   * to the application.
-   */
-  typedef       struct vpx_codec_priv  vpx_codec_priv_t;
-
+   * The bitstream could not be parsed at the highest level. The decoder
+   * is unable to proceed. This error \ref SHOULD be treated as fatal to the
+   * stream. */
+  VPX_CODEC_UNSUP_BITSTREAM,
 
-  /*!\brief Iterator
+  /*!\brief Encoded bitstream uses an unsupported feature
    *
-   * Opaque storage used for iterating over lists.
+   * The decoder does not implement a feature required by the encoder. This
+   * return code should only be used for features that prevent future
+   * pictures from being properly decoded. This error \ref MAY be treated as
+   * fatal to the stream or \ref MAY be treated as fatal to the current GOP.
    */
-  typedef const void *vpx_codec_iter_t;
-
+  VPX_CODEC_UNSUP_FEATURE,
 
-  /*!\brief Codec context structure
+  /*!\brief The coded data for this stream is corrupt or incomplete
    *
-   * All codecs \ref MUST support this context structure fully. In general,
-   * this data should be considered private to the codec algorithm, and
-   * not be manipulated or examined by the calling application. Applications
-   * may reference the 'name' member to get a printable description of the
-   * algorithm.
+   * There was a problem decoding the current frame.  This return code
+   * should only be used for failures that prevent future pictures from
+   * being properly decoded. This error \ref MAY be treated as fatal to the
+   * stream or \ref MAY be treated as fatal to the current GOP. If decoding
+   * is continued for the current GOP, artifacts may be present.
    */
-  typedef struct vpx_codec_ctx {
-    const char              *name;        /**< Printable interface name */
-    vpx_codec_iface_t       *iface;       /**< Interface pointers */
-    vpx_codec_err_t          err;         /**< Last returned error */
-    const char              *err_detail;  /**< Detailed info, if available */
-    vpx_codec_flags_t        init_flags;  /**< Flags passed at init time */
-    union {
-      /**< Decoder Configuration Pointer */
-      const struct vpx_codec_dec_cfg *dec;
-      /**< Encoder Configuration Pointer */
-      const struct vpx_codec_enc_cfg *enc;
-      const void                     *raw;
-    }                        config;      /**< Configuration pointer aliasing union */
-    vpx_codec_priv_t        *priv;        /**< Algorithm private storage */
-  } vpx_codec_ctx_t;
-
-  /*!\brief Bit depth for codec
-   * *
-   * This enumeration determines the bit depth of the codec.
-   */
-  typedef enum vpx_bit_depth {
-    VPX_BITS_8  =  8,  /**<  8 bits */
-    VPX_BITS_10 = 10,  /**< 10 bits */
-    VPX_BITS_12 = 12,  /**< 12 bits */
-  } vpx_bit_depth_t;
-
-  /*
-   * Library Version Number Interface
+  VPX_CODEC_CORRUPT_FRAME,
+
+  /*!\brief An application-supplied parameter is not valid.
    *
-   * For example, see the following sample return values:
-   *     vpx_codec_version()           (1<<16 | 2<<8 | 3)
-   *     vpx_codec_version_str()       "v1.2.3-rc1-16-gec6a1ba"
-   *     vpx_codec_version_extra_str() "rc1-16-gec6a1ba"
    */
+  VPX_CODEC_INVALID_PARAM,
 
-  /*!\brief Return the version information (as an integer)
-   *
-   * Returns a packed encoding of the library version number. This will only include
-   * the major.minor.patch component of the version number. Note that this encoded
-   * value should be accessed through the macros provided, as the encoding may change
-   * in the future.
+  /*!\brief An iterator reached the end of the list.
    *
    */
-  int vpx_codec_version(void);
-#define VPX_VERSION_MAJOR(v) ((v>>16)&0xff) /**< extract major from packed version */
-#define VPX_VERSION_MINOR(v) ((v>>8)&0xff)  /**< extract minor from packed version */
-#define VPX_VERSION_PATCH(v) ((v>>0)&0xff)  /**< extract patch from packed version */
-
-  /*!\brief Return the version major number */
-#define vpx_codec_version_major() ((vpx_codec_version()>>16)&0xff)
+  VPX_CODEC_LIST_END
 
-  /*!\brief Return the version minor number */
-#define vpx_codec_version_minor() ((vpx_codec_version()>>8)&0xff)
+} vpx_codec_err_t;
 
-  /*!\brief Return the version patch number */
-#define vpx_codec_version_patch() ((vpx_codec_version()>>0)&0xff)
+/*! \brief Codec capabilities bitfield
+ *
+ *  Each codec advertises the capabilities it supports as part of its
+ *  ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
+ *  or functionality, and are not required to be supported.
+ *
+ *  The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
+typedef long vpx_codec_caps_t;
+#define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
+#define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
 
+/*! \brief Initialization-time Feature Enabling
+ *
+ *  Certain codec features must be known at initialization time, to allow for
+ *  proper memory allocation.
+ *
+ *  The available flags are specified by VPX_CODEC_USE_* defines.
+ */
+typedef long vpx_codec_flags_t;
 
-  /*!\brief Return the version information (as a string)
-   *
-   * Returns a printable string containing the full library version number. This may
-   * contain additional text following the three digit version number, as to indicate
-   * release candidates, prerelease versions, etc.
-   *
-   */
-  const char *vpx_codec_version_str(void);
+/*!\brief Codec interface structure.
+ *
+ * Contains function pointers and other data private to the codec
+ * implementation. This structure is opaque to the application.
+ */
+typedef const struct vpx_codec_iface vpx_codec_iface_t;
 
+/*!\brief Codec private data structure.
+ *
+ * Contains data private to the codec implementation. This structure is opaque
+ * to the application.
+ */
+typedef struct vpx_codec_priv vpx_codec_priv_t;
 
-  /*!\brief Return the version information (as a string)
-   *
-   * Returns a printable "extra string". This is the component of the string returned
-   * by vpx_codec_version_str() following the three digit version number.
-   *
-   */
-  const char *vpx_codec_version_extra_str(void);
+/*!\brief Iterator
+ *
+ * Opaque storage used for iterating over lists.
+ */
+typedef const void *vpx_codec_iter_t;
 
+/*!\brief Codec context structure
+ *
+ * All codecs \ref MUST support this context structure fully. In general,
+ * this data should be considered private to the codec algorithm, and
+ * not be manipulated or examined by the calling application. Applications
+ * may reference the 'name' member to get a printable description of the
+ * algorithm.
+ */
+typedef struct vpx_codec_ctx {
+  const char *name;             /**< Printable interface name */
+  vpx_codec_iface_t *iface;     /**< Interface pointers */
+  vpx_codec_err_t err;          /**< Last returned error */
+  const char *err_detail;       /**< Detailed info, if available */
+  vpx_codec_flags_t init_flags; /**< Flags passed at init time */
+  union {
+    /** Decoder Configuration Pointer */
+    const struct vpx_codec_dec_cfg *dec;
+    /** Encoder Configuration Pointer */
+    const struct vpx_codec_enc_cfg *enc;
+    const void *raw;
+  } config;               /**< Configuration pointer aliasing union */
+  vpx_codec_priv_t *priv; /**< Algorithm private storage */
+} vpx_codec_ctx_t;
+
+/*!\brief Bit depth for codec
+ *
+ * This enumeration determines the bit depth of the codec.
+ */
+typedef enum vpx_bit_depth {
+  VPX_BITS_8 = 8,   /**<  8 bits */
+  VPX_BITS_10 = 10, /**< 10 bits */
+  VPX_BITS_12 = 12, /**< 12 bits */
+} vpx_bit_depth_t;
 
-  /*!\brief Return the build configuration
-   *
-   * Returns a printable string containing an encoded version of the build
-   * configuration. This may be useful to vpx support.
-   *
-   */
-  const char *vpx_codec_build_config(void);
+/*
+ * Library Version Number Interface
+ *
+ * For example, see the following sample return values:
+ *     vpx_codec_version()           (1<<16 | 2<<8 | 3)
+ *     vpx_codec_version_str()       "v1.2.3-rc1-16-gec6a1ba"
+ *     vpx_codec_version_extra_str() "rc1-16-gec6a1ba"
+ */
 
+/*!\brief Return the version information (as an integer)
+ *
+ * Returns a packed encoding of the library version number. This will only
+ * include the major.minor.patch component of the version number. Note that
+ * this encoded value should be accessed through the macros provided, as the
+ * encoding may change in the future.
+ *
+ */
+int vpx_codec_version(void);
+#define VPX_VERSION_MAJOR(v) \
+  ((v >> 16) & 0xff) /**< extract major from packed version */
+#define VPX_VERSION_MINOR(v) \
+  ((v >> 8) & 0xff) /**< extract minor from packed version */
+#define VPX_VERSION_PATCH(v) \
+  ((v >> 0) & 0xff) /**< extract patch from packed version */
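The packed layout implied by these shifts is major << 16 | minor << 8 | patch, so for a hypothetical version 1.2.3:

const int v = (1 << 16) | (2 << 8) | 3; /* what vpx_codec_version() packs */
/* VPX_VERSION_MAJOR(v) == 1, VPX_VERSION_MINOR(v) == 2,
 * VPX_VERSION_PATCH(v) == 3 */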
 
-  /*!\brief Return the name for a given interface
-   *
-   * Returns a human readable string for name of the given codec interface.
-   *
-   * \param[in]    iface     Interface pointer
-   *
-   */
-  const char *vpx_codec_iface_name(vpx_codec_iface_t *iface);
+/*!\brief Return the version major number */
+#define vpx_codec_version_major() ((vpx_codec_version() >> 16) & 0xff)
 
+/*!\brief Return the version minor number */
+#define vpx_codec_version_minor() ((vpx_codec_version() >> 8) & 0xff)
 
-  /*!\brief Convert error number to printable string
-   *
-   * Returns a human readable string for the last error returned by the
-   * algorithm. The returned error will be one line and will not contain
-   * any newline characters.
-   *
-   *
-   * \param[in]    err     Error number.
-   *
-   */
-  const char *vpx_codec_err_to_string(vpx_codec_err_t  err);
+/*!\brief Return the version patch number */
+#define vpx_codec_version_patch() ((vpx_codec_version() >> 0) & 0xff)
 
+/*!\brief Return the version information (as a string)
+ *
+ * Returns a printable string containing the full library version number. This
+ * may contain additional text following the three digit version number, e.g.
+ * to indicate release candidates, prerelease versions, etc.
+ *
+ */
+const char *vpx_codec_version_str(void);
 
-  /*!\brief Retrieve error synopsis for codec context
-   *
-   * Returns a human readable string for the last error returned by the
-   * algorithm. The returned error will be one line and will not contain
-   * any newline characters.
-   *
-   *
-   * \param[in]    ctx     Pointer to this instance's context.
-   *
-   */
-  const char *vpx_codec_error(vpx_codec_ctx_t  *ctx);
+/*!\brief Return the version information (as a string)
+ *
+ * Returns a printable "extra string". This is the component of the string
+ * returned
+ * by vpx_codec_version_str() following the three digit version number.
+ *
+ */
+const char *vpx_codec_version_extra_str(void);
 
+/*!\brief Return the build configuration
+ *
+ * Returns a printable string containing an encoded version of the build
+ * configuration. This may be useful to vpx support.
+ *
+ */
+const char *vpx_codec_build_config(void);
 
-  /*!\brief Retrieve detailed error information for codec context
-   *
-   * Returns a human readable string providing detailed information about
-   * the last error.
-   *
-   * \param[in]    ctx     Pointer to this instance's context.
-   *
-   * \retval NULL
-   *     No detailed information is available.
-   */
-  const char *vpx_codec_error_detail(vpx_codec_ctx_t  *ctx);
+/*!\brief Return the name for a given interface
+ *
+ * Returns a human readable string for the name of the given codec interface.
+ *
+ * \param[in]    iface     Interface pointer
+ *
+ */
+const char *vpx_codec_iface_name(vpx_codec_iface_t *iface);
 
+/*!\brief Convert error number to printable string
+ *
+ * Returns a human readable string for the given error number. The returned
+ * string will be one line and will not contain any newline characters.
+ *
+ *
+ * \param[in]    err     Error number.
+ *
+ */
+const char *vpx_codec_err_to_string(vpx_codec_err_t err);
 
-  /* REQUIRED FUNCTIONS
-   *
-   * The following functions are required to be implemented for all codecs.
-   * They represent the base case functionality expected of all codecs.
-   */
+/*!\brief Retrieve error synopsis for codec context
+ *
+ * Returns a human readable string for the last error returned by the
+ * algorithm. The returned string will be one line and will not contain
+ * any newline characters.
+ *
+ *
+ * \param[in]    ctx     Pointer to this instance's context.
+ *
+ */
+const char *vpx_codec_error(vpx_codec_ctx_t *ctx);
 
-  /*!\brief Destroy a codec instance
-   *
-   * Destroys a codec context, freeing any associated memory buffers.
-   *
-   * \param[in] ctx   Pointer to this instance's context
-   *
-   * \retval #VPX_CODEC_OK
-   *     The codec algorithm initialized.
-   * \retval #VPX_CODEC_MEM_ERROR
-   *     Memory allocation failed.
-   */
-  vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx);
+/*!\brief Retrieve detailed error information for codec context
+ *
+ * Returns a human readable string providing detailed information about
+ * the last error.
+ *
+ * \param[in]    ctx     Pointer to this instance's context.
+ *
+ * \retval NULL
+ *     No detailed information is available.
+ */
+const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx);
 
+/* REQUIRED FUNCTIONS
+ *
+ * The following functions are required to be implemented for all codecs.
+ * They represent the base case functionality expected of all codecs.
+ */
 
-  /*!\brief Get the capabilities of an algorithm.
-   *
-   * Retrieves the capabilities bitfield from the algorithm's interface.
-   *
-   * \param[in] iface   Pointer to the algorithm interface
-   *
-   */
-  vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface);
+/*!\brief Destroy a codec instance
+ *
+ * Destroys a codec context, freeing any associated memory buffers.
+ *
+ * \param[in] ctx   Pointer to this instance's context
+ *
+ * \retval #VPX_CODEC_OK
+ *     The codec instance has been destroyed.
+ * \retval #VPX_CODEC_MEM_ERROR
+ *     Memory allocation failed.
+ */
+vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx);
 
+/*!\brief Get the capabilities of an algorithm.
+ *
+ * Retrieves the capabilities bitfield from the algorithm's interface.
+ *
+ * \param[in] iface   Pointer to the algorithm interface
+ *
+ */
+vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface);
 
-  /*!\brief Control algorithm
-   *
-   * This function is used to exchange algorithm specific data with the codec
-   * instance. This can be used to implement features specific to a particular
-   * algorithm.
-   *
-   * This wrapper function dispatches the request to the helper function
-   * associated with the given ctrl_id. It tries to call this function
-   * transparently, but will return #VPX_CODEC_ERROR if the request could not
-   * be dispatched.
-   *
-   * Note that this function should not be used directly. Call the
-   * #vpx_codec_control wrapper macro instead.
-   *
-   * \param[in]     ctx              Pointer to this instance's context
-   * \param[in]     ctrl_id          Algorithm specific control identifier
-   *
-   * \retval #VPX_CODEC_OK
-   *     The control request was processed.
-   * \retval #VPX_CODEC_ERROR
-   *     The control request was not processed.
-   * \retval #VPX_CODEC_INVALID_PARAM
-   *     The data was not valid.
-   */
-  vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t  *ctx,
-                                     int               ctrl_id,
-                                     ...);
+/*!\brief Control algorithm
+ *
+ * This function is used to exchange algorithm specific data with the codec
+ * instance. This can be used to implement features specific to a particular
+ * algorithm.
+ *
+ * This wrapper function dispatches the request to the helper function
+ * associated with the given ctrl_id. It tries to call this function
+ * transparently, but will return #VPX_CODEC_ERROR if the request could not
+ * be dispatched.
+ *
+ * Note that this function should not be used directly. Call the
+ * #vpx_codec_control wrapper macro instead.
+ *
+ * \param[in]     ctx              Pointer to this instance's context
+ * \param[in]     ctrl_id          Algorithm specific control identifier
+ *
+ * \retval #VPX_CODEC_OK
+ *     The control request was processed.
+ * \retval #VPX_CODEC_ERROR
+ *     The control request was not processed.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ *     The data was not valid.
+ */
+vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...);
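+
+/* Illustrative sketch (assumes an initialized context named codec;
+ * VP8E_SET_CPUUSED is a codec-specific control id from vpx/vp8cx.h, used
+ * here only as an example):
+ *
+ *   if (vpx_codec_control(&codec, VP8E_SET_CPUUSED, 4) != VPX_CODEC_OK)
+ *     printf("Control failed: %s\n", vpx_codec_error(&codec));
+ */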
 #if defined(VPX_DISABLE_CTRL_TYPECHECKS) && VPX_DISABLE_CTRL_TYPECHECKS
-#    define vpx_codec_control(ctx,id,data) vpx_codec_control_(ctx,id,data)
-#    define VPX_CTRL_USE_TYPE(id, typ)
-#    define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ)
-#    define VPX_CTRL_VOID(id, typ)
+#define vpx_codec_control(ctx, id, data) vpx_codec_control_(ctx, id, data)
+#define VPX_CTRL_USE_TYPE(id, typ)
+#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ)
+#define VPX_CTRL_VOID(id, typ)
 
 #else
-  /*!\brief vpx_codec_control wrapper macro
-   *
-   * This macro allows for type safe conversions across the variadic parameter
-   * to vpx_codec_control_().
-   *
-   * \internal
-   * It works by dispatching the call to the control function through a wrapper
-   * function named with the id parameter.
-   */
-#    define vpx_codec_control(ctx,id,data) vpx_codec_control_##id(ctx,id,data)\
-  /**<\hideinitializer*/
-
+/*!\brief vpx_codec_control wrapper macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_().
+ *
+ * \internal
+ * It works by dispatching the call to the control function through a wrapper
+ * function named with the id parameter.
+ */
+#define vpx_codec_control(ctx, id, data) \
+  vpx_codec_control_##id(ctx, id, data) /**<\hideinitializer*/
 
-  /*!\brief vpx_codec_control type definition macro
-   *
-   * This macro allows for type safe conversions across the variadic parameter
-   * to vpx_codec_control_(). It defines the type of the argument for a given
-   * control identifier.
-   *
-   * \internal
-   * It defines a static function with
-   * the correctly typed arguments as a wrapper to the type-unsafe internal
-   * function.
-   */
-#    define VPX_CTRL_USE_TYPE(id, typ) \
-  static vpx_codec_err_t \
-  vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) UNUSED;\
-  \
-  static vpx_codec_err_t \
-  vpx_codec_control_##id(vpx_codec_ctx_t  *ctx, int ctrl_id, typ data) {\
-    return vpx_codec_control_(ctx, ctrl_id, data);\
+/*!\brief vpx_codec_control type definition macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_(). It defines the type of the argument for a given
+ * control identifier.
+ *
+ * \internal
+ * It defines a static function with
+ * the correctly typed arguments as a wrapper to the type-unsafe internal
+ * function.
+ */
+#define VPX_CTRL_USE_TYPE(id, typ)                                           \
+  static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *, int, typ) \
+      UNUSED;                                                                \
+                                                                             \
+  static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *ctx,        \
+                                                int ctrl_id, typ data) {     \
+    return vpx_codec_control_(ctx, ctrl_id, data);                           \
   } /**<\hideinitializer*/
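+
+/* Illustrative sketch (assumes the declaration below exists for the control
+ * in question, as it does for VP8E_SET_CPUUSED in vpx/vp8cx.h):
+ *
+ *   VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int)
+ *
+ * expands to a static wrapper named vpx_codec_control_VP8E_SET_CPUUSED that
+ * takes an int, so vpx_codec_control(&codec, VP8E_SET_CPUUSED, 4) is checked
+ * at compile time, while passing, say, a pointer there would be diagnosed.
+ */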
 
-
-  /*!\brief vpx_codec_control deprecated type definition macro
-   *
-   * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is
-   * deprecated and should not be used. Consult the documentation for your
-   * codec for more information.
-   *
-   * \internal
-   * It defines a static function with the correctly typed arguments as a
-   * wrapper to the type-unsafe internal function.
-   */
-#    define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \
-  DECLSPEC_DEPRECATED static vpx_codec_err_t \
-  vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) DEPRECATED UNUSED;\
-  \
-  DECLSPEC_DEPRECATED static vpx_codec_err_t \
-  vpx_codec_control_##id(vpx_codec_ctx_t  *ctx, int ctrl_id, typ data) {\
-    return vpx_codec_control_(ctx, ctrl_id, data);\
+/*!\brief vpx_codec_control deprecated type definition macro
+ *
+ * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is
+ * deprecated and should not be used. Consult the documentation for your
+ * codec for more information.
+ *
+ * \internal
+ * It defines a static function with the correctly typed arguments as a
+ * wrapper to the type-unsafe internal function.
+ */
+#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ)                        \
+  DECLSPEC_DEPRECATED static vpx_codec_err_t vpx_codec_control_##id( \
+      vpx_codec_ctx_t *, int, typ) DEPRECATED UNUSED;                \
+                                                                     \
+  DECLSPEC_DEPRECATED static vpx_codec_err_t vpx_codec_control_##id( \
+      vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {                 \
+    return vpx_codec_control_(ctx, ctrl_id, data);                   \
   } /**<\hideinitializer*/
 
-
-  /*!\brief vpx_codec_control void type definition macro
-   *
-   * This macro allows for type safe conversions across the variadic parameter
-   * to vpx_codec_control_(). It indicates that a given control identifier takes
-   * no argument.
-   *
-   * \internal
-   * It defines a static function without a data argument as a wrapper to the
-   * type-unsafe internal function.
-   */
-#    define VPX_CTRL_VOID(id) \
-  static vpx_codec_err_t \
-  vpx_codec_control_##id(vpx_codec_ctx_t*, int) UNUSED;\
-  \
-  static vpx_codec_err_t \
-  vpx_codec_control_##id(vpx_codec_ctx_t  *ctx, int ctrl_id) {\
-    return vpx_codec_control_(ctx, ctrl_id);\
+/*!\brief vpx_codec_control void type definition macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_(). It indicates that a given control identifier takes
+ * no argument.
+ *
+ * \internal
+ * It defines a static function without a data argument as a wrapper to the
+ * type-unsafe internal function.
+ */
+#define VPX_CTRL_VOID(id)                                               \
+  static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *, int) \
+      UNUSED;                                                           \
+                                                                        \
+  static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *ctx,   \
+                                                int ctrl_id) {          \
+    return vpx_codec_control_(ctx, ctrl_id);                            \
   } /**<\hideinitializer*/
 
-
 #endif
 
-  /*!@} - end defgroup codec*/
+/*!@} - end defgroup codec*/
 #ifdef __cplusplus
 }
 #endif
 #endif  // VPX_VPX_CODEC_H_
-
diff --git a/vpx/vpx_decoder.h b/vpx/vpx_decoder.h
index bfe90c6112e9c87fcf2bf0487cd8bb84a59f6a0b..2c94ac454d210090d9a97159887dfdc48ed59932 100644
--- a/vpx/vpx_decoder.h
+++ b/vpx/vpx_decoder.h
@@ -32,347 +32,333 @@ extern "C" {
 #include "./vpx_codec.h"
 #include "./vpx_frame_buffer.h"
 
-  /*!\brief Current ABI version number
-   *
-   * \internal
-   * If this file is altered in any way that changes the ABI, this value
-   * must be bumped.  Examples include, but are not limited to, changing
-   * types, removing or reassigning enums, adding/removing/rearranging
-   * fields to structures
-   */
-#define VPX_DECODER_ABI_VERSION (3 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
-
-  /*! \brief Decoder capabilities bitfield
-   *
-   *  Each decoder advertises the capabilities it supports as part of its
-   *  ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
-   *  or functionality, and are not required to be supported by a decoder.
-   *
-   *  The available flags are specified by VPX_CODEC_CAP_* defines.
-   */
-#define VPX_CODEC_CAP_PUT_SLICE  0x10000 /**< Will issue put_slice callbacks */
-#define VPX_CODEC_CAP_PUT_FRAME  0x20000 /**< Will issue put_frame callbacks */
-#define VPX_CODEC_CAP_POSTPROC   0x40000 /**< Can postprocess decoded frame */
-#define VPX_CODEC_CAP_ERROR_CONCEALMENT   0x80000 /**< Can conceal errors due to
-  packet loss */
-#define VPX_CODEC_CAP_INPUT_FRAGMENTS   0x100000 /**< Can receive encoded frames
-  one fragment at a time */
-
-  /*! \brief Initialization-time Feature Enabling
-   *
-   *  Certain codec features must be known at initialization time, to allow for
-   *  proper memory allocation.
-   *
-   *  The available flags are specified by VPX_CODEC_USE_* defines.
-   */
-#define VPX_CODEC_CAP_FRAME_THREADING   0x200000 /**< Can support frame-based
-                                                      multi-threading */
-#define VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER 0x400000 /**< Can support external
-                                                          frame buffers */
-
-#define VPX_CODEC_USE_POSTPROC   0x10000 /**< Postprocess decoded frame */
-#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000 /**< Conceal errors in decoded
-  frames */
-#define VPX_CODEC_USE_INPUT_FRAGMENTS   0x40000 /**< The input frame should be
-  passed to the decoder one
-  fragment at a time */
-#define VPX_CODEC_USE_FRAME_THREADING   0x80000 /**< Enable frame-based
-                                                     multi-threading */
-
-  /*!\brief Stream properties
-   *
-   * This structure is used to query or set properties of the decoded
-   * stream. Algorithms may extend this structure with data specific
-   * to their bitstream by setting the sz member appropriately.
-   */
-  typedef struct vpx_codec_stream_info {
-    unsigned int sz;     /**< Size of this structure */
-    unsigned int w;      /**< Width (or 0 for unknown/default) */
-    unsigned int h;      /**< Height (or 0 for unknown/default) */
-    unsigned int is_kf;  /**< Current frame is a keyframe */
-  } vpx_codec_stream_info_t;
-
-  /* REQUIRED FUNCTIONS
-   *
-   * The following functions are required to be implemented for all decoders.
-   * They represent the base case functionality expected of all decoders.
-   */
-
+/*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped.  Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
+#define VPX_DECODER_ABI_VERSION \
+  (3 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
 
-  /*!\brief Initialization Configurations
-   *
-   * This structure is used to pass init time configuration options to the
-   * decoder.
-   */
-  typedef struct vpx_codec_dec_cfg {
-    unsigned int threads; /**< Maximum number of threads to use, default 1 */
-    unsigned int w;      /**< Width */
-    unsigned int h;      /**< Height */
-  } vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */
+/*! \brief Decoder capabilities bitfield
+ *
+ *  Each decoder advertises the capabilities it supports as part of its
+ *  ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
+ *  or functionality, and are not required to be supported by a decoder.
+ *
+ *  The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
+#define VPX_CODEC_CAP_PUT_SLICE 0x10000 /**< Will issue put_slice callbacks */
+#define VPX_CODEC_CAP_PUT_FRAME 0x20000 /**< Will issue put_frame callbacks */
+#define VPX_CODEC_CAP_POSTPROC 0x40000  /**< Can postprocess decoded frame */
+#define VPX_CODEC_CAP_ERROR_CONCEALMENT 0x80000
+/**< Can conceal errors due to packet loss */
+#define VPX_CODEC_CAP_INPUT_FRAGMENTS 0x100000
+/**< Can receive encoded frames one fragment at a time */
+
+/*! \brief Initialization-time Feature Enabling
+ *
+ *  Certain codec features must be known at initialization time, to allow for
+ *  proper memory allocation.
+ *
+ *  The available flags are specified by VPX_CODEC_USE_* defines.
+ */
+#define VPX_CODEC_CAP_FRAME_THREADING 0x200000
+/**< Can support frame-based multi-threading */
+#define VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER 0x400000
+/**< Can support external frame buffers */
+
+#define VPX_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */
+#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000
+/**< Conceal errors in decoded frames */
+#define VPX_CODEC_USE_INPUT_FRAGMENTS 0x40000
+/**< The input frame should be passed to the decoder one fragment at a time */
+#define VPX_CODEC_USE_FRAME_THREADING 0x80000
+/**< Enable frame-based multi-threading */
+
+/*!\brief Stream properties
+ *
+ * This structure is used to query or set properties of the decoded
+ * stream. Algorithms may extend this structure with data specific
+ * to their bitstream by setting the sz member appropriately.
+ */
+typedef struct vpx_codec_stream_info {
+  unsigned int sz;    /**< Size of this structure */
+  unsigned int w;     /**< Width (or 0 for unknown/default) */
+  unsigned int h;     /**< Height (or 0 for unknown/default) */
+  unsigned int is_kf; /**< Current frame is a keyframe */
+} vpx_codec_stream_info_t;
+
+/* REQUIRED FUNCTIONS
+ *
+ * The following functions are required to be implemented for all decoders.
+ * They represent the base case functionality expected of all decoders.
+ */
 
+/*!\brief Initialization Configurations
+ *
+ * This structure is used to pass init time configuration options to the
+ * decoder.
+ */
+typedef struct vpx_codec_dec_cfg {
+  unsigned int threads; /**< Maximum number of threads to use, default 1 */
+  unsigned int w;       /**< Width */
+  unsigned int h;       /**< Height */
+} vpx_codec_dec_cfg_t;  /**< alias for struct vpx_codec_dec_cfg */
 
-  /*!\brief Initialize a decoder instance
-   *
-   * Initializes a decoder context using the given interface. Applications
-   * should call the vpx_codec_dec_init convenience macro instead of this
-   * function directly, to ensure that the ABI version number parameter
-   * is properly initialized.
-   *
-   * If the library was configured with --disable-multithread, this call
-   * is not thread safe and should be guarded with a lock if being used
-   * in a multithreaded context.
-   *
-   * \param[in]    ctx     Pointer to this instance's context.
-   * \param[in]    iface   Pointer to the algorithm interface to use.
-   * \param[in]    cfg     Configuration to use, if known. May be NULL.
-   * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
-   * \param[in]    ver     ABI version number. Must be set to
-   *                       VPX_DECODER_ABI_VERSION
-   * \retval #VPX_CODEC_OK
-   *     The decoder algorithm initialized.
-   * \retval #VPX_CODEC_MEM_ERROR
-   *     Memory allocation failed.
-   */
-  vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t      *ctx,
-                                         vpx_codec_iface_t    *iface,
-                                         const vpx_codec_dec_cfg_t *cfg,
-                                         vpx_codec_flags_t     flags,
-                                         int                   ver);
+/*!\brief Initialize a decoder instance
+ *
+ * Initializes a decoder context using the given interface. Applications
+ * should call the vpx_codec_dec_init convenience macro instead of this
+ * function directly, to ensure that the ABI version number parameter
+ * is properly initialized.
+ *
+ * If the library was configured with --disable-multithread, this call
+ * is not thread safe and should be guarded with a lock if being used
+ * in a multithreaded context.
+ *
+ * \param[in]    ctx     Pointer to this instance's context.
+ * \param[in]    iface   Pointer to the algorithm interface to use.
+ * \param[in]    cfg     Configuration to use, if known. May be NULL.
+ * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
+ * \param[in]    ver     ABI version number. Must be set to
+ *                       VPX_DECODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ *     The decoder algorithm initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ *     Memory allocation failed.
+ */
+vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
+                                       vpx_codec_iface_t *iface,
+                                       const vpx_codec_dec_cfg_t *cfg,
+                                       vpx_codec_flags_t flags, int ver);
 
-  /*!\brief Convenience macro for vpx_codec_dec_init_ver()
-   *
-   * Ensures the ABI version parameter is properly set.
-   */
+/*!\brief Convenience macro for vpx_codec_dec_init_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
 #define vpx_codec_dec_init(ctx, iface, cfg, flags) \
   vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION)
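+
+/* Illustrative sketch (assumes a VP9-enabled build; vpx_codec_vp9_dx() is
+ * declared in vpx/vp8dx.h):
+ *
+ *   vpx_codec_ctx_t codec;
+ *   vpx_codec_dec_cfg_t cfg = { 0 };
+ *   cfg.threads = 1;
+ *   if (vpx_codec_dec_init(&codec, vpx_codec_vp9_dx(), &cfg, 0))
+ *     printf("Init failed: %s\n", vpx_codec_error(&codec));
+ */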
 
+/*!\brief Parse stream info from a buffer
+ *
+ * Performs high level parsing of the bitstream. Construction of a decoder
+ * context is not necessary. Can be used to determine if the bitstream is
+ * of the proper format, and to extract information from the stream.
+ *
+ * \param[in]      iface   Pointer to the algorithm interface
+ * \param[in]      data    Pointer to a block of data to parse
+ * \param[in]      data_sz Size of the data buffer
+ * \param[in,out]  si      Pointer to stream info to update. The size member
+ *                         \ref MUST be properly initialized, but \ref MAY be
+ *                         clobbered by the algorithm. This parameter \ref MAY
+ *                         be NULL.
+ *
+ * \retval #VPX_CODEC_OK
+ *     Bitstream is parsable and stream information updated
+ */
+vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
+                                           const uint8_t *data,
+                                           unsigned int data_sz,
+                                           vpx_codec_stream_info_t *si);
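+
+/* Illustrative sketch (buf and buf_sz stand for the application's coded
+ * data; the sz member must be initialized before the call, per the contract
+ * above):
+ *
+ *   vpx_codec_stream_info_t si;
+ *   si.sz = sizeof(si);
+ *   if (vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), buf, buf_sz, &si) ==
+ *       VPX_CODEC_OK)
+ *     printf("%ux%u keyframe=%u\n", si.w, si.h, si.is_kf);
+ */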
 
-  /*!\brief Parse stream info from a buffer
-   *
-   * Performs high level parsing of the bitstream. Construction of a decoder
-   * context is not necessary. Can be used to determine if the bitstream is
-   * of the proper format, and to extract information from the stream.
-   *
-   * \param[in]      iface   Pointer to the algorithm interface
-   * \param[in]      data    Pointer to a block of data to parse
-   * \param[in]      data_sz Size of the data buffer
-   * \param[in,out]  si      Pointer to stream info to update. The size member
-   *                         \ref MUST be properly initialized, but \ref MAY be
-   *                         clobbered by the algorithm. This parameter \ref MAY
-   *                         be NULL.
-   *
-   * \retval #VPX_CODEC_OK
-   *     Bitstream is parsable and stream information updated
-   */
-  vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t       *iface,
-                                             const uint8_t           *data,
-                                             unsigned int             data_sz,
-                                             vpx_codec_stream_info_t *si);
-
-
-  /*!\brief Return information about the current stream.
-   *
-   * Returns information about the stream that has been parsed during decoding.
-   *
-   * \param[in]      ctx     Pointer to this instance's context
-   * \param[in,out]  si      Pointer to stream info to update. The size member
-   *                         \ref MUST be properly initialized, but \ref MAY be
-   *                         clobbered by the algorithm. This parameter \ref MAY
-   *                         be NULL.
-   *
-   * \retval #VPX_CODEC_OK
-   *     Bitstream is parsable and stream information updated
-   */
-  vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t         *ctx,
-                                            vpx_codec_stream_info_t *si);
-
-
-  /*!\brief Decode data
-   *
-   * Processes a buffer of coded data. If the processing results in a new
-   * decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be
-   * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
-   * time stamp) order. Frames produced will always be in PTS (presentation
-   * time stamp) order.
-   * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
-   * data and data_sz can contain a fragment of the encoded frame. Fragment
-   * \#n must contain at least partition \#n, but can also contain subsequent
-   * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must
-   * be empty. When no more data is available, this function should be called
-   * with NULL as data and 0 as data_sz. The memory passed to this function
-   * must be available until the frame has been decoded.
-   *
-   * \param[in] ctx          Pointer to this instance's context
-   * \param[in] data         Pointer to this block of new coded data. If
-   *                         NULL, a VPX_CODEC_CB_PUT_FRAME event is posted
-   *                         for the previously decoded frame.
-   * \param[in] data_sz      Size of the coded data, in bytes.
-   * \param[in] user_priv    Application specific data to associate with
-   *                         this frame.
-   * \param[in] deadline     Soft deadline the decoder should attempt to meet,
-   *                         in us. Set to zero for unlimited.
-   *
-   * \return Returns #VPX_CODEC_OK if the coded data was processed completely
-   *         and future pictures can be decoded without error. Otherwise,
-   *         see the descriptions of the other error codes in ::vpx_codec_err_t
-   *         for recoverability capabilities.
-   */
-  vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t    *ctx,
-                                   const uint8_t        *data,
-                                   unsigned int            data_sz,
-                                   void               *user_priv,
-                                   long                deadline);
-
-
-  /*!\brief Decoded frames iterator
-   *
-   * Iterates over a list of the frames available for display. The iterator
-   * storage should be initialized to NULL to start the iteration. Iteration is
-   * complete when this function returns NULL.
-   *
-   * The list of available frames becomes valid upon completion of the
-   * vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode.
-   *
-   * \param[in]     ctx      Pointer to this instance's context
-   * \param[in,out] iter     Iterator storage, initialized to NULL
-   *
-   * \return Returns a pointer to an image, if one is ready for display. Frames
-   *         produced will always be in PTS (presentation time stamp) order.
-   */
-  vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t  *ctx,
-                                   vpx_codec_iter_t *iter);
-
-
-  /*!\defgroup cap_put_frame Frame-Based Decoding Functions
-   *
-   * The following functions are required to be implemented for all decoders
-   * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these functions
-   * for codecs that don't advertise this capability will result in an error
-   * code being returned, usually VPX_CODEC_ERROR
-   * @{
-   */
-
-  /*!\brief put frame callback prototype
-   *
-   * This callback is invoked by the decoder to notify the application of
-   * the availability of decoded image data.
-   */
-  typedef void (*vpx_codec_put_frame_cb_fn_t)(void        *user_priv,
-                                              const vpx_image_t *img);
+/*!\brief Return information about the current stream.
+ *
+ * Returns information about the stream that has been parsed during decoding.
+ *
+ * \param[in]      ctx     Pointer to this instance's context
+ * \param[in,out]  si      Pointer to stream info to update. The size member
+ *                         \ref MUST be properly initialized, but \ref MAY be
+ *                         clobbered by the algorithm. This parameter \ref MAY
+ *                         be NULL.
+ *
+ * \retval #VPX_CODEC_OK
+ *     Bitstream is parsable and stream information updated
+ */
+vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
+                                          vpx_codec_stream_info_t *si);
 
+/*!\brief Decode data
+ *
+ * Processes a buffer of coded data. If the processing results in a new
+ * decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be
+ * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
+ * time stamp) order. Frames produced will always be in PTS (presentation
+ * time stamp) order.
+ * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
+ * data and data_sz can contain a fragment of the encoded frame. Fragment
+ * \#n must contain at least partition \#n, but can also contain subsequent
+ * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must
+ * be empty. When no more data is available, this function should be called
+ * with NULL as data and 0 as data_sz. The memory passed to this function
+ * must be available until the frame has been decoded.
+ *
+ * \param[in] ctx          Pointer to this instance's context
+ * \param[in] data         Pointer to this block of new coded data. If
+ *                         NULL, a VPX_CODEC_CB_PUT_FRAME event is posted
+ *                         for the previously decoded frame.
+ * \param[in] data_sz      Size of the coded data, in bytes.
+ * \param[in] user_priv    Application specific data to associate with
+ *                         this frame.
+ * \param[in] deadline     Soft deadline the decoder should attempt to meet,
+ *                         in us. Set to zero for unlimited.
+ *
+ * \return Returns #VPX_CODEC_OK if the coded data was processed completely
+ *         and future pictures can be decoded without error. Otherwise,
+ *         see the descriptions of the other error codes in ::vpx_codec_err_t
+ *         for recoverability capabilities.
+ */
+vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data,
+                                 unsigned int data_sz, void *user_priv,
+                                 long deadline);
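+
+/* Illustrative sketch (assumes a decoder initialized with
+ * VPX_CODEC_USE_INPUT_FRAGMENTS; frag0/frag1 stand for the application's
+ * partition buffers): a frame submitted in fragments is terminated by a
+ * NULL/0 call.
+ *
+ *   vpx_codec_decode(&codec, frag0, frag0_sz, NULL, 0);
+ *   vpx_codec_decode(&codec, frag1, frag1_sz, NULL, 0);
+ *   vpx_codec_decode(&codec, NULL, 0, NULL, 0);  // no more fragments
+ */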
 
-  /*!\brief Register for notification of frame completion.
-   *
-   * Registers a given function to be called when a decoded frame is
-   * available.
-   *
-   * \param[in] ctx          Pointer to this instance's context
-   * \param[in] cb           Pointer to the callback function
-   * \param[in] user_priv    User's private data
-   *
-   * \retval #VPX_CODEC_OK
-   *     Callback successfully registered.
-   * \retval #VPX_CODEC_ERROR
-   *     Decoder context not initialized, or algorithm not capable of
-   *     posting slice completion.
-   */
-  vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t             *ctx,
-                                                  vpx_codec_put_frame_cb_fn_t  cb,
-                                                  void                        *user_priv);
+/*!\brief Decoded frames iterator
+ *
+ * Iterates over a list of the frames available for display. The iterator
+ * storage should be initialized to NULL to start the iteration. Iteration is
+ * complete when this function returns NULL.
+ *
+ * The list of available frames becomes valid upon completion of the
+ * vpx_codec_decode call, and remains valid until the next call to
+ * vpx_codec_decode.
+ *
+ * \param[in]     ctx      Pointer to this instance's context
+ * \param[in,out] iter     Iterator storage, initialized to NULL
+ *
+ * \return Returns a pointer to an image, if one is ready for display. Frames
+ *         produced will always be in PTS (presentation time stamp) order.
+ */
+vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter);
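+
+/* Illustrative sketch (show_image is a hypothetical application function):
+ * draining every frame made available by the preceding vpx_codec_decode()
+ * call.
+ *
+ *   vpx_codec_iter_t iter = NULL;
+ *   vpx_image_t *img;
+ *   while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL)
+ *     show_image(img);  // img stays valid until the next decode call
+ */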
 
+/*!\defgroup cap_put_frame Frame-Based Decoding Functions
+ *
+ * The following functions are required to be implemented for all decoders
+ * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these
+ * functions for codecs that don't advertise this capability will result in
+ * an error code being returned, usually VPX_CODEC_ERROR.
+ * @{
+ */
 
-  /*!@} - end defgroup cap_put_frame */
+/*!\brief put frame callback prototype
+ *
+ * This callback is invoked by the decoder to notify the application of
+ * the availability of decoded image data.
+ */
+typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv,
+                                            const vpx_image_t *img);
 
-  /*!\defgroup cap_put_slice Slice-Based Decoding Functions
-   *
-   * The following functions are required to be implemented for all decoders
-   * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these functions
-   * for codecs that don't advertise this capability will result in an error
-   * code being returned, usually VPX_CODEC_ERROR
-   * @{
-   */
+/*!\brief Register for notification of frame completion.
+ *
+ * Registers a given function to be called when a decoded frame is
+ * available.
+ *
+ * \param[in] ctx          Pointer to this instance's context
+ * \param[in] cb           Pointer to the callback function
+ * \param[in] user_priv    User's private data
+ *
+ * \retval #VPX_CODEC_OK
+ *     Callback successfully registered.
+ * \retval #VPX_CODEC_ERROR
+ *     Decoder context not initialized, or algorithm not capable of
+ *     posting frame completion.
+ */
+vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
+                                                vpx_codec_put_frame_cb_fn_t cb,
+                                                void *user_priv);
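+
+/* Illustrative sketch (on_frame and app_state are hypothetical application
+ * names; only valid for decoders advertising VPX_CODEC_CAP_PUT_FRAME):
+ *
+ *   static void on_frame(void *user_priv, const vpx_image_t *img) {
+ *     // consume the decoded image; user_priv is app_state
+ *   }
+ *
+ *   vpx_codec_register_put_frame_cb(&codec, on_frame, app_state);
+ */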
 
-  /*!\brief put slice callback prototype
-   *
-   * This callback is invoked by the decoder to notify the application of
-   * the availability of partially decoded image data. The
-   */
-  typedef void (*vpx_codec_put_slice_cb_fn_t)(void         *user_priv,
-                                              const vpx_image_t      *img,
-                                              const vpx_image_rect_t *valid,
-                                              const vpx_image_rect_t *update);
+/*!@} - end defgroup cap_put_frame */
 
+/*!\defgroup cap_put_slice Slice-Based Decoding Functions
+ *
+ * The following functions are required to be implemented for all decoders
+ * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these
+ * functions for codecs that don't advertise this capability will result in
+ * an error code being returned, usually VPX_CODEC_ERROR.
+ * @{
+ */
 
-  /*!\brief Register for notification of slice completion.
-   *
-   * Registers a given function to be called when a decoded slice is
-   * available.
-   *
-   * \param[in] ctx          Pointer to this instance's context
-   * \param[in] cb           Pointer to the callback function
-   * \param[in] user_priv    User's private data
-   *
-   * \retval #VPX_CODEC_OK
-   *     Callback successfully registered.
-   * \retval #VPX_CODEC_ERROR
-   *     Decoder context not initialized, or algorithm not capable of
-   *     posting slice completion.
-   */
-  vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t             *ctx,
-                                                  vpx_codec_put_slice_cb_fn_t  cb,
-                                                  void                        *user_priv);
+/*!\brief put slice callback prototype
+ *
+ * This callback is invoked by the decoder to notify the application of
+ * the availability of partially decoded image data.
+ */
+typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv,
+                                            const vpx_image_t *img,
+                                            const vpx_image_rect_t *valid,
+                                            const vpx_image_rect_t *update);
 
+/*!\brief Register for notification of slice completion.
+ *
+ * Registers a given function to be called when a decoded slice is
+ * available.
+ *
+ * \param[in] ctx          Pointer to this instance's context
+ * \param[in] cb           Pointer to the callback function
+ * \param[in] user_priv    User's private data
+ *
+ * \retval #VPX_CODEC_OK
+ *     Callback successfully registered.
+ * \retval #VPX_CODEC_ERROR
+ *     Decoder context not initialized, or algorithm not capable of
+ *     posting slice completion.
+ */
+vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
+                                                vpx_codec_put_slice_cb_fn_t cb,
+                                                void *user_priv);
 
-  /*!@} - end defgroup cap_put_slice*/
+/*!@} - end defgroup cap_put_slice*/
 
-  /*!\defgroup cap_external_frame_buffer External Frame Buffer Functions
-   *
-   * The following section is required to be implemented for all decoders
-   * that advertise the VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER capability.
-   * Calling this function for codecs that don't advertise this capability
-   * will result in an error code being returned, usually VPX_CODEC_ERROR.
-   *
-   * \note
-   * Currently this only works with VP9.
-   * @{
-   */
+/*!\defgroup cap_external_frame_buffer External Frame Buffer Functions
+ *
+ * The following section is required to be implemented for all decoders
+ * that advertise the VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER capability.
+ * Calling this function for codecs that don't advertise this capability
+ * will result in an error code being returned, usually VPX_CODEC_ERROR.
+ *
+ * \note
+ * Currently this only works with VP9.
+ * @{
+ */
 
-  /*!\brief Pass in external frame buffers for the decoder to use.
-   *
-   * Registers functions to be called when libvpx needs a frame buffer
-   * to decode the current frame and a function to be called when libvpx does
-   * not internally reference the frame buffer. This set function must
-   * be called before the first call to decode or libvpx will assume the
-   * default behavior of allocating frame buffers internally.
-   *
-   * \param[in] ctx          Pointer to this instance's context
-   * \param[in] cb_get       Pointer to the get callback function
-   * \param[in] cb_release   Pointer to the release callback function
-   * \param[in] cb_priv      Callback's private data
-   *
-   * \retval #VPX_CODEC_OK
-   *     External frame buffers will be used by libvpx.
-   * \retval #VPX_CODEC_INVALID_PARAM
-   *     One or more of the callbacks were NULL.
-   * \retval #VPX_CODEC_ERROR
-   *     Decoder context not initialized, or algorithm not capable of
-   *     using external frame buffers.
-   *
-   * \note
-   * When decoding VP9, the application may be required to pass in at least
-   * #VPX_MAXIMUM_WORK_BUFFERS external frame
-   * buffers.
-   */
-  vpx_codec_err_t vpx_codec_set_frame_buffer_functions(
-      vpx_codec_ctx_t *ctx,
-      vpx_get_frame_buffer_cb_fn_t cb_get,
-      vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv);
+/*!\brief Pass in external frame buffers for the decoder to use.
+ *
+ * Registers functions to be called when libvpx needs a frame buffer
+ * to decode the current frame and a function to be called when libvpx does
+ * not internally reference the frame buffer. This set function must
+ * be called before the first call to decode or libvpx will assume the
+ * default behavior of allocating frame buffers internally.
+ *
+ * \param[in] ctx          Pointer to this instance's context
+ * \param[in] cb_get       Pointer to the get callback function
+ * \param[in] cb_release   Pointer to the release callback function
+ * \param[in] cb_priv      Callback's private data
+ *
+ * \retval #VPX_CODEC_OK
+ *     External frame buffers will be used by libvpx.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ *     One or more of the callbacks were NULL.
+ * \retval #VPX_CODEC_ERROR
+ *     Decoder context not initialized, or algorithm not capable of
+ *     using external frame buffers.
+ *
+ * \note
+ * When decoding VP9, the application may be required to pass in at least
+ * #VPX_MAXIMUM_WORK_BUFFERS external frame buffers.
+ */
+vpx_codec_err_t vpx_codec_set_frame_buffer_functions(
+    vpx_codec_ctx_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
+    vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv);
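+
+/* Illustrative sketch (get_fb and release_fb are hypothetical application
+ * callbacks; the callback typedefs and vpx_codec_frame_buffer_t come from
+ * vpx/vpx_frame_buffer.h):
+ *
+ *   static int get_fb(void *priv, size_t min_size,
+ *                     vpx_codec_frame_buffer_t *fb) {
+ *     fb->data = (uint8_t *)malloc(min_size);
+ *     fb->size = min_size;
+ *     fb->priv = NULL;
+ *     return fb->data ? 0 : -1;  // negative return reports failure
+ *   }
+ *
+ *   static int release_fb(void *priv, vpx_codec_frame_buffer_t *fb) {
+ *     free(fb->data);
+ *     return 0;
+ *   }
+ *
+ *   vpx_codec_set_frame_buffer_functions(&codec, get_fb, release_fb, NULL);
+ */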
 
-  /*!@} - end defgroup cap_external_frame_buffer */
+/*!@} - end defgroup cap_external_frame_buffer */
 
-  /*!@} - end defgroup decoder*/
+/*!@} - end defgroup decoder*/
 #ifdef __cplusplus
 }
 #endif
 #endif  // VPX_VPX_DECODER_H_
-
diff --git a/vpx/vpx_encoder.h b/vpx/vpx_encoder.h
index 955e8735193b67c1bb21678ba345ea62864cd865..1de6d7a250873cb327e975cf8a2c397ce9ec8917 100644
--- a/vpx/vpx_encoder.h
+++ b/vpx/vpx_encoder.h
@@ -31,1013 +31,952 @@ extern "C" {
 
 #include "./vpx_codec.h"
 
-  /*! Temporal Scalability: Maximum length of the sequence defining frame
-   * layer membership
-   */
+/*! Temporal Scalability: Maximum length of the sequence defining frame
+ * layer membership
+ */
 #define VPX_TS_MAX_PERIODICITY 16
 
-  /*! Temporal Scalability: Maximum number of coding layers */
-#define VPX_TS_MAX_LAYERS       5
+/*! Temporal Scalability: Maximum number of coding layers */
+#define VPX_TS_MAX_LAYERS 5
 
-  /*!\deprecated Use #VPX_TS_MAX_PERIODICITY instead. */
+/*!\deprecated Use #VPX_TS_MAX_PERIODICITY instead. */
 #define MAX_PERIODICITY VPX_TS_MAX_PERIODICITY
 
 /*! Temporal+Spatial Scalability: Maximum number of coding layers */
-#define VPX_MAX_LAYERS  12  // 3 temporal + 4 spatial layers are allowed.
+#define VPX_MAX_LAYERS 12  // 3 temporal + 4 spatial layers are allowed.
 
 /*!\deprecated Use #VPX_MAX_LAYERS instead. */
-#define MAX_LAYERS    VPX_MAX_LAYERS  // 3 temporal + 4 spatial layers allowed.
+#define MAX_LAYERS VPX_MAX_LAYERS  // 3 temporal + 4 spatial layers allowed.
 
 /*! Spatial Scalability: Maximum number of coding layers */
-#define VPX_SS_MAX_LAYERS       5
+#define VPX_SS_MAX_LAYERS 5
 
 /*! Spatial Scalability: Default number of coding layers */
-#define VPX_SS_DEFAULT_LAYERS       1
-
-  /*!\brief Current ABI version number
-   *
-   * \internal
-   * If this file is altered in any way that changes the ABI, this value
-   * must be bumped.  Examples include, but are not limited to, changing
-   * types, removing or reassigning enums, adding/removing/rearranging
-   * fields to structures
-   */
-#define VPX_ENCODER_ABI_VERSION (5 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
+#define VPX_SS_DEFAULT_LAYERS 1
 
+/*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped.  Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
+#define VPX_ENCODER_ABI_VERSION \
+  (5 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
 
-  /*! \brief Encoder capabilities bitfield
-   *
-   *  Each encoder advertises the capabilities it supports as part of its
-   *  ::vpx_codec_iface_t interface structure. Capabilities are extra
-   *  interfaces or functionality, and are not required to be supported
-   *  by an encoder.
-   *
-   *  The available flags are specified by VPX_CODEC_CAP_* defines.
-   */
-#define VPX_CODEC_CAP_PSNR  0x10000 /**< Can issue PSNR packets */
+/*! \brief Encoder capabilities bitfield
+ *
+ *  Each encoder advertises the capabilities it supports as part of its
+ *  ::vpx_codec_iface_t interface structure. Capabilities are extra
+ *  interfaces or functionality, and are not required to be supported
+ *  by an encoder.
+ *
+ *  The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
+#define VPX_CODEC_CAP_PSNR 0x10000 /**< Can issue PSNR packets */
 
-  /*! Can output one partition at a time. Each partition is returned in its
-   *  own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for
-   *  every partition but the last. In this mode all frames are always
-   *  returned partition by partition.
-   */
-#define VPX_CODEC_CAP_OUTPUT_PARTITION  0x20000
+/*! Can output one partition at a time. Each partition is returned in its
+ *  own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for
+ *  every partition but the last. In this mode all frames are always
+ *  returned partition by partition.
+ */
+#define VPX_CODEC_CAP_OUTPUT_PARTITION 0x20000
 
 /*! Can support input images at greater than 8 bitdepth.
  */
-#define VPX_CODEC_CAP_HIGHBITDEPTH  0x40000
+#define VPX_CODEC_CAP_HIGHBITDEPTH 0x40000
 
-  /*! \brief Initialization-time Feature Enabling
-   *
-   *  Certain codec features must be known at initialization time, to allow
-   *  for proper memory allocation.
-   *
-   *  The available flags are specified by VPX_CODEC_USE_* defines.
-   */
-#define VPX_CODEC_USE_PSNR  0x10000 /**< Calculate PSNR on each frame */
-#define VPX_CODEC_USE_OUTPUT_PARTITION  0x20000 /**< Make the encoder output one
-  partition at a time. */
+/*! \brief Initialization-time Feature Enabling
+ *
+ *  Certain codec features must be known at initialization time, to allow
+ *  for proper memory allocation.
+ *
+ *  The available flags are specified by VPX_CODEC_USE_* defines.
+ */
+#define VPX_CODEC_USE_PSNR 0x10000 /**< Calculate PSNR on each frame */
+#define VPX_CODEC_USE_OUTPUT_PARTITION 0x20000
+/**< Make the encoder output one partition at a time. */
 #define VPX_CODEC_USE_HIGHBITDEPTH 0x40000 /**< Use high bitdepth */
 
+/*!\brief Generic fixed size buffer structure
+ *
+ * This structure is able to hold a reference to any fixed size buffer.
+ */
+typedef struct vpx_fixed_buf {
+  void *buf;       /**< Pointer to the data */
+  size_t sz;       /**< Length of the buffer, in chars */
+} vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */
 
-  /*!\brief Generic fixed size buffer structure
-   *
-   * This structure is able to hold a reference to any fixed size buffer.
-   */
-  typedef struct vpx_fixed_buf {
-    void          *buf; /**< Pointer to the data */
-    size_t         sz;  /**< Length of the buffer, in chars */
-  } vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */
+/*!\brief Time Stamp Type
+ *
+ * An integer, which when multiplied by the stream's time base, provides
+ * the absolute time of a sample.
+ */
+typedef int64_t vpx_codec_pts_t;
 
+/*!\brief Compressed Frame Flags
+ *
+ * This type represents a bitfield containing information about a compressed
+ * frame that may be useful to an application. The most significant 16 bits
+ * can be used by an algorithm to provide additional detail, for example to
+ * support frame types that are codec specific (MPEG-1 D-frames for example)
+ */
+typedef uint32_t vpx_codec_frame_flags_t;
+#define VPX_FRAME_IS_KEY 0x1 /**< frame is the start of a GOP */
+#define VPX_FRAME_IS_DROPPABLE 0x2
+/**< frame can be dropped without affecting the stream (no future frame depends
+ * on this one) */
+#define VPX_FRAME_IS_INVISIBLE 0x4
+/**< frame should be decoded but will not be shown */
+#define VPX_FRAME_IS_FRAGMENT 0x8
+/**< this is a fragment of the encoded frame */
+
+/*!\brief Error Resilient flags
+ *
+ * These flags define which error resilient features to enable in the
+ * encoder. The flags are specified through the
+ * vpx_codec_enc_cfg::g_error_resilient variable.
+ */
+typedef uint32_t vpx_codec_er_flags_t;
+#define VPX_ERROR_RESILIENT_DEFAULT 0x1
+/**< Improve resiliency against losses of whole frames */
+#define VPX_ERROR_RESILIENT_PARTITIONS 0x2
+/**< The frame partitions are independently decodable by the bool decoder,
+ * meaning that partitions can be decoded even though earlier partitions have
+ * been lost. Note that intra prediction is still done over the partition
+ * boundary. */
+
+/*!\brief Encoder output packet variants
+ *
+ * This enumeration lists the different kinds of data packets that can be
+ * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY
+ * extend this list to provide additional functionality.
+ */
+enum vpx_codec_cx_pkt_kind {
+  VPX_CODEC_CX_FRAME_PKT,   /**< Compressed video frame */
+  VPX_CODEC_STATS_PKT,      /**< Two-pass statistics for this frame */
+  VPX_CODEC_FPMB_STATS_PKT, /**< first pass mb statistics for this frame */
+  VPX_CODEC_PSNR_PKT,       /**< PSNR statistics for this frame */
+// Spatial SVC is still experimental and may be removed before the next ABI
+// bump.
+#if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION)
+  VPX_CODEC_SPATIAL_SVC_LAYER_SIZES, /**< Sizes for each layer in this frame*/
+  VPX_CODEC_SPATIAL_SVC_LAYER_PSNR,  /**< PSNR for each layer in this frame*/
+#endif
+  VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions  */
+};
 
-  /*!\brief Time Stamp Type
-   *
-   * An integer, which when multiplied by the stream's time base, provides
-   * the absolute time of a sample.
-   */
-  typedef int64_t vpx_codec_pts_t;
+/*!\brief Encoder output packet
+ *
+ * This structure contains the different kinds of output data the encoder
+ * may produce while compressing a frame.
+ */
+typedef struct vpx_codec_cx_pkt {
+  enum vpx_codec_cx_pkt_kind kind; /**< packet variant */
+  union {
+    struct {
+      void *buf;                        /**< compressed data buffer */
+      size_t sz;                        /**< length of compressed data */
+      vpx_codec_pts_t pts;              /**< time stamp to show frame
+                                                 (in timebase units) */
+      unsigned long duration;           /**< duration to show frame
+                                                 (in timebase units) */
+      vpx_codec_frame_flags_t flags;    /**< flags for this frame */
+      int partition_id;                 /**< the partition id defines the
+                                             decoding order of the partitions.
+                                             Only applicable when "output
+                                             partition" mode is enabled. First
+                                             partition has id 0. */
+    } frame;                            /**< data for compressed frame packet */
+    vpx_fixed_buf_t twopass_stats;      /**< data for two-pass packet */
+    vpx_fixed_buf_t firstpass_mb_stats; /**< first pass mb packet */
+    struct vpx_psnr_pkt {
+      unsigned int samples[4]; /**< Number of samples, total/y/u/v */
+      uint64_t sse[4];         /**< sum squared error, total/y/u/v */
+      double psnr[4];          /**< PSNR, total/y/u/v */
+    } psnr;                    /**< data for PSNR packet */
+    vpx_fixed_buf_t raw;       /**< data for arbitrary packets */
+// Spatial SVC is still experimental and may be removed before the next
+// ABI bump.
+#if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION)
+    size_t layer_sizes[VPX_SS_MAX_LAYERS];
+    struct vpx_psnr_pkt layer_psnr[VPX_SS_MAX_LAYERS];
+#endif
 
+    /* This packet size is fixed to allow codecs to extend this
+     * interface without having to manage storage for raw packets,
+     * i.e., if it's smaller than 128 bytes, you can store in the
+     * packet list directly.
+     */
+    char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */
+  } data;                                               /**< packet data */
+} vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */
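+
+/* Illustrative sketch (outfile is a hypothetical FILE *): consuming encoder
+ * output with vpx_codec_get_cx_data(), declared later in this header.
+ *
+ *   vpx_codec_iter_t iter = NULL;
+ *   const vpx_codec_cx_pkt_t *pkt;
+ *   while ((pkt = vpx_codec_get_cx_data(&codec, &iter)) != NULL) {
+ *     if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ *       fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile);
+ *       if (pkt->data.frame.flags & VPX_FRAME_IS_KEY)
+ *         printf("key frame at pts %lld\n", (long long)pkt->data.frame.pts);
+ *     }
+ *   }
+ */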
 
-  /*!\brief Compressed Frame Flags
-   *
-   * This type represents a bitfield containing information about a compressed
-   * frame that may be useful to an application. The most significant 16 bits
-   * can be used by an algorithm to provide additional detail, for example to
-   * support frame types that are codec specific (MPEG-1 D-frames for example)
+/*!\brief Encoder return output buffer callback
+ *
+ * This callback function, when registered, is invoked with an output packet
+ * as each spatial layer is encoded.
+ */
+// putting the definitions here for now. (agrange: find if there
+// is a better place for this)
+typedef void (*vpx_codec_enc_output_cx_pkt_cb_fn_t)(vpx_codec_cx_pkt_t *pkt,
+                                                    void *user_data);
+
+/*!\brief Callback function pointer / user data pair storage */
+typedef struct vpx_codec_enc_output_cx_cb_pair {
+  vpx_codec_enc_output_cx_pkt_cb_fn_t output_cx_pkt; /**< Callback function */
+  void *user_priv; /**< Pointer to private data */
+} vpx_codec_priv_output_cx_pkt_cb_pair_t;
+
+/*!\brief Rational Number
+ *
+ * This structure holds a fractional value.
+ */
+typedef struct vpx_rational {
+  int num;        /**< fraction numerator */
+  int den;        /**< fraction denominator */
+} vpx_rational_t; /**< alias for struct vpx_rational */
+
+/*!\brief Multi-pass Encoding Pass */
+enum vpx_enc_pass {
+  VPX_RC_ONE_PASS,   /**< Single pass mode */
+  VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */
+  VPX_RC_LAST_PASS   /**< Final pass of multi-pass mode */
+};
+
+/*!\brief Rate control mode */
+enum vpx_rc_mode {
+  VPX_VBR, /**< Variable Bit Rate (VBR) mode */
+  VPX_CBR, /**< Constant Bit Rate (CBR) mode */
+  VPX_CQ,  /**< Constrained Quality (CQ)  mode */
+  VPX_Q,   /**< Constant Quality (Q) mode */
+};
+
+/*!\brief Keyframe placement mode.
+ *
+ * This enumeration determines whether keyframes are placed automatically by
+ * the encoder or whether this behavior is disabled. Older releases of this
+ * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled.
+ * This name is confusing for this behavior, so the new symbols to be used
+ * are VPX_KF_AUTO and VPX_KF_DISABLED.
+ */
+enum vpx_kf_mode {
+  VPX_KF_FIXED,       /**< deprecated, implies VPX_KF_DISABLED */
+  VPX_KF_AUTO,        /**< Encoder determines optimal placement automatically */
+  VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */
+};
+
+/*!\brief Encoded Frame Flags
+ *
+ * This type indicates a bitfield to be passed to vpx_codec_encode(), defining
+ * per-frame boolean values. By convention, bits common to all codecs will be
+ * named VPX_EFLAG_*, and bits specific to an algorithm will be named
+ * /algo/_eflag_*. The lower order 16 bits are reserved for common use.
+ */
+typedef long vpx_enc_frame_flags_t;
+#define VPX_EFLAG_FORCE_KF (1 << 0) /**< Force this frame to be a keyframe */
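+
+/* Illustrative sketch (img, pts and duration come from the application's
+ * capture loop; VPX_DL_GOOD_QUALITY is defined later in this header):
+ * forcing a keyframe on a particular frame.
+ *
+ *   vpx_codec_encode(&codec, img, pts, duration, VPX_EFLAG_FORCE_KF,
+ *                    VPX_DL_GOOD_QUALITY);
+ */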
+
+/*!\brief Encoder configuration structure
+ *
+ * This structure contains the encoder settings that have common
+ * representations across all codecs. This doesn't imply that all codecs
+ * support all features, however.
+ */
+typedef struct vpx_codec_enc_cfg {
+  /*
+   * generic settings (g)
    */
-  typedef uint32_t vpx_codec_frame_flags_t;
-#define VPX_FRAME_IS_KEY       0x1 /**< frame is the start of a GOP */
-#define VPX_FRAME_IS_DROPPABLE 0x2 /**< frame can be dropped without affecting
-  the stream (no future frame depends on
-              this one) */
-#define VPX_FRAME_IS_INVISIBLE 0x4 /**< frame should be decoded but will not
-  be shown */
-#define VPX_FRAME_IS_FRAGMENT  0x8 /**< this is a fragment of the encoded
-  frame */
-
-  /*!\brief Error Resilient flags
+
+  /*!\brief Algorithm specific "usage" value
    *
-   * These flags define which error resilient features to enable in the
-   * encoder. The flags are specified through the
-   * vpx_codec_enc_cfg::g_error_resilient variable.
+   * Algorithms may define multiple values for usage, which may convey the
+   * intent of how the application intends to use the stream. If this value
+   * is non-zero, consult the documentation for the codec to determine its
+   * meaning.
    */
-  typedef uint32_t vpx_codec_er_flags_t;
-#define VPX_ERROR_RESILIENT_DEFAULT     0x1 /**< Improve resiliency against
-  losses of whole frames */
-#define VPX_ERROR_RESILIENT_PARTITIONS  0x2 /**< The frame partitions are
-  independently decodable by the
-  bool decoder, meaning that
-  partitions can be decoded even
-  though earlier partitions have
-  been lost. Note that intra
-  prediction is still done over
-  the partition boundary. */
-
-  /*!\brief Encoder output packet variants
+  unsigned int g_usage;
+
+  /*!\brief Maximum number of threads to use
    *
-   * This enumeration lists the different kinds of data packets that can be
-   * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY
-   * extend this list to provide additional functionality.
+   * For multi-threaded implementations, use no more than this number of
+   * threads. The codec may use fewer threads than allowed. The value
+   * 0 is equivalent to the value 1.
    */
-  enum vpx_codec_cx_pkt_kind {
-    VPX_CODEC_CX_FRAME_PKT,    /**< Compressed video frame */
-    VPX_CODEC_STATS_PKT,       /**< Two-pass statistics for this frame */
-    VPX_CODEC_FPMB_STATS_PKT,  /**< first pass mb statistics for this frame */
-    VPX_CODEC_PSNR_PKT,        /**< PSNR statistics for this frame */
-    // Spatial SVC is still experimental and may be removed before the next ABI
-    // bump.
-#if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION)
-    VPX_CODEC_SPATIAL_SVC_LAYER_SIZES, /**< Sizes for each layer in this frame*/
-    VPX_CODEC_SPATIAL_SVC_LAYER_PSNR, /**< PSNR for each layer in this frame*/
-#endif
-    VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions  */
-  };
-
+  unsigned int g_threads;
 
-  /*!\brief Encoder output packet
+  /*!\brief Bitstream profile to use
    *
-   * This structure contains the different kinds of output data the encoder
-   * may produce while compressing a frame.
+   * Some codecs support a notion of multiple bitstream profiles. Typically
+   * this maps to a set of features that are turned on or off. Often the
+   * profile to use is determined by the features of the intended decoder.
+   * Consult the documentation for the codec to determine the valid values
+   * for this parameter, or set to zero for a sane default.
    */
-  typedef struct vpx_codec_cx_pkt {
-    enum vpx_codec_cx_pkt_kind  kind; /**< packet variant */
-    union {
-      struct {
-        void                    *buf;      /**< compressed data buffer */
-        size_t                   sz;       /**< length of compressed data */
-        vpx_codec_pts_t          pts;      /**< time stamp to show frame
-                                                    (in timebase units) */
-        unsigned long            duration; /**< duration to show frame
-                                                    (in timebase units) */
-        vpx_codec_frame_flags_t  flags;    /**< flags for this frame */
-        int                      partition_id; /**< the partition id
-                                              defines the decoding order
-                                              of the partitions. Only
-                                              applicable when "output partition"
-                                              mode is enabled. First partition
-                                              has id 0.*/
-
-      } frame;  /**< data for compressed frame packet */
-      vpx_fixed_buf_t twopass_stats;  /**< data for two-pass packet */
-      vpx_fixed_buf_t firstpass_mb_stats; /**< first pass mb packet */
-      struct vpx_psnr_pkt {
-        unsigned int samples[4];  /**< Number of samples, total/y/u/v */
-        uint64_t     sse[4];      /**< sum squared error, total/y/u/v */
-        double       psnr[4];     /**< PSNR, total/y/u/v */
-      } psnr;                       /**< data for PSNR packet */
-      vpx_fixed_buf_t raw;     /**< data for arbitrary packets */
-      // Spatial SVC is still experimental and may be removed before the next
-      // ABI bump.
-#if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION)
-      size_t layer_sizes[VPX_SS_MAX_LAYERS];
-      struct vpx_psnr_pkt layer_psnr[VPX_SS_MAX_LAYERS];
-#endif
+  unsigned int g_profile; /**< profile of bitstream to use */
 
-      /* This packet size is fixed to allow codecs to extend this
-       * interface without having to manage storage for raw packets,
-       * i.e., if it's smaller than 128 bytes, you can store in the
-       * packet list directly.
-       */
-      char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */
-    } data; /**< packet data */
-  } vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */
-
-
-  /*!\brief Encoder return output buffer callback
+  /*!\brief Width of the frame
    *
-   * This callback function, when registered, returns with packets when each
-   * spatial layer is encoded.
+   * This value identifies the presentation resolution of the frame,
+   * in pixels. Note that the frames passed as input to the encoder must
+   * have this resolution. Frames will be presented by the decoder in this
+   * resolution, independent of any spatial resampling the encoder may do.
    */
-  // putting the definitions here for now. (agrange: find if there
-  // is a better place for this)
-  typedef void (* vpx_codec_enc_output_cx_pkt_cb_fn_t)(vpx_codec_cx_pkt_t *pkt,
-                                                       void *user_data);
-
-  /*!\brief Callback function pointer / user data pair storage */
-  typedef struct vpx_codec_enc_output_cx_cb_pair {
-    vpx_codec_enc_output_cx_pkt_cb_fn_t output_cx_pkt; /**< Callback function */
-    void                            *user_priv; /**< Pointer to private data */
-  } vpx_codec_priv_output_cx_pkt_cb_pair_t;
-
-  /*!\brief Rational Number
+  unsigned int g_w;
+
+  /*!\brief Height of the frame
    *
-   * This structure holds a fractional value.
+   * This value identifies the presentation resolution of the frame,
+   * in pixels. Note that the frames passed as input to the encoder must
+   * have this resolution. Frames will be presented by the decoder in this
+   * resolution, independent of any spatial resampling the encoder may do.
    */
-  typedef struct vpx_rational {
-    int num; /**< fraction numerator */
-    int den; /**< fraction denominator */
-  } vpx_rational_t; /**< alias for struct vpx_rational */
-
-
-  /*!\brief Multi-pass Encoding Pass */
-  enum vpx_enc_pass {
-    VPX_RC_ONE_PASS,   /**< Single pass mode */
-    VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */
-    VPX_RC_LAST_PASS   /**< Final pass of multi-pass mode */
-  };
+  unsigned int g_h;
 
-
-  /*!\brief Rate control mode */
-  enum vpx_rc_mode {
-    VPX_VBR,  /**< Variable Bit Rate (VBR) mode */
-    VPX_CBR,  /**< Constant Bit Rate (CBR) mode */
-    VPX_CQ,   /**< Constrained Quality (CQ)  mode */
-    VPX_Q,    /**< Constant Quality (Q) mode */
-  };
-
-
-  /*!\brief Keyframe placement mode.
+  /*!\brief Bit-depth of the codec
    *
-   * This enumeration determines whether keyframes are placed automatically by
-   * the encoder or whether this behavior is disabled. Older releases of this
-   * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled.
-   * This name is confusing for this behavior, so the new symbols to be used
-   * are VPX_KF_AUTO and VPX_KF_DISABLED.
+   * This value identifies the bit depth of the codec.
+   * Only certain bit-depths are supported as identified in the
+   * vpx_bit_depth_t enum.
    */
-  enum vpx_kf_mode {
-    VPX_KF_FIXED, /**< deprecated, implies VPX_KF_DISABLED */
-    VPX_KF_AUTO,  /**< Encoder determines optimal placement automatically */
-    VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */
-  };
+  vpx_bit_depth_t g_bit_depth;
 
-
-  /*!\brief Encoded Frame Flags
+  /*!\brief Bit-depth of the input frames
    *
-   * This type indicates a bitfield to be passed to vpx_codec_encode(), defining
-   * per-frame boolean values. By convention, bits common to all codecs will be
-   * named VPX_EFLAG_*, and bits specific to an algorithm will be named
-   * /algo/_eflag_*. The lower order 16 bits are reserved for common use.
+   * This value identifies the bit depth of the input frames, in bits.
+   * Note that the frames passed as input to the encoder must have
+   * this bit-depth.
    */
-  typedef long vpx_enc_frame_flags_t;
-#define VPX_EFLAG_FORCE_KF (1<<0)  /**< Force this frame to be a keyframe */
-
+  unsigned int g_input_bit_depth;
+
+  /*!\brief Stream timebase units
+   *
+   * Indicates the smallest interval of time, in seconds, used by the stream.
+   * For fixed frame rate material, or variable frame rate material where
+   * frames are timed at a multiple of a given clock (ex: video capture),
+   * the \ref RECOMMENDED method is to set the timebase to the reciprocal
+   * of the frame rate (ex: 1001/30000 for 29.970 Hz NTSC). This allows the
+   * pts to correspond to the frame number, which can be handy. For
+   * re-encoding video from containers with absolute time timestamps, the
+   * \ref RECOMMENDED method is to set the timebase to that of the parent
+   * container or multimedia framework (ex: 1/1000 for ms, as in FLV).
+   */
+  struct vpx_rational g_timebase;
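+  /* Illustrative sketch (cfg is assumed to be a vpx_codec_enc_cfg_t):
+   * for 29.97 Hz NTSC material, setting
+   *
+   *   cfg.g_timebase.num = 1001;
+   *   cfg.g_timebase.den = 30000;
+   *
+   * makes a pts increment of 1 correspond to exactly one frame. */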
 
-  /*!\brief Encoder configuration structure
+  /*!\brief Enable error resilient modes.
    *
-   * This structure contains the encoder settings that have common representations
-   * across all codecs. This doesn't imply that all codecs support all features,
-   * however.
+   * The error resilient bitfield indicates to the encoder which features
+   * it should enable to take measures for streaming over lossy or noisy
+   * links.
    */
-  typedef struct vpx_codec_enc_cfg {
-    /*
-     * generic settings (g)
-     */
-
-    /*!\brief Algorithm specific "usage" value
-     *
-     * Algorithms may define multiple values for usage, which may convey the
-     * intent of how the application intends to use the stream. If this value
-     * is non-zero, consult the documentation for the codec to determine its
-     * meaning.
-     */
-    unsigned int           g_usage;
-
-
-    /*!\brief Maximum number of threads to use
-     *
-     * For multi-threaded implementations, use no more than this number of
-     * threads. The codec may use fewer threads than allowed. The value
-     * 0 is equivalent to the value 1.
-     */
-    unsigned int           g_threads;
-
-
-    /*!\brief Bitstream profile to use
-     *
-     * Some codecs support a notion of multiple bitstream profiles. Typically
-     * this maps to a set of features that are turned on or off. Often the
-     * profile to use is determined by the features of the intended decoder.
-     * Consult the documentation for the codec to determine the valid values
-     * for this parameter, or set to zero for a sane default.
-     */
-    unsigned int           g_profile;  /**< profile of bitstream to use */
-
-
-
-    /*!\brief Width of the frame
-     *
-     * This value identifies the presentation resolution of the frame,
-     * in pixels. Note that the frames passed as input to the encoder must
-     * have this resolution. Frames will be presented by the decoder in this
-     * resolution, independent of any spatial resampling the encoder may do.
-     */
-    unsigned int           g_w;
-
-
-    /*!\brief Height of the frame
-     *
-     * This value identifies the presentation resolution of the frame,
-     * in pixels. Note that the frames passed as input to the encoder must
-     * have this resolution. Frames will be presented by the decoder in this
-     * resolution, independent of any spatial resampling the encoder may do.
-     */
-    unsigned int           g_h;
-
-    /*!\brief Bit-depth of the codec
-     *
-     * This value identifies the bit_depth of the codec,
-     * Only certain bit-depths are supported as identified in the
-     * vpx_bit_depth_t enum.
-     */
-    vpx_bit_depth_t        g_bit_depth;
-
-    /*!\brief Bit-depth of the input frames
-     *
-     * This value identifies the bit_depth of the input frames in bits.
-     * Note that the frames passed as input to the encoder must have
-     * this bit-depth.
-     */
-    unsigned int           g_input_bit_depth;
-
-    /*!\brief Stream timebase units
-     *
-     * Indicates the smallest interval of time, in seconds, used by the stream.
-     * For fixed frame rate material, or variable frame rate material where
-     * frames are timed at a multiple of a given clock (ex: video capture),
-     * the \ref RECOMMENDED method is to set the timebase to the reciprocal
-     * of the frame rate (ex: 1001/30000 for 29.970 Hz NTSC). This allows the
-     * pts to correspond to the frame number, which can be handy. For
-     * re-encoding video from containers with absolute time timestamps, the
-     * \ref RECOMMENDED method is to set the timebase to that of the parent
-     * container or multimedia framework (ex: 1/1000 for ms, as in FLV).
-     */
-    struct vpx_rational    g_timebase;
-
-
-    /*!\brief Enable error resilient modes.
-     *
-     * The error resilient bitfield indicates to the encoder which features
-     * it should enable to take measures for streaming over lossy or noisy
-     * links.
-     */
-    vpx_codec_er_flags_t   g_error_resilient;
-
-
-    /*!\brief Multi-pass Encoding Mode
-     *
-     * This value should be set to the current phase for multi-pass encoding.
-     * For single pass, set to #VPX_RC_ONE_PASS.
-     */
-    enum vpx_enc_pass      g_pass;
-
-
-    /*!\brief Allow lagged encoding
-     *
-     * If set, this value allows the encoder to consume a number of input
-     * frames before producing output frames. This allows the encoder to
-     * base decisions for the current frame on future frames. This does
-     * increase the latency of the encoding pipeline, so it is not appropriate
-     * in all situations (ex: realtime encoding).
-     *
-     * Note that this is a maximum value -- the encoder may produce frames
-     * sooner than the given limit. Set this value to 0 to disable this
-     * feature.
-     */
-    unsigned int           g_lag_in_frames;
-
-
-    /*
-     * rate control settings (rc)
-     */
-
-    /*!\brief Temporal resampling configuration, if supported by the codec.
-     *
-     * Temporal resampling allows the codec to "drop" frames as a strategy to
-     * meet its target data rate. This can cause temporal discontinuities in
-     * the encoded video, which may appear as stuttering during playback. This
-     * trade-off is often acceptable, but for many applications is not. It can
-     * be disabled in these cases.
-     *
-     * Note that not all codecs support this feature. All vpx VPx codecs do.
-     * For other codecs, consult the documentation for that algorithm.
-     *
-     * This threshold is described as a percentage of the target data buffer.
-     * When the data buffer falls below this percentage of fullness, a
-     * dropped frame is indicated. Set the threshold to zero (0) to disable
-     * this feature.
-     */
-    unsigned int           rc_dropframe_thresh;
-
-
-    /*!\brief Enable/disable spatial resampling, if supported by the codec.
-     *
-     * Spatial resampling allows the codec to compress a lower resolution
-     * version of the frame, which is then upscaled by the encoder to the
-     * correct presentation resolution. This increases visual quality at
-     * low data rates, at the expense of CPU time on the encoder/decoder.
-     */
-    unsigned int           rc_resize_allowed;
-
-    /*!\brief Internal coded frame width.
-     *
-     * If spatial resampling is enabled this specifies the width of the
-     * encoded frame.
-     */
-    unsigned int           rc_scaled_width;
-
-    /*!\brief Internal coded frame height.
-     *
-     * If spatial resampling is enabled this specifies the height of the
-     * encoded frame.
-     */
-    unsigned int           rc_scaled_height;
-
-    /*!\brief Spatial resampling up watermark.
-     *
-     * This threshold is described as a percentage of the target data buffer.
-     * When the data buffer rises above this percentage of fullness, the
-     * encoder will step up to a higher resolution version of the frame.
-     */
-    unsigned int           rc_resize_up_thresh;
-
-
-    /*!\brief Spatial resampling down watermark.
-     *
-     * This threshold is described as a percentage of the target data buffer.
-     * When the data buffer falls below this percentage of fullness, the
-     * encoder will step down to a lower resolution version of the frame.
-     */
-    unsigned int           rc_resize_down_thresh;
-
-
-    /*!\brief Rate control algorithm to use.
-     *
-     * Indicates whether the end usage of this stream is to be streamed over
-     * a bandwidth constrained link, indicating that Constant Bit Rate (CBR)
-     * mode should be used, or whether it will be played back on a high
-     * bandwidth link, as from a local disk, where higher variations in
-     * bitrate are acceptable.
-     */
-    enum vpx_rc_mode       rc_end_usage;
-
-
-    /*!\brief Two-pass stats buffer.
-     *
-     * A buffer containing all of the stats packets produced in the first
-     * pass, concatenated.
-     */
-    vpx_fixed_buf_t   rc_twopass_stats_in;
-
-    /*!\brief first pass mb stats buffer.
-     *
-     * A buffer containing all of the first pass mb stats packets produced
-     * in the first pass, concatenated.
-     */
-    vpx_fixed_buf_t   rc_firstpass_mb_stats_in;
-
-    /*!\brief Target data rate
-     *
-     * Target bandwidth to use for this stream, in kilobits per second.
-     */
-    unsigned int           rc_target_bitrate;
-
-
-    /*
-     * quantizer settings
-     */
-
-
-    /*!\brief Minimum (Best Quality) Quantizer
-     *
-     * The quantizer is the most direct control over the quality of the
-     * encoded image. The range of valid values for the quantizer is codec
-     * specific. Consult the documentation for the codec to determine the
-     * values to use. To determine the range programmatically, call
-     * vpx_codec_enc_config_default() with a usage value of 0.
-     */
-    unsigned int           rc_min_quantizer;
-
-
-    /*!\brief Maximum (Worst Quality) Quantizer
-     *
-     * The quantizer is the most direct control over the quality of the
-     * encoded image. The range of valid values for the quantizer is codec
-     * specific. Consult the documentation for the codec to determine the
-     * values to use. To determine the range programmatically, call
-     * vpx_codec_enc_config_default() with a usage value of 0.
-     */
-    unsigned int           rc_max_quantizer;
-
-
-    /*
-     * bitrate tolerance
-     */
-
-
-    /*!\brief Rate control adaptation undershoot control
-     *
-     * This value, expressed as a percentage of the target bitrate,
-     * controls the maximum allowed adaptation speed of the codec.
-     * This factor controls the maximum amount of bits that can
-     * be subtracted from the target bitrate in order to compensate
-     * for prior overshoot.
-     *
-     * Valid values in the range 0-1000.
-     */
-    unsigned int           rc_undershoot_pct;
-
-
-    /*!\brief Rate control adaptation overshoot control
-     *
-     * This value, expressed as a percentage of the target bitrate,
-     * controls the maximum allowed adaptation speed of the codec.
-     * This factor controls the maximum amount of bits that can
-     * be added to the target bitrate in order to compensate for
-     * prior undershoot.
-     *
-     * Valid values in the range 0-1000.
-     */
-    unsigned int           rc_overshoot_pct;
-
-
-    /*
-     * decoder buffer model parameters
-     */
-
-
-    /*!\brief Decoder Buffer Size
-     *
-     * This value indicates the amount of data that may be buffered by the
-     * decoding application. Note that this value is expressed in units of
-     * time (milliseconds). For example, a value of 5000 indicates that the
-     * client will buffer (at least) 5000ms worth of encoded data. Use the
-     * target bitrate (#rc_target_bitrate) to convert to bits/bytes, if
-     * necessary.
-     */
-    unsigned int           rc_buf_sz;
-
-
-    /*!\brief Decoder Buffer Initial Size
-     *
-     * This value indicates the amount of data that will be buffered by the
-     * decoding application prior to beginning playback. This value is
-     * expressed in units of time (milliseconds). Use the target bitrate
-     * (#rc_target_bitrate) to convert to bits/bytes, if necessary.
-     */
-    unsigned int           rc_buf_initial_sz;
-
-
-    /*!\brief Decoder Buffer Optimal Size
-     *
-     * This value indicates the amount of data that the encoder should try
-     * to maintain in the decoder's buffer. This value is expressed in units
-     * of time (milliseconds). Use the target bitrate (#rc_target_bitrate)
-     * to convert to bits/bytes, if necessary.
-     */
-    unsigned int           rc_buf_optimal_sz;
-
-
-    /*
-     * 2 pass rate control parameters
-     */
-
-
-    /*!\brief Two-pass mode CBR/VBR bias
-     *
-     * Bias, expressed on a scale of 0 to 100, for determining target size
-     * for the current frame. The value 0 indicates the optimal CBR mode
-     * value should be used. The value 100 indicates the optimal VBR mode
-     * value should be used. Values in between indicate which way the
-     * encoder should "lean."
-     */
-    unsigned int           rc_2pass_vbr_bias_pct;       /**< RC mode bias between CBR and VBR(0-100: 0->CBR, 100->VBR)   */
-
-
-    /*!\brief Two-pass mode per-GOP minimum bitrate
-     *
-     * This value, expressed as a percentage of the target bitrate, indicates
-     * the minimum bitrate to be used for a single GOP (aka "section")
-     */
-    unsigned int           rc_2pass_vbr_minsection_pct;
+  vpx_codec_er_flags_t g_error_resilient;
 
+  /*!\brief Multi-pass Encoding Mode
+   *
+   * This value should be set to the current phase for multi-pass encoding.
+   * For single pass, set to #VPX_RC_ONE_PASS.
+   */
+  enum vpx_enc_pass g_pass;
 
-    /*!\brief Two-pass mode per-GOP maximum bitrate
-     *
-     * This value, expressed as a percentage of the target bitrate, indicates
-     * the maximum bitrate to be used for a single GOP (aka "section")
-     */
-    unsigned int           rc_2pass_vbr_maxsection_pct;
+  /*!\brief Allow lagged encoding
+   *
+   * If set, this value allows the encoder to consume a number of input
+   * frames before producing output frames. This allows the encoder to
+   * base decisions for the current frame on future frames. This does
+   * increase the latency of the encoding pipeline, so it is not appropriate
+   * in all situations (ex: realtime encoding).
+   *
+   * Note that this is a maximum value -- the encoder may produce frames
+   * sooner than the given limit. Set this value to 0 to disable this
+   * feature.
+   */
+  unsigned int g_lag_in_frames;
 
+  /*
+   * rate control settings (rc)
+   */
 
-    /*
-     * keyframing settings (kf)
-     */
+  /*!\brief Temporal resampling configuration, if supported by the codec.
+   *
+   * Temporal resampling allows the codec to "drop" frames as a strategy to
+   * meet its target data rate. This can cause temporal discontinuities in
+   * the encoded video, which may appear as stuttering during playback. This
+   * trade-off is often acceptable, but for many applications is not. It can
+   * be disabled in these cases.
+   *
+   * Note that not all codecs support this feature. All VPx codecs do.
+   * For other codecs, consult the documentation for that algorithm.
+   *
+   * This threshold is described as a percentage of the target data buffer.
+   * When the data buffer falls below this percentage of fullness, a
+   * dropped frame is indicated. Set the threshold to zero (0) to disable
+   * this feature.
+   */
+  unsigned int rc_dropframe_thresh;
 
-    /*!\brief Keyframe placement mode
-     *
-     * This value indicates whether the encoder should place keyframes at a
-     * fixed interval, or determine the optimal placement automatically
-     * (as governed by the #kf_min_dist and #kf_max_dist parameters)
-     */
-    enum vpx_kf_mode       kf_mode;
+  /*!\brief Enable/disable spatial resampling, if supported by the codec.
+   *
+   * Spatial resampling allows the codec to compress a lower resolution
+   * version of the frame, which is then upscaled by the encoder to the
+   * correct presentation resolution. This increases visual quality at
+   * low data rates, at the expense of CPU time on the encoder/decoder.
+   */
+  unsigned int rc_resize_allowed;
 
+  /*!\brief Internal coded frame width.
+   *
+   * If spatial resampling is enabled this specifies the width of the
+   * encoded frame.
+   */
+  unsigned int rc_scaled_width;
 
-    /*!\brief Keyframe minimum interval
-     *
-     * This value, expressed as a number of frames, prevents the encoder from
-     * placing a keyframe nearer than kf_min_dist to the previous keyframe. At
-     * least kf_min_dist frames non-keyframes will be coded before the next
-     * keyframe. Set kf_min_dist equal to kf_max_dist for a fixed interval.
-     */
-    unsigned int           kf_min_dist;
+  /*!\brief Internal coded frame height.
+   *
+   * If spatial resampling is enabled this specifies the height of the
+   * encoded frame.
+   */
+  unsigned int rc_scaled_height;
 
+  /*!\brief Spatial resampling up watermark.
+   *
+   * This threshold is described as a percentage of the target data buffer.
+   * When the data buffer rises above this percentage of fullness, the
+   * encoder will step up to a higher resolution version of the frame.
+   */
+  unsigned int rc_resize_up_thresh;
 
-    /*!\brief Keyframe maximum interval
-     *
-     * This value, expressed as a number of frames, forces the encoder to code
-     * a keyframe if one has not been coded in the last kf_max_dist frames.
-     * A value of 0 implies all frames will be keyframes. Set kf_min_dist
-     * equal to kf_max_dist for a fixed interval.
-     */
-    unsigned int           kf_max_dist;
+  /*!\brief Spatial resampling down watermark.
+   *
+   * This threshold is described as a percentage of the target data buffer.
+   * When the data buffer falls below this percentage of fullness, the
+   * encoder will step down to a lower resolution version of the frame.
+   */
+  unsigned int rc_resize_down_thresh;
 
-    /*
-     * Spatial scalability settings (ss)
-     */
+  /*!\brief Rate control algorithm to use.
+   *
+   * Indicates whether the end usage of this stream is to be streamed over
+   * a bandwidth constrained link, indicating that Constant Bit Rate (CBR)
+   * mode should be used, or whether it will be played back on a high
+   * bandwidth link, as from a local disk, where higher variations in
+   * bitrate are acceptable.
+   */
+  enum vpx_rc_mode rc_end_usage;
 
-    /*!\brief Number of spatial coding layers.
-     *
-     * This value specifies the number of spatial coding layers to be used.
-     */
-    unsigned int           ss_number_layers;
+  /*!\brief Two-pass stats buffer.
+   *
+   * A buffer containing all of the stats packets produced in the first
+   * pass, concatenated.
+   */
+  vpx_fixed_buf_t rc_twopass_stats_in;
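+  /* Illustrative two-pass sketch (stats is assumed to be a vpx_fixed_buf_t
+   * backed by a large-enough allocation; error handling omitted): run the
+   * first pass with cfg.g_pass = VPX_RC_FIRST_PASS and append the payload
+   * of every VPX_CODEC_STATS_PKT packet:
+   *
+   *   memcpy((char *)stats.buf + stats.sz, pkt->data.twopass_stats.buf,
+   *          pkt->data.twopass_stats.sz);
+   *   stats.sz += pkt->data.twopass_stats.sz;
+   *
+   * then run the final pass with cfg.g_pass = VPX_RC_LAST_PASS and
+   * cfg.rc_twopass_stats_in = stats; */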
 
-    /*!\brief Enable auto alt reference flags for each spatial layer.
-     *
-     * These values specify if auto alt reference frame is enabled for each
-     * spatial layer.
-     */
-    int                    ss_enable_auto_alt_ref[VPX_SS_MAX_LAYERS];
+  /*!\brief first pass mb stats buffer.
+   *
+   * A buffer containing all of the first pass mb stats packets produced
+   * in the first pass, concatenated.
+   */
+  vpx_fixed_buf_t rc_firstpass_mb_stats_in;
 
-    /*!\brief Target bitrate for each spatial layer.
-     *
-     * These values specify the target coding bitrate to be used for each
-     * spatial layer.
-     */
-    unsigned int           ss_target_bitrate[VPX_SS_MAX_LAYERS];
+  /*!\brief Target data rate
+   *
+   * Target bandwidth to use for this stream, in kilobits per second.
+   */
+  unsigned int rc_target_bitrate;
 
-    /*!\brief Number of temporal coding layers.
-     *
-     * This value specifies the number of temporal layers to be used.
-     */
-    unsigned int           ts_number_layers;
+  /*
+   * quantizer settings
+   */
 
-    /*!\brief Target bitrate for each temporal layer.
-     *
-     * These values specify the target coding bitrate to be used for each
-     * temporal layer.
-     */
-    unsigned int           ts_target_bitrate[VPX_TS_MAX_LAYERS];
+  /*!\brief Minimum (Best Quality) Quantizer
+   *
+   * The quantizer is the most direct control over the quality of the
+   * encoded image. The range of valid values for the quantizer is codec
+   * specific. Consult the documentation for the codec to determine the
+   * values to use. To determine the range programmatically, call
+   * vpx_codec_enc_config_default() with a usage value of 0.
+   */
+  unsigned int rc_min_quantizer;
 
-    /*!\brief Frame rate decimation factor for each temporal layer.
-     *
-     * These values specify the frame rate decimation factors to apply
-     * to each temporal layer.
-     */
-    unsigned int           ts_rate_decimator[VPX_TS_MAX_LAYERS];
-
-    /*!\brief Length of the sequence defining frame temporal layer membership.
-     *
-     * This value specifies the length of the sequence that defines the
-     * membership of frames to temporal layers. For example, if the
-     * ts_periodicity = 8, then the frames are assigned to coding layers with a
-     * repeated sequence of length 8.
-    */
-    unsigned int           ts_periodicity;
-
-    /*!\brief Template defining the membership of frames to temporal layers.
-     *
-     * This array defines the membership of frames to temporal coding layers.
-     * For a 2-layer encoding that assigns even numbered frames to one temporal
-     * layer (0) and odd numbered frames to a second temporal layer (1) with
-     * ts_periodicity=8, then ts_layer_id = (0,1,0,1,0,1,0,1).
-    */
-    unsigned int           ts_layer_id[VPX_TS_MAX_PERIODICITY];
-
-    /*!\brief Target bitrate for each spatial/temporal layer.
-     *
-     * These values specify the target coding bitrate to be used for each
-     * spatial/temporal layer.
-     *
-     */
-    unsigned int           layer_target_bitrate[VPX_MAX_LAYERS];
+  /*!\brief Maximum (Worst Quality) Quantizer
+   *
+   * The quantizer is the most direct control over the quality of the
+   * encoded image. The range of valid values for the quantizer is codec
+   * specific. Consult the documentation for the codec to determine the
+   * values to use. To determine the range programmatically, call
+   * vpx_codec_enc_config_default() with a usage value of 0.
+   */
+  unsigned int rc_max_quantizer;
 
-    /*!\brief Temporal layering mode indicating which temporal layering scheme to use.
-     *
-     * The value (refer to VP9E_TEMPORAL_LAYERING_MODE) specifies the
-     * temporal layering mode to use.
-     *
-     */
-    int                    temporal_layering_mode;
-  } vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */
+  /*
+   * bitrate tolerance
+   */
 
-  /*!\brief  vp9 svc extra configure parameters
+  /*!\brief Rate control adaptation undershoot control
    *
-   * This defines max/min quantizers and scale factors for each layer
+   * This value, expressed as a percentage of the target bitrate,
+   * controls the maximum allowed adaptation speed of the codec.
+   * This factor controls the maximum amount of bits that can
+   * be subtracted from the target bitrate in order to compensate
+   * for prior overshoot.
    *
+   * Valid values in the range 0-1000.
    */
-  typedef struct vpx_svc_parameters {
-    int max_quantizers[VPX_MAX_LAYERS]; /**< Max Q for each layer */
-    int min_quantizers[VPX_MAX_LAYERS]; /**< Min Q for each layer */
-    int scaling_factor_num[VPX_MAX_LAYERS]; /**< Scaling factor-numerator */
-    int scaling_factor_den[VPX_MAX_LAYERS]; /**< Scaling factor-denominator */
-    int temporal_layering_mode; /**< Temporal layering mode */
-  } vpx_svc_extra_cfg_t;
-
+  unsigned int rc_undershoot_pct;
 
-  /*!\brief Initialize an encoder instance
+  /*!\brief Rate control adaptation overshoot control
    *
-   * Initializes a encoder context using the given interface. Applications
-   * should call the vpx_codec_enc_init convenience macro instead of this
-   * function directly, to ensure that the ABI version number parameter
-   * is properly initialized.
+   * This value, expressed as a percentage of the target bitrate,
+   * controls the maximum allowed adaptation speed of the codec.
+   * This factor controls the maximum amount of bits that can
+   * be added to the target bitrate in order to compensate for
+   * prior undershoot.
    *
-   * If the library was configured with --disable-multithread, this call
-   * is not thread safe and should be guarded with a lock if being used
-   * in a multithreaded context.
-   *
-   * \param[in]    ctx     Pointer to this instance's context.
-   * \param[in]    iface   Pointer to the algorithm interface to use.
-   * \param[in]    cfg     Configuration to use, if known. May be NULL.
-   * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
-   * \param[in]    ver     ABI version number. Must be set to
-   *                       VPX_ENCODER_ABI_VERSION
-   * \retval #VPX_CODEC_OK
-   *     The decoder algorithm initialized.
-   * \retval #VPX_CODEC_MEM_ERROR
-   *     Memory allocation failed.
+   * Valid values in the range 0-1000.
    */
-  vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t      *ctx,
-                                         vpx_codec_iface_t    *iface,
-                                         const vpx_codec_enc_cfg_t *cfg,
-                                         vpx_codec_flags_t     flags,
-                                         int                   ver);
+  unsigned int rc_overshoot_pct;
 
+  /*
+   * decoder buffer model parameters
+   */
 
-  /*!\brief Convenience macro for vpx_codec_enc_init_ver()
+  /*!\brief Decoder Buffer Size
    *
-   * Ensures the ABI version parameter is properly set.
+   * This value indicates the amount of data that may be buffered by the
+   * decoding application. Note that this value is expressed in units of
+   * time (milliseconds). For example, a value of 5000 indicates that the
+   * client will buffer (at least) 5000ms worth of encoded data. Use the
+   * target bitrate (#rc_target_bitrate) to convert to bits/bytes, if
+   * necessary.
    */
-#define vpx_codec_enc_init(ctx, iface, cfg, flags) \
-  vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION)
-
+  unsigned int rc_buf_sz;
 
-  /*!\brief Initialize multi-encoder instance
+  /*!\brief Decoder Buffer Initial Size
    *
-   * Initializes multi-encoder context using the given interface.
-   * Applications should call the vpx_codec_enc_init_multi convenience macro
-   * instead of this function directly, to ensure that the ABI version number
-   * parameter is properly initialized.
-   *
-   * \param[in]    ctx     Pointer to this instance's context.
-   * \param[in]    iface   Pointer to the algorithm interface to use.
-   * \param[in]    cfg     Configuration to use, if known. May be NULL.
-   * \param[in]    num_enc Total number of encoders.
-   * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
-   * \param[in]    dsf     Pointer to down-sampling factors.
-   * \param[in]    ver     ABI version number. Must be set to
-   *                       VPX_ENCODER_ABI_VERSION
-   * \retval #VPX_CODEC_OK
-   *     The decoder algorithm initialized.
-   * \retval #VPX_CODEC_MEM_ERROR
-   *     Memory allocation failed.
+   * This value indicates the amount of data that will be buffered by the
+   * decoding application prior to beginning playback. This value is
+   * expressed in units of time (milliseconds). Use the target bitrate
+   * (#rc_target_bitrate) to convert to bits/bytes, if necessary.
    */
-  vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t      *ctx,
-                                               vpx_codec_iface_t    *iface,
-                                               vpx_codec_enc_cfg_t  *cfg,
-                                               int                   num_enc,
-                                               vpx_codec_flags_t     flags,
-                                               vpx_rational_t       *dsf,
-                                               int                   ver);
-
+  unsigned int rc_buf_initial_sz;
 
-  /*!\brief Convenience macro for vpx_codec_enc_init_multi_ver()
+  /*!\brief Decoder Buffer Optimal Size
    *
-   * Ensures the ABI version parameter is properly set.
+   * This value indicates the amount of data that the encoder should try
+   * to maintain in the decoder's buffer. This value is expressed in units
+   * of time (milliseconds). Use the target bitrate (#rc_target_bitrate)
+   * to convert to bits/bytes, if necessary.
    */
-#define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \
-  vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \
-                               VPX_ENCODER_ABI_VERSION)
+  unsigned int rc_buf_optimal_sz;
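+  /* Illustrative arithmetic: rc_buf_sz is in milliseconds and
+   * rc_target_bitrate is in kilobits per second, so their product gives
+   * the buffered data in bits. E.g. with rc_target_bitrate = 500 and
+   * rc_buf_sz = 6000, the client buffers about
+   *
+   *   500 kbit/s * 6 s = 3,000,000 bits (375,000 bytes)
+   *
+   * of encoded data. */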
 
+  /*
+   * 2 pass rate control parameters
+   */
 
-  /*!\brief Get a default configuration
-   *
-   * Initializes a encoder configuration structure with default values. Supports
-   * the notion of "usages" so that an algorithm may offer different default
-   * settings depending on the user's intended goal. This function \ref SHOULD
-   * be called by all applications to initialize the configuration structure
-   * before specializing the configuration with application specific values.
-   *
-   * \param[in]    iface     Pointer to the algorithm interface to use.
-   * \param[out]   cfg       Configuration buffer to populate.
-   * \param[in]    reserved  Must set to 0 for VP8 and VP9.
+  /*!\brief Two-pass mode CBR/VBR bias
    *
-   * \retval #VPX_CODEC_OK
-   *     The configuration was populated.
-   * \retval #VPX_CODEC_INCAPABLE
-   *     Interface is not an encoder interface.
-   * \retval #VPX_CODEC_INVALID_PARAM
-   *     A parameter was NULL, or the usage value was not recognized.
+   * Bias, expressed on a scale of 0 to 100, for determining target size
+   * for the current frame. The value 0 indicates the optimal CBR mode
+   * value should be used. The value 100 indicates the optimal VBR mode
+   * value should be used. Values in between indicate which way the
+   * encoder should "lean."
    */
-  vpx_codec_err_t  vpx_codec_enc_config_default(vpx_codec_iface_t    *iface,
-                                                vpx_codec_enc_cfg_t  *cfg,
-                                                unsigned int          reserved);
+  unsigned int rc_2pass_vbr_bias_pct; /**< RC mode bias between CBR and
+                                         VBR (0-100: 0->CBR, 100->VBR) */
 
-
-  /*!\brief Set or change configuration
-   *
-   * Reconfigures an encoder instance according to the given configuration.
+  /*!\brief Two-pass mode per-GOP minimum bitrate
    *
-   * \param[in]    ctx     Pointer to this instance's context
-   * \param[in]    cfg     Configuration buffer to use
+   * This value, expressed as a percentage of the target bitrate, indicates
+   * the minimum bitrate to be used for a single GOP (aka "section").
+   */
+  unsigned int rc_2pass_vbr_minsection_pct;
+
+  /*!\brief Two-pass mode per-GOP maximum bitrate
    *
-   * \retval #VPX_CODEC_OK
-   *     The configuration was populated.
-   * \retval #VPX_CODEC_INCAPABLE
-   *     Interface is not an encoder interface.
-   * \retval #VPX_CODEC_INVALID_PARAM
-   *     A parameter was NULL, or the usage value was not recognized.
+   * This value, expressed as a percentage of the target bitrate, indicates
+   * the maximum bitrate to be used for a single GOP (aka "section").
    */
-  vpx_codec_err_t  vpx_codec_enc_config_set(vpx_codec_ctx_t            *ctx,
-                                            const vpx_codec_enc_cfg_t  *cfg);
+  unsigned int rc_2pass_vbr_maxsection_pct;
 
+  /*
+   * keyframing settings (kf)
+   */
 
-  /*!\brief Get global stream headers
+  /*!\brief Keyframe placement mode
    *
-   * Retrieves a stream level global header packet, if supported by the codec.
+   * This value indicates whether the encoder should place keyframes at a
+   * fixed interval, or determine the optimal placement automatically
+   * (as governed by the #kf_min_dist and #kf_max_dist parameters).
+   */
+  enum vpx_kf_mode kf_mode;
+
+  /*!\brief Keyframe minimum interval
    *
-   * \param[in]    ctx     Pointer to this instance's context
+   * This value, expressed as a number of frames, prevents the encoder from
+   * placing a keyframe nearer than kf_min_dist to the previous keyframe. At
+   * least kf_min_dist non-keyframes will be coded before the next
+   * keyframe. Set kf_min_dist equal to kf_max_dist for a fixed interval.
+   */
+  unsigned int kf_min_dist;
+
+  /*!\brief Keyframe maximum interval
    *
-   * \retval NULL
-   *     Encoder does not support global header
-   * \retval Non-NULL
-   *     Pointer to buffer containing global header packet
+   * This value, expressed as a number of frames, forces the encoder to code
+   * a keyframe if one has not been coded in the last kf_max_dist frames.
+   * A value of 0 implies all frames will be keyframes. Set kf_min_dist
+   * equal to kf_max_dist for a fixed interval.
    */
-  vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t   *ctx);
+  unsigned int kf_max_dist;
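+  /* Illustrative sketch (cfg is assumed to be a vpx_codec_enc_cfg_t): to
+   * force a keyframe exactly every 60 frames, set
+   *
+   *   cfg.kf_mode = VPX_KF_AUTO;
+   *   cfg.kf_min_dist = 60;
+   *   cfg.kf_max_dist = 60;
+   */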
 
+  /*
+   * Spatial scalability settings (ss)
+   */
 
-#define VPX_DL_REALTIME     (1)        /**< deadline parameter analogous to
-  *   VPx REALTIME mode. */
-#define VPX_DL_GOOD_QUALITY (1000000)  /**< deadline parameter analogous to
-  *   VPx GOOD QUALITY mode. */
-#define VPX_DL_BEST_QUALITY (0)        /**< deadline parameter analogous to
-  *   VPx BEST QUALITY mode. */
-  /*!\brief Encode a frame
+  /*!\brief Number of spatial coding layers.
    *
-   * Encodes a video frame at the given "presentation time." The presentation
-   * time stamp (PTS) \ref MUST be strictly increasing.
-   *
-   * The encoder supports the notion of a soft real-time deadline. Given a
-   * non-zero value to the deadline parameter, the encoder will make a "best
-   * effort" guarantee to  return before the given time slice expires. It is
-   * implicit that limiting the available time to encode will degrade the
-   * output quality. The encoder can be given an unlimited time to produce the
-   * best possible frame by specifying a deadline of '0'. This deadline
-   * supercedes the VPx notion of "best quality, good quality, realtime".
-   * Applications that wish to map these former settings to the new deadline
-   * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY,
-   * and #VPX_DL_BEST_QUALITY.
-   *
-   * When the last frame has been passed to the encoder, this function should
-   * continue to be called, with the img parameter set to NULL. This will
-   * signal the end-of-stream condition to the encoder and allow it to encode
-   * any held buffers. Encoding is complete when vpx_codec_encode() is called
-   * and vpx_codec_get_cx_data() returns no data.
-   *
-   * \param[in]    ctx       Pointer to this instance's context
-   * \param[in]    img       Image data to encode, NULL to flush.
-   * \param[in]    pts       Presentation time stamp, in timebase units.
-   * \param[in]    duration  Duration to show frame, in timebase units.
-   * \param[in]    flags     Flags to use for encoding this frame.
-   * \param[in]    deadline  Time to spend encoding, in microseconds. (0=infinite)
-   *
-   * \retval #VPX_CODEC_OK
-   *     The configuration was populated.
-   * \retval #VPX_CODEC_INCAPABLE
-   *     Interface is not an encoder interface.
-   * \retval #VPX_CODEC_INVALID_PARAM
-   *     A parameter was NULL, the image format is unsupported, etc.
+   * This value specifies the number of spatial coding layers to be used.
    */
-  vpx_codec_err_t  vpx_codec_encode(vpx_codec_ctx_t            *ctx,
-                                    const vpx_image_t          *img,
-                                    vpx_codec_pts_t             pts,
-                                    unsigned long               duration,
-                                    vpx_enc_frame_flags_t       flags,
-                                    unsigned long               deadline);
-
-  /*!\brief Set compressed data output buffer
-   *
-   * Sets the buffer that the codec should output the compressed data
-   * into. This call effectively sets the buffer pointer returned in the
-   * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be
-   * appended into this buffer. The buffer is preserved across frames,
-   * so applications must periodically call this function after flushing
-   * the accumulated compressed data to disk or to the network to reset
-   * the pointer to the buffer's head.
-   *
-   * `pad_before` bytes will be skipped before writing the compressed
-   * data, and `pad_after` bytes will be appended to the packet. The size
-   * of the packet will be the sum of the size of the actual compressed
-   * data, pad_before, and pad_after. The padding bytes will be preserved
-   * (not overwritten).
-   *
-   * Note that calling this function does not guarantee that the returned
-   * compressed data will be placed into the specified buffer. In the
-   * event that the encoded data will not fit into the buffer provided,
-   * the returned packet \ref MAY point to an internal buffer, as it would
-   * if this call were never used. In this event, the output packet will
-   * NOT have any padding, and the application must free space and copy it
-   * to the proper place. This is of particular note in configurations
-   * that may output multiple packets for a single encoded frame (e.g., lagged
-   * encoding) or if the application does not reset the buffer periodically.
-   *
-   * Applications may restore the default behavior of the codec providing
-   * the compressed data buffer by calling this function with a NULL
-   * buffer.
-   *
-   * Applications \ref MUSTNOT call this function during iteration of
-   * vpx_codec_get_cx_data().
-   *
-   * \param[in]    ctx         Pointer to this instance's context
-   * \param[in]    buf         Buffer to store compressed data into
-   * \param[in]    pad_before  Bytes to skip before writing compressed data
-   * \param[in]    pad_after   Bytes to skip after writing compressed data
+  unsigned int ss_number_layers;
+
+  /*!\brief Enable auto alt reference flags for each spatial layer.
    *
-   * \retval #VPX_CODEC_OK
-   *     The buffer was set successfully.
-   * \retval #VPX_CODEC_INVALID_PARAM
-   *     A parameter was NULL, the image format is unsupported, etc.
+   * These values specify whether the auto alt reference frame is enabled
+   * for each spatial layer.
    */
-  vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t       *ctx,
-                                            const vpx_fixed_buf_t *buf,
-                                            unsigned int           pad_before,
-                                            unsigned int           pad_after);
+  int ss_enable_auto_alt_ref[VPX_SS_MAX_LAYERS];
 
+  /*!\brief Target bitrate for each spatial layer.
+   *
+   * These values specify the target coding bitrate to be used for each
+   * spatial layer.
+   */
+  unsigned int ss_target_bitrate[VPX_SS_MAX_LAYERS];
 
-  /*!\brief Encoded data iterator
+  /*!\brief Number of temporal coding layers.
    *
-   * Iterates over a list of data packets to be passed from the encoder to the
-   * application. The different kinds of packets available are enumerated in
-   * #vpx_codec_cx_pkt_kind.
+   * This value specifies the number of temporal layers to be used.
+   */
+  unsigned int ts_number_layers;
+
+  /*!\brief Target bitrate for each temporal layer.
    *
-   * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's
-   * muxer. Multiple compressed frames may be in the list.
-   * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer.
+   * These values specify the target coding bitrate to be used for each
+   * temporal layer.
+   */
+  unsigned int ts_target_bitrate[VPX_TS_MAX_LAYERS];
+
+  /*!\brief Frame rate decimation factor for each temporal layer.
    *
-   * The application \ref MUST silently ignore any packet kinds that it does
-   * not recognize or support.
+   * These values specify the frame rate decimation factors to apply
+   * to each temporal layer.
+   */
+  unsigned int ts_rate_decimator[VPX_TS_MAX_LAYERS];
+
+  /*!\brief Length of the sequence defining frame temporal layer membership.
    *
-   * The data buffers returned from this function are only guaranteed to be
-   * valid until the application makes another call to any vpx_codec_* function.
+   * This value specifies the length of the sequence that defines the
+   * membership of frames to temporal layers. For example, if
+   * ts_periodicity = 8, then the frames are assigned to coding layers with a
+   * repeated sequence of length 8.
+   */
+  unsigned int ts_periodicity;
+
+  /*!\brief Template defining the membership of frames to temporal layers.
    *
-   * \param[in]     ctx      Pointer to this instance's context
-   * \param[in,out] iter     Iterator storage, initialized to NULL
+   * This array defines the membership of frames to temporal coding layers.
+   * For a 2-layer encoding that assigns even numbered frames to one temporal
+   * layer (0) and odd numbered frames to a second temporal layer (1) with
+   * ts_periodicity=8, then ts_layer_id = (0,1,0,1,0,1,0,1).
+   */
+  unsigned int ts_layer_id[VPX_TS_MAX_PERIODICITY];
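+  /* Illustrative sketch of the two-layer example above (cfg as before;
+   * the bitrate values are assumptions):
+   *
+   *   cfg.ts_number_layers = 2;
+   *   cfg.ts_periodicity = 2;
+   *   cfg.ts_layer_id[0] = 0;        (even frames in temporal layer 0)
+   *   cfg.ts_layer_id[1] = 1;        (odd frames in temporal layer 1)
+   *   cfg.ts_rate_decimator[0] = 2;  (layer 0 runs at half frame rate)
+   *   cfg.ts_rate_decimator[1] = 1;
+   *   cfg.ts_target_bitrate[0] = 300;
+   *   cfg.ts_target_bitrate[1] = 500;
+   */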
+
+  /*!\brief Target bitrate for each spatial/temporal layer.
    *
-   * \return Returns a pointer to an output data packet (compressed frame data,
-   *         two-pass statistics, etc.) or NULL to signal end-of-list.
+   * These values specify the target coding bitrate to be used for each
+   * spatial/temporal layer.
    *
    */
-  const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t   *ctx,
-                                                  vpx_codec_iter_t  *iter);
+  unsigned int layer_target_bitrate[VPX_MAX_LAYERS];
 
-
-  /*!\brief Get Preview Frame
-   *
-   * Returns an image that can be used as a preview. Shows the image as it would
-   * exist at the decompressor. The application \ref MUST NOT write into this
-   * image buffer.
-   *
-   * \param[in]     ctx      Pointer to this instance's context
+  /*!\brief Temporal layering mode indicating which temporal layering scheme to
+   * use.
    *
-   * \return Returns a pointer to a preview image, or NULL if no image is
-   *         available.
+   * The value (refer to VP9E_TEMPORAL_LAYERING_MODE) specifies the
+   * temporal layering mode to use.
    *
    */
-  const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t   *ctx);
+  int temporal_layering_mode;
+} vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */
 
+/*!\brief VP9 SVC extra configuration parameters
+ *
+ * This defines max/min quantizers and scale factors for each layer.
+ *
+ */
+typedef struct vpx_svc_parameters {
+  int max_quantizers[VPX_MAX_LAYERS];     /**< Max Q for each layer */
+  int min_quantizers[VPX_MAX_LAYERS];     /**< Min Q for each layer */
+  int scaling_factor_num[VPX_MAX_LAYERS]; /**< Scaling factor-numerator */
+  int scaling_factor_den[VPX_MAX_LAYERS]; /**< Scaling factor-denominator */
+  int temporal_layering_mode;             /**< Temporal layering mode */
+} vpx_svc_extra_cfg_t;
+
+/*!\brief Initialize an encoder instance
+ *
+ * Initializes an encoder context using the given interface. Applications
+ * should call the vpx_codec_enc_init convenience macro instead of this
+ * function directly, to ensure that the ABI version number parameter
+ * is properly initialized.
+ *
+ * If the library was configured with --disable-multithread, this call
+ * is not thread safe and should be guarded with a lock if being used
+ * in a multithreaded context.
+ *
+ * \param[in]    ctx     Pointer to this instance's context.
+ * \param[in]    iface   Pointer to the algorithm interface to use.
+ * \param[in]    cfg     Configuration to use, if known. May be NULL.
+ * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
+ * \param[in]    ver     ABI version number. Must be set to
+ *                       VPX_ENCODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ *     The encoder algorithm was initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ *     Memory allocation failed.
+ */
+vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
+                                       vpx_codec_iface_t *iface,
+                                       const vpx_codec_enc_cfg_t *cfg,
+                                       vpx_codec_flags_t flags, int ver);
 
-  /*!@} - end defgroup encoder*/
+/*!\brief Convenience macro for vpx_codec_enc_init_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
+#define vpx_codec_enc_init(ctx, iface, cfg, flags) \
+  vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION)
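+
+/* Illustrative sketch (iface and a populated cfg are assumptions, and
+ * die() is an assumed helper; see vpx_codec_enc_config_default() below
+ * for obtaining cfg):
+ *
+ *   vpx_codec_ctx_t ctx;
+ *   if (vpx_codec_enc_init(&ctx, iface, &cfg, 0) != VPX_CODEC_OK)
+ *     die("init failed");
+ *
+ * The macro is preferred over calling vpx_codec_enc_init_ver() directly
+ * because it fills in the ABI version automatically. */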
+
+/*!\brief Initialize multi-encoder instance
+ *
+ * Initializes a multi-encoder context using the given interface.
+ * Applications should call the vpx_codec_enc_init_multi convenience macro
+ * instead of this function directly, to ensure that the ABI version number
+ * parameter is properly initialized.
+ *
+ * \param[in]    ctx     Pointer to this instance's context.
+ * \param[in]    iface   Pointer to the algorithm interface to use.
+ * \param[in]    cfg     Configuration to use, if known. May be NULL.
+ * \param[in]    num_enc Total number of encoders.
+ * \param[in]    flags   Bitfield of VPX_CODEC_USE_* flags
+ * \param[in]    dsf     Pointer to down-sampling factors.
+ * \param[in]    ver     ABI version number. Must be set to
+ *                       VPX_ENCODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ *     The encoder algorithm was initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ *     Memory allocation failed.
+ */
+vpx_codec_err_t vpx_codec_enc_init_multi_ver(
+    vpx_codec_ctx_t *ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg,
+    int num_enc, vpx_codec_flags_t flags, vpx_rational_t *dsf, int ver);
+
+/*!\brief Convenience macro for vpx_codec_enc_init_multi_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
+#define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \
+  vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf,   \
+                               VPX_ENCODER_ABI_VERSION)
+
+/*!\brief Get a default configuration
+ *
+ * Initializes an encoder configuration structure with default values. Supports
+ * the notion of "usages" so that an algorithm may offer different default
+ * settings depending on the user's intended goal. This function \ref SHOULD
+ * be called by all applications to initialize the configuration structure
+ * before specializing the configuration with application specific values.
+ *
+ * \param[in]    iface     Pointer to the algorithm interface to use.
+ * \param[out]   cfg       Configuration buffer to populate.
+ * \param[in]    reserved  Must be set to 0 for VP8 and VP9.
+ *
+ * \retval #VPX_CODEC_OK
+ *     The configuration was populated.
+ * \retval #VPX_CODEC_INCAPABLE
+ *     Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ *     A parameter was NULL, or the usage value was not recognized.
+ */
+vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
+                                             vpx_codec_enc_cfg_t *cfg,
+                                             unsigned int reserved);
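+
+/* Illustrative sketch (iface as above): obtain defaults first, then
+ * override only the fields the application cares about:
+ *
+ *   vpx_codec_enc_cfg_t cfg;
+ *   vpx_codec_enc_config_default(iface, &cfg, 0);
+ *   cfg.g_w = 640;
+ *   cfg.g_h = 480;
+ *   cfg.rc_target_bitrate = 500;   (kilobits per second)
+ */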
+
+/*!\brief Set or change configuration
+ *
+ * Reconfigures an encoder instance according to the given configuration.
+ *
+ * \param[in]    ctx     Pointer to this instance's context
+ * \param[in]    cfg     Configuration buffer to use
+ *
+ * \retval #VPX_CODEC_OK
+ *     The configuration was applied.
+ * \retval #VPX_CODEC_INCAPABLE
+ *     Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ *     A parameter was NULL, or the usage value was not recognized.
+ */
+vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
+                                         const vpx_codec_enc_cfg_t *cfg);
+
+/*!\brief Get global stream headers
+ *
+ * Retrieves a stream level global header packet, if supported by the codec.
+ *
+ * \param[in]    ctx     Pointer to this instance's context
+ *
+ * \retval NULL
+ *     Encoder does not support global header
+ * \retval Non-NULL
+ *     Pointer to buffer containing global header packet
+ */
+vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx);
+
+#define VPX_DL_REALTIME (1)
+/**< deadline parameter analogous to VPx REALTIME mode. */
+#define VPX_DL_GOOD_QUALITY (1000000)
+/**< deadline parameter analogous to VPx GOOD QUALITY mode. */
+#define VPX_DL_BEST_QUALITY (0)
+/**< deadline parameter analogous to VPx BEST QUALITY mode. */
+/*!\brief Encode a frame
+ *
+ * Encodes a video frame at the given "presentation time." The presentation
+ * time stamp (PTS) \ref MUST be strictly increasing.
+ *
+ * The encoder supports the notion of a soft real-time deadline. Given a
+ * non-zero value to the deadline parameter, the encoder will make a "best
+ * effort" guarantee to  return before the given time slice expires. It is
+ * implicit that limiting the available time to encode will degrade the
+ * output quality. The encoder can be given an unlimited time to produce the
+ * best possible frame by specifying a deadline of '0'. This deadline
+ * supersedes the VPx notion of "best quality, good quality, realtime".
+ * Applications that wish to map these former settings to the new deadline
+ * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY,
+ * and #VPX_DL_BEST_QUALITY.
+ *
+ * When the last frame has been passed to the encoder, this function should
+ * continue to be called, with the img parameter set to NULL. This will
+ * signal the end-of-stream condition to the encoder and allow it to encode
+ * any held buffers. Encoding is complete when vpx_codec_encode() is called
+ * and vpx_codec_get_cx_data() returns no data.
+ *
+ * \param[in]    ctx       Pointer to this instance's context
+ * \param[in]    img       Image data to encode, NULL to flush.
+ * \param[in]    pts       Presentation time stamp, in timebase units.
+ * \param[in]    duration  Duration to show frame, in timebase units.
+ * \param[in]    flags     Flags to use for encoding this frame.
+ * \param[in]    deadline  Time to spend encoding, in microseconds. (0=infinite)
+ *
+ * \retval #VPX_CODEC_OK
+ *     The frame was successfully submitted for encoding.
+ * \retval #VPX_CODEC_INCAPABLE
+ *     Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ *     A parameter was NULL, the image format is unsupported, etc.
+ */
+vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img,
+                                 vpx_codec_pts_t pts, unsigned long duration,
+                                 vpx_enc_frame_flags_t flags,
+                                 unsigned long deadline);
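+
+/* Illustrative encode loop (img and get_frame() are assumptions; error
+ * handling omitted). Note the NULL image used to flush the encoder at
+ * end-of-stream:
+ *
+ *   vpx_codec_pts_t pts = 0;
+ *   while ((img = get_frame()) != NULL) {
+ *     vpx_codec_encode(&ctx, img, pts, 1, 0, VPX_DL_GOOD_QUALITY);
+ *     pts += 1;
+ *   }
+ *   vpx_codec_encode(&ctx, NULL, pts, 1, 0, VPX_DL_GOOD_QUALITY);
+ */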
+
+/*!\brief Set compressed data output buffer
+ *
+ * Sets the buffer that the codec should output the compressed data
+ * into. This call effectively sets the buffer pointer returned in the
+ * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be
+ * appended into this buffer. The buffer is preserved across frames,
+ * so applications must periodically call this function after flushing
+ * the accumulated compressed data to disk or to the network to reset
+ * the pointer to the buffer's head.
+ *
+ * `pad_before` bytes will be skipped before writing the compressed
+ * data, and `pad_after` bytes will be appended to the packet. The size
+ * of the packet will be the sum of the size of the actual compressed
+ * data, pad_before, and pad_after. The padding bytes will be preserved
+ * (not overwritten).
+ *
+ * Note that calling this function does not guarantee that the returned
+ * compressed data will be placed into the specified buffer. In the
+ * event that the encoded data will not fit into the buffer provided,
+ * the returned packet \ref MAY point to an internal buffer, as it would
+ * if this call were never used. In this event, the output packet will
+ * NOT have any padding, and the application must allocate space and copy it
+ * to the proper place. This is of particular note in configurations
+ * that may output multiple packets for a single encoded frame (e.g., lagged
+ * encoding) or if the application does not reset the buffer periodically.
+ *
+ * Applications may restore the default behavior of the codec providing
+ * the compressed data buffer by calling this function with a NULL
+ * buffer.
+ *
+ * Applications \ref MUSTNOT call this function during iteration of
+ * vpx_codec_get_cx_data().
+ *
+ * \param[in]    ctx         Pointer to this instance's context
+ * \param[in]    buf         Buffer to store compressed data into
+ * \param[in]    pad_before  Bytes to skip before writing compressed data
+ * \param[in]    pad_after   Bytes to append after writing compressed data
+ *
+ * \retval #VPX_CODEC_OK
+ *     The buffer was set successfully.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ *     A parameter was NULL, the image format is unsupported, etc.
+ */
+vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
+                                          const vpx_fixed_buf_t *buf,
+                                          unsigned int pad_before,
+                                          unsigned int pad_after);
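+
+/* Illustrative sketch (the application-owned allocation mem is an
+ * assumption, not a requirement of the API):
+ *
+ *   static unsigned char mem[1024 * 1024];
+ *   vpx_fixed_buf_t buf = { mem, sizeof(mem) };
+ *   vpx_codec_set_cx_data_buf(&ctx, &buf, 0, 0);
+ *
+ * Passing a NULL buffer restores the codec-provided buffer behavior. */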
+
+/*!\brief Encoded data iterator
+ *
+ * Iterates over a list of data packets to be passed from the encoder to the
+ * application. The different kinds of packets available are enumerated in
+ * #vpx_codec_cx_pkt_kind.
+ *
+ * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's
+ * muxer. Multiple compressed frames may be in the list.
+ * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer.
+ *
+ * The application \ref MUST silently ignore any packet kinds that it does
+ * not recognize or support.
+ *
+ * The data buffers returned from this function are only guaranteed to be
+ * valid until the application makes another call to any vpx_codec_* function.
+ *
+ * \param[in]     ctx      Pointer to this instance's context
+ * \param[in,out] iter     Iterator storage, initialized to NULL
+ *
+ * \return Returns a pointer to an output data packet (compressed frame data,
+ *         two-pass statistics, etc.) or NULL to signal end-of-list.
+ *
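+ * A minimal drain-loop sketch (ctx is an initialized encoder context;
+ * write_frame() is a hypothetical muxer hook):
+ * @code
+ * vpx_codec_iter_t iter = NULL;
+ * const vpx_codec_cx_pkt_t *pkt;
+ * while ((pkt = vpx_codec_get_cx_data(&ctx, &iter)) != NULL) {
+ *   if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
+ *     write_frame(pkt->data.frame.buf, pkt->data.frame.sz);
+ * }
+ * @endcode
+ *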
+ */
+const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
+                                                vpx_codec_iter_t *iter);
+
+/*!\brief Get Preview Frame
+ *
+ * Returns an image that can be used as a preview, showing the frame as it
+ * would appear at the decompressor. The application \ref MUST NOT write
+ * into this image buffer.
+ *
+ * \param[in]     ctx      Pointer to this instance's context
+ *
+ * \return Returns a pointer to a preview image, or NULL if no image is
+ *         available.
+ *
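+ * A minimal sketch (show_image() is a hypothetical display hook):
+ * @code
+ * const vpx_image_t *preview = vpx_codec_get_preview_frame(&ctx);
+ * if (preview) show_image(preview);  // read-only access
+ * @endcode
+ *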
+ */
+const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx);
+
+/*!@} - end defgroup encoder*/
 #ifdef __cplusplus
 }
 #endif
 #endif  // VPX_VPX_ENCODER_H_
-
diff --git a/vpx/vpx_frame_buffer.h b/vpx/vpx_frame_buffer.h
index 109aec445c1ec8d8e8596312021031762cf071b5..8adbe25aad815a51ace428e6b38b1325ffdeba3d 100644
--- a/vpx/vpx_frame_buffer.h
+++ b/vpx/vpx_frame_buffer.h
@@ -37,9 +37,9 @@ extern "C" {
  * This structure holds allocated frame buffers used by the decoder.
  */
 typedef struct vpx_codec_frame_buffer {
-  uint8_t *data;  /**< Pointer to the data buffer */
-  size_t size;  /**< Size of data in bytes */
-  void *priv;  /**< Frame's private data */
+  uint8_t *data; /**< Pointer to the data buffer */
+  size_t size;   /**< Size of data in bytes */
+  void *priv;    /**< Frame's private data */
 } vpx_codec_frame_buffer_t;
 
 /*!\brief get frame buffer callback prototype
@@ -60,8 +60,8 @@ typedef struct vpx_codec_frame_buffer {
 * \param[in] min_size     Size in bytes needed by the buffer
  * \param[in,out] fb       Pointer to vpx_codec_frame_buffer_t
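+ *
+ * A minimal sketch, assuming a plain calloc-backed allocator (names are
+ * illustrative):
+ * @code
+ * int get_fb(void *priv, size_t min_size, vpx_codec_frame_buffer_t *fb) {
+ *   (void)priv;
+ *   fb->data = (uint8_t *)calloc(1, min_size);  // calloc: buffer starts zeroed
+ *   if (!fb->data) return -1;                   // negative return on failure
+ *   fb->size = min_size;
+ *   return 0;
+ * }
+ * @endcode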
  */
-typedef int (*vpx_get_frame_buffer_cb_fn_t)(
-    void *priv, size_t min_size, vpx_codec_frame_buffer_t *fb);
+typedef int (*vpx_get_frame_buffer_cb_fn_t)(void *priv, size_t min_size,
+                                            vpx_codec_frame_buffer_t *fb);
 
 /*!\brief release frame buffer callback prototype
  *
@@ -73,8 +73,8 @@ typedef int (*vpx_get_frame_buffer_cb_fn_t)(
  * \param[in] priv         Callback's private data
  * \param[in] fb           Pointer to vpx_codec_frame_buffer_t
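+ *
+ * A minimal sketch matching the calloc-based allocator above (names are
+ * illustrative):
+ * @code
+ * int release_fb(void *priv, vpx_codec_frame_buffer_t *fb) {
+ *   (void)priv;
+ *   free(fb->data);
+ *   fb->data = NULL;
+ *   return 0;
+ * }
+ * @endcode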
  */
-typedef int (*vpx_release_frame_buffer_cb_fn_t)(
-    void *priv, vpx_codec_frame_buffer_t *fb);
+typedef int (*vpx_release_frame_buffer_cb_fn_t)(void *priv,
+                                                vpx_codec_frame_buffer_t *fb);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vpx/vpx_image.h b/vpx/vpx_image.h
index 7958c69806ed0fbefdcca9ce1b075a5eb734d815..3e58b87996e99e9dea214a758cc867c84a4e53c4 100644
--- a/vpx/vpx_image.h
+++ b/vpx/vpx_image.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /*!\file
  * \brief Describes the vpx image descriptor and associated operations
  *
@@ -20,213 +19,204 @@
 extern "C" {
 #endif
 
-  /*!\brief Current ABI version number
-   *
-   * \internal
-   * If this file is altered in any way that changes the ABI, this value
-   * must be bumped.  Examples include, but are not limited to, changing
-   * types, removing or reassigning enums, adding/removing/rearranging
-   * fields to structures
-   */
+/*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped.  Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
 #define VPX_IMAGE_ABI_VERSION (4) /**<\hideinitializer*/
 
-
-#define VPX_IMG_FMT_PLANAR     0x100  /**< Image is a planar format. */
-#define VPX_IMG_FMT_UV_FLIP    0x200  /**< V plane precedes U in memory. */
-#define VPX_IMG_FMT_HAS_ALPHA  0x400  /**< Image has an alpha channel. */
-#define VPX_IMG_FMT_HIGHBITDEPTH 0x800  /**< Image uses 16bit framebuffer. */
-
-  /*!\brief List of supported image formats */
-  typedef enum vpx_img_fmt {
-    VPX_IMG_FMT_NONE,
-    VPX_IMG_FMT_RGB24,   /**< 24 bit per pixel packed RGB */
-    VPX_IMG_FMT_RGB32,   /**< 32 bit per pixel packed 0RGB */
-    VPX_IMG_FMT_RGB565,  /**< 16 bit per pixel, 565 */
-    VPX_IMG_FMT_RGB555,  /**< 16 bit per pixel, 555 */
-    VPX_IMG_FMT_UYVY,    /**< UYVY packed YUV */
-    VPX_IMG_FMT_YUY2,    /**< YUYV packed YUV */
-    VPX_IMG_FMT_YVYU,    /**< YVYU packed YUV */
-    VPX_IMG_FMT_BGR24,   /**< 24 bit per pixel packed BGR */
-    VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */
-    VPX_IMG_FMT_ARGB,     /**< 32 bit packed ARGB, alpha=255 */
-    VPX_IMG_FMT_ARGB_LE,  /**< 32 bit packed BGRA, alpha=255 */
-    VPX_IMG_FMT_RGB565_LE,  /**< 16 bit per pixel, gggbbbbb rrrrrggg */
-    VPX_IMG_FMT_RGB555_LE,  /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
-    VPX_IMG_FMT_YV12    = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
-    VPX_IMG_FMT_I420    = VPX_IMG_FMT_PLANAR | 2,
-    VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 3, /** < planar 4:2:0 format with vpx color space */
-    VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4,
-    VPX_IMG_FMT_I422    = VPX_IMG_FMT_PLANAR | 5,
-    VPX_IMG_FMT_I444    = VPX_IMG_FMT_PLANAR | 6,
-    VPX_IMG_FMT_I440    = VPX_IMG_FMT_PLANAR | 7,
-    VPX_IMG_FMT_444A    = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 6,
-    VPX_IMG_FMT_I42016    = VPX_IMG_FMT_I420 | VPX_IMG_FMT_HIGHBITDEPTH,
-    VPX_IMG_FMT_I42216    = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGHBITDEPTH,
-    VPX_IMG_FMT_I44416    = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGHBITDEPTH,
-    VPX_IMG_FMT_I44016    = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH
-  } vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
-
-  /*!\brief List of supported color spaces */
-  typedef enum vpx_color_space {
-    VPX_CS_UNKNOWN    = 0,  /**< Unknown */
-    VPX_CS_BT_601     = 1,  /**< BT.601 */
-    VPX_CS_BT_709     = 2,  /**< BT.709 */
-    VPX_CS_SMPTE_170  = 3,  /**< SMPTE.170 */
-    VPX_CS_SMPTE_240  = 4,  /**< SMPTE.240 */
-    VPX_CS_BT_2020    = 5,  /**< BT.2020 */
-    VPX_CS_RESERVED   = 6,  /**< Reserved */
-    VPX_CS_SRGB       = 7   /**< sRGB */
-  } vpx_color_space_t; /**< alias for enum vpx_color_space */
-
-  /*!\brief List of supported color range */
-  typedef enum vpx_color_range {
-    VPX_CR_STUDIO_RANGE = 0,    /**< Y [16..235], UV [16..240] */
-    VPX_CR_FULL_RANGE   = 1     /**< YUV/RGB [0..255] */
-  } vpx_color_range_t; /**< alias for enum vpx_color_range */
-
-  /**\brief Image Descriptor */
-  typedef struct vpx_image {
-    vpx_img_fmt_t fmt; /**< Image Format */
-    vpx_color_space_t cs; /**< Color Space */
-    vpx_color_range_t range; /**< Color Range */
-
-    /* Image storage dimensions */
-    unsigned int  w;           /**< Stored image width */
-    unsigned int  h;           /**< Stored image height */
-    unsigned int  bit_depth;   /**< Stored image bit-depth */
-
-    /* Image display dimensions */
-    unsigned int  d_w;   /**< Displayed image width */
-    unsigned int  d_h;   /**< Displayed image height */
-
-    /* Image intended rendering dimensions */
-    unsigned int  r_w;   /**< Intended rendering image width */
-    unsigned int  r_h;   /**< Intended rendering image height */
-
-    /* Chroma subsampling info */
-    unsigned int  x_chroma_shift;   /**< subsampling order, X */
-    unsigned int  y_chroma_shift;   /**< subsampling order, Y */
-
-    /* Image data pointers. */
-#define VPX_PLANE_PACKED 0   /**< To be used for all packed formats */
-#define VPX_PLANE_Y      0   /**< Y (Luminance) plane */
-#define VPX_PLANE_U      1   /**< U (Chroma) plane */
-#define VPX_PLANE_V      2   /**< V (Chroma) plane */
-#define VPX_PLANE_ALPHA  3   /**< A (Transparency) plane */
-    unsigned char *planes[4];  /**< pointer to the top left pixel for each plane */
-    int      stride[4];  /**< stride between rows for each plane */
-
-    int     bps; /**< bits per sample (for packed formats) */
-
-    /* The following member may be set by the application to associate data
-     * with this image.
-     */
-    void    *user_priv; /**< may be set by the application to associate data
-                         *   with this image. */
-
-    /* The following members should be treated as private. */
-    unsigned char *img_data;       /**< private */
-    int      img_data_owner; /**< private */
-    int      self_allocd;    /**< private */
-
-    void    *fb_priv; /**< Frame buffer data associated with the image. */
-  } vpx_image_t; /**< alias for struct vpx_image */
-
-  /**\brief Representation of a rectangle on a surface */
-  typedef struct vpx_image_rect {
-    unsigned int x; /**< leftmost column */
-    unsigned int y; /**< topmost row */
-    unsigned int w; /**< width */
-    unsigned int h; /**< height */
-  } vpx_image_rect_t; /**< alias for struct vpx_image_rect */
-
-  /*!\brief Open a descriptor, allocating storage for the underlying image
-   *
-   * Returns a descriptor for storing an image of the given format. The
-   * storage for the descriptor is allocated on the heap.
-   *
-   * \param[in]    img       Pointer to storage for descriptor. If this parameter
-   *                         is NULL, the storage for the descriptor will be
-   *                         allocated on the heap.
-   * \param[in]    fmt       Format for the image
-   * \param[in]    d_w       Width of the image
-   * \param[in]    d_h       Height of the image
-   * \param[in]    align     Alignment, in bytes, of the image buffer and
-   *                         each row in the image(stride).
-   *
-   * \return Returns a pointer to the initialized image descriptor. If the img
-   *         parameter is non-null, the value of the img parameter will be
-   *         returned.
-   */
-  vpx_image_t *vpx_img_alloc(vpx_image_t  *img,
-                             vpx_img_fmt_t fmt,
-                             unsigned int d_w,
-                             unsigned int d_h,
-                             unsigned int align);
-
-  /*!\brief Open a descriptor, using existing storage for the underlying image
-   *
-   * Returns a descriptor for storing an image of the given format. The
-   * storage for descriptor has been allocated elsewhere, and a descriptor is
-   * desired to "wrap" that storage.
-   *
-   * \param[in]    img       Pointer to storage for descriptor. If this parameter
-   *                         is NULL, the storage for the descriptor will be
-   *                         allocated on the heap.
-   * \param[in]    fmt       Format for the image
-   * \param[in]    d_w       Width of the image
-   * \param[in]    d_h       Height of the image
-   * \param[in]    align     Alignment, in bytes, of each row in the image.
-   * \param[in]    img_data  Storage to use for the image
-   *
-   * \return Returns a pointer to the initialized image descriptor. If the img
-   *         parameter is non-null, the value of the img parameter will be
-   *         returned.
-   */
-  vpx_image_t *vpx_img_wrap(vpx_image_t  *img,
-                            vpx_img_fmt_t fmt,
-                            unsigned int d_w,
-                            unsigned int d_h,
-                            unsigned int align,
-                            unsigned char      *img_data);
-
-
-  /*!\brief Set the rectangle identifying the displayed portion of the image
-   *
-   * Updates the displayed rectangle (aka viewport) on the image surface to
-   * match the specified coordinates and size.
-   *
-   * \param[in]    img       Image descriptor
-   * \param[in]    x         leftmost column
-   * \param[in]    y         topmost row
-   * \param[in]    w         width
-   * \param[in]    h         height
-   *
-   * \return 0 if the requested rectangle is valid, nonzero otherwise.
-   */
-  int vpx_img_set_rect(vpx_image_t  *img,
-                       unsigned int  x,
-                       unsigned int  y,
-                       unsigned int  w,
-                       unsigned int  h);
-
-
-  /*!\brief Flip the image vertically (top for bottom)
-   *
-   * Adjusts the image descriptor's pointers and strides to make the image
-   * be referenced upside-down.
-   *
-   * \param[in]    img       Image descriptor
+#define VPX_IMG_FMT_PLANAR 0x100       /**< Image is a planar format. */
+#define VPX_IMG_FMT_UV_FLIP 0x200      /**< V plane precedes U in memory. */
+#define VPX_IMG_FMT_HAS_ALPHA 0x400    /**< Image has an alpha channel. */
+#define VPX_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16-bit framebuffer. */
+
+/*!\brief List of supported image formats */
+typedef enum vpx_img_fmt {
+  VPX_IMG_FMT_NONE,
+  VPX_IMG_FMT_RGB24,     /**< 24 bit per pixel packed RGB */
+  VPX_IMG_FMT_RGB32,     /**< 32 bit per pixel packed 0RGB */
+  VPX_IMG_FMT_RGB565,    /**< 16 bit per pixel, 565 */
+  VPX_IMG_FMT_RGB555,    /**< 16 bit per pixel, 555 */
+  VPX_IMG_FMT_UYVY,      /**< UYVY packed YUV */
+  VPX_IMG_FMT_YUY2,      /**< YUYV packed YUV */
+  VPX_IMG_FMT_YVYU,      /**< YVYU packed YUV */
+  VPX_IMG_FMT_BGR24,     /**< 24 bit per pixel packed BGR */
+  VPX_IMG_FMT_RGB32_LE,  /**< 32 bit packed BGR0 */
+  VPX_IMG_FMT_ARGB,      /**< 32 bit packed ARGB, alpha=255 */
+  VPX_IMG_FMT_ARGB_LE,   /**< 32 bit packed BGRA, alpha=255 */
+  VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */
+  VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
+  VPX_IMG_FMT_YV12 =
+      VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
+  VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2,
+  VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP |
+                        3, /**< planar 4:2:0 format with vpx color space */
+  VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4,
+  VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5,
+  VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6,
+  VPX_IMG_FMT_I440 = VPX_IMG_FMT_PLANAR | 7,
+  VPX_IMG_FMT_444A = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 6,
+  VPX_IMG_FMT_I42016 = VPX_IMG_FMT_I420 | VPX_IMG_FMT_HIGHBITDEPTH,
+  VPX_IMG_FMT_I42216 = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGHBITDEPTH,
+  VPX_IMG_FMT_I44416 = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGHBITDEPTH,
+  VPX_IMG_FMT_I44016 = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH
+} vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
+
+/*!\brief List of supported color spaces */
+typedef enum vpx_color_space {
+  VPX_CS_UNKNOWN = 0,   /**< Unknown */
+  VPX_CS_BT_601 = 1,    /**< BT.601 */
+  VPX_CS_BT_709 = 2,    /**< BT.709 */
+  VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */
+  VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */
+  VPX_CS_BT_2020 = 5,   /**< BT.2020 */
+  VPX_CS_RESERVED = 6,  /**< Reserved */
+  VPX_CS_SRGB = 7       /**< sRGB */
+} vpx_color_space_t;    /**< alias for enum vpx_color_space */
+
+/*!\brief List of supported color ranges */
+typedef enum vpx_color_range {
+  VPX_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */
+  VPX_CR_FULL_RANGE = 1    /**< YUV/RGB [0..255] */
+} vpx_color_range_t;       /**< alias for enum vpx_color_range */
+
+/**\brief Image Descriptor */
+typedef struct vpx_image {
+  vpx_img_fmt_t fmt;       /**< Image Format */
+  vpx_color_space_t cs;    /**< Color Space */
+  vpx_color_range_t range; /**< Color Range */
+
+  /* Image storage dimensions */
+  unsigned int w;         /**< Stored image width */
+  unsigned int h;         /**< Stored image height */
+  unsigned int bit_depth; /**< Stored image bit-depth */
+
+  /* Image display dimensions */
+  unsigned int d_w; /**< Displayed image width */
+  unsigned int d_h; /**< Displayed image height */
+
+  /* Image intended rendering dimensions */
+  unsigned int r_w; /**< Intended rendering image width */
+  unsigned int r_h; /**< Intended rendering image height */
+
+  /* Chroma subsampling info */
+  unsigned int x_chroma_shift; /**< subsampling order, X */
+  unsigned int y_chroma_shift; /**< subsampling order, Y */
+
+/* Image data pointers. */
+#define VPX_PLANE_PACKED 0  /**< To be used for all packed formats */
+#define VPX_PLANE_Y 0       /**< Y (Luminance) plane */
+#define VPX_PLANE_U 1       /**< U (Chroma) plane */
+#define VPX_PLANE_V 2       /**< V (Chroma) plane */
+#define VPX_PLANE_ALPHA 3   /**< A (Transparency) plane */
+  unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */
+  int stride[4];            /**< stride between rows for each plane */
+
+  int bps; /**< bits per sample (for packed formats) */
+
+  /* The following member may be set by the application to associate data
+   * with this image.
    */
-  void vpx_img_flip(vpx_image_t *img);
+  void *user_priv; /**< may be set by the application to associate data
+                    *   with this image. */
+
+  /* The following members should be treated as private. */
+  unsigned char *img_data; /**< private */
+  int img_data_owner;      /**< private */
+  int self_allocd;         /**< private */
+
+  void *fb_priv; /**< Frame buffer data associated with the image. */
+} vpx_image_t;   /**< alias for struct vpx_image */
+
+/**\brief Representation of a rectangle on a surface */
+typedef struct vpx_image_rect {
+  unsigned int x;   /**< leftmost column */
+  unsigned int y;   /**< topmost row */
+  unsigned int w;   /**< width */
+  unsigned int h;   /**< height */
+} vpx_image_rect_t; /**< alias for struct vpx_image_rect */
+
+/*!\brief Open a descriptor, allocating storage for the underlying image
+ *
+ * Returns a descriptor for storing an image of the given format. The
+ * storage for the descriptor is allocated on the heap.
+ *
+ * \param[in]    img       Pointer to storage for descriptor. If this parameter
+ *                         is NULL, the storage for the descriptor will be
+ *                         allocated on the heap.
+ * \param[in]    fmt       Format for the image
+ * \param[in]    d_w       Width of the image
+ * \param[in]    d_h       Height of the image
+ * \param[in]    align     Alignment, in bytes, of the image buffer and
+ *                         each row in the image (stride).
+ *
+ * \return Returns a pointer to the initialized image descriptor. If the img
+ *         parameter is non-null, the value of the img parameter will be
+ *         returned.
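+ *
+ * A minimal sketch (format, dimensions, and alignment are arbitrary):
+ * @code
+ * vpx_image_t *img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, 640, 480, 16);
+ * if (img) {
+ *   unsigned char *y = img->planes[VPX_PLANE_Y];  // top-left of the Y plane
+ *   vpx_img_free(img);
+ * }
+ * @endcode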
+ */
+vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt,
+                           unsigned int d_w, unsigned int d_h,
+                           unsigned int align);
 
-  /*!\brief Close an image descriptor
-   *
-   * Frees all allocated storage associated with an image descriptor.
-   *
-   * \param[in]    img       Image descriptor
-   */
-  void vpx_img_free(vpx_image_t *img);
+/*!\brief Open a descriptor, using existing storage for the underlying image
+ *
+ * Returns a descriptor for storing an image of the given format. The
+ * storage for the descriptor has been allocated elsewhere, and a descriptor is
+ * desired to "wrap" that storage.
+ *
+ * \param[in]    img       Pointer to storage for descriptor. If this parameter
+ *                         is NULL, the storage for the descriptor will be
+ *                         allocated on the heap.
+ * \param[in]    fmt       Format for the image
+ * \param[in]    d_w       Width of the image
+ * \param[in]    d_h       Height of the image
+ * \param[in]    align     Alignment, in bytes, of each row in the image.
+ * \param[in]    img_data  Storage to use for the image
+ *
+ * \return Returns a pointer to the initialized image descriptor. If the img
+ *         parameter is non-null, the value of the img parameter will be
+ *         returned.
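+ *
+ * A minimal sketch wrapping caller-owned storage (sizes are illustrative):
+ * @code
+ * unsigned char data[640 * 480 * 3 / 2];  // tightly packed 640x480 I420
+ * vpx_image_t img;
+ * vpx_img_wrap(&img, VPX_IMG_FMT_I420, 640, 480, 1, data);
+ * @endcode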
+ */
+vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w,
+                          unsigned int d_h, unsigned int align,
+                          unsigned char *img_data);
+
+/*!\brief Set the rectangle identifying the displayed portion of the image
+ *
+ * Updates the displayed rectangle (aka viewport) on the image surface to
+ * match the specified coordinates and size.
+ *
+ * \param[in]    img       Image descriptor
+ * \param[in]    x         leftmost column
+ * \param[in]    y         topmost row
+ * \param[in]    w         width
+ * \param[in]    h         height
+ *
+ * \return 0 if the requested rectangle is valid, nonzero otherwise.
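+ *
+ * For example, a sketch selecting the top-left quadrant:
+ * @code
+ * vpx_img_set_rect(img, 0, 0, img->d_w / 2, img->d_h / 2);
+ * @endcode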
+ */
+int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y,
+                     unsigned int w, unsigned int h);
+
+/*!\brief Flip the image vertically (top for bottom)
+ *
+ * Adjusts the image descriptor's pointers and strides to make the image
+ * be referenced upside-down.
+ *
+ * \param[in]    img       Image descriptor
+ */
+void vpx_img_flip(vpx_image_t *img);
+
+/*!\brief Close an image descriptor
+ *
+ * Frees all allocated storage associated with an image descriptor.
+ *
+ * \param[in]    img       Image descriptor
+ */
+void vpx_img_free(vpx_image_t *img);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/vpx/vpx_integer.h b/vpx/vpx_integer.h
index 829c9d132c8c16dcd92a5e688e4c524787c143fb..43e4b388b2e0dcc34c81865dc5826414fb9f4894 100644
--- a/vpx/vpx_integer.h
+++ b/vpx/vpx_integer.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VPX_VPX_INTEGER_H_
 #define VPX_VPX_INTEGER_H_
 
@@ -25,16 +24,16 @@
 #endif
 
 #if (defined(_MSC_VER) && (_MSC_VER < 1600)) || defined(VPX_EMULATE_INTTYPES)
-typedef signed char  int8_t;
+typedef signed char int8_t;
 typedef signed short int16_t;
-typedef signed int   int32_t;
+typedef signed int int32_t;
 
-typedef unsigned char  uint8_t;
+typedef unsigned char uint8_t;
 typedef unsigned short uint16_t;
-typedef unsigned int   uint32_t;
+typedef unsigned int uint32_t;
 
 #if (defined(_MSC_VER) && (_MSC_VER < 1600))
-typedef signed __int64   int64_t;
+typedef signed __int64 int64_t;
 typedef unsigned __int64 uint64_t;
 #define INT64_MAX _I64_MAX
 #define INT32_MAX _I32_MAX
@@ -52,12 +51,12 @@ typedef size_t uintptr_t;
 /* Most platforms have the C99 standard integer types. */
 
 #if defined(__cplusplus)
-# if !defined(__STDC_FORMAT_MACROS)
-#  define __STDC_FORMAT_MACROS
-# endif
-# if !defined(__STDC_LIMIT_MACROS)
-#  define __STDC_LIMIT_MACROS
-# endif
+#if !defined(__STDC_FORMAT_MACROS)
+#define __STDC_FORMAT_MACROS
+#endif
+#if !defined(__STDC_LIMIT_MACROS)
+#define __STDC_LIMIT_MACROS
+#endif
 #endif  // __cplusplus
 
 #include <stdint.h>
diff --git a/vpx_dsp/arm/fwd_txfm_neon.c b/vpx_dsp/arm/fwd_txfm_neon.c
index 9f9de98d90eb65093d5c0617b51eadbcce6d765b..7cb2ba90d2fe6cd63ecac35bc432fac58b9e35ba 100644
--- a/vpx_dsp/arm/fwd_txfm_neon.c
+++ b/vpx_dsp/arm/fwd_txfm_neon.c
@@ -131,14 +131,14 @@ void vpx_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) {
       // 14 15 16 17 54 55 56 57
       // 24 25 26 27 64 65 66 67
       // 34 35 36 37 74 75 76 77
-      const int32x4x2_t r02_s32 = vtrnq_s32(vreinterpretq_s32_s16(out_0),
-                                            vreinterpretq_s32_s16(out_2));
-      const int32x4x2_t r13_s32 = vtrnq_s32(vreinterpretq_s32_s16(out_1),
-                                            vreinterpretq_s32_s16(out_3));
-      const int32x4x2_t r46_s32 = vtrnq_s32(vreinterpretq_s32_s16(out_4),
-                                            vreinterpretq_s32_s16(out_6));
-      const int32x4x2_t r57_s32 = vtrnq_s32(vreinterpretq_s32_s16(out_5),
-                                            vreinterpretq_s32_s16(out_7));
+      const int32x4x2_t r02_s32 =
+          vtrnq_s32(vreinterpretq_s32_s16(out_0), vreinterpretq_s32_s16(out_2));
+      const int32x4x2_t r13_s32 =
+          vtrnq_s32(vreinterpretq_s32_s16(out_1), vreinterpretq_s32_s16(out_3));
+      const int32x4x2_t r46_s32 =
+          vtrnq_s32(vreinterpretq_s32_s16(out_4), vreinterpretq_s32_s16(out_6));
+      const int32x4x2_t r57_s32 =
+          vtrnq_s32(vreinterpretq_s32_s16(out_5), vreinterpretq_s32_s16(out_7));
       const int16x8x2_t r01_s16 =
           vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[0]),
                     vreinterpretq_s16_s32(r13_s32.val[0]));
diff --git a/vpx_dsp/arm/idct16x16_1_add_neon.c b/vpx_dsp/arm/idct16x16_1_add_neon.c
index f734e48027944b9630e39768c3258269fa1fb53c..466b408893e2bfc9d367e65658e5f6de3b59c55d 100644
--- a/vpx_dsp/arm/idct16x16_1_add_neon.c
+++ b/vpx_dsp/arm/idct16x16_1_add_neon.c
@@ -13,49 +13,46 @@
 #include "vpx_dsp/inv_txfm.h"
 #include "vpx_ports/mem.h"
 
-void vpx_idct16x16_1_add_neon(
-        int16_t *input,
-        uint8_t *dest,
-        int dest_stride) {
-    uint8x8_t d2u8, d3u8, d30u8, d31u8;
-    uint64x1_t d2u64, d3u64, d4u64, d5u64;
-    uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
-    int16x8_t q0s16;
-    uint8_t *d1, *d2;
-    int16_t i, j, a1, cospi_16_64 = 11585;
-    int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
-    out = dct_const_round_shift(out * cospi_16_64);
-    a1 = ROUND_POWER_OF_TWO(out, 6);
-
-    q0s16 = vdupq_n_s16(a1);
-    q0u16 = vreinterpretq_u16_s16(q0s16);
-
-    for (d1 = d2 = dest, i = 0; i < 4; i++) {
-        for (j = 0; j < 2; j++) {
-            d2u64 = vld1_u64((const uint64_t *)d1);
-            d3u64 = vld1_u64((const uint64_t *)(d1 + 8));
-            d1 += dest_stride;
-            d4u64 = vld1_u64((const uint64_t *)d1);
-            d5u64 = vld1_u64((const uint64_t *)(d1 + 8));
-            d1 += dest_stride;
-
-            q9u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d2u64));
-            q10u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d3u64));
-            q11u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d4u64));
-            q12u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d5u64));
-
-            d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-            d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
-            d30u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
-            d31u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
-
-            vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
-            vst1_u64((uint64_t *)(d2 + 8), vreinterpret_u64_u8(d3u8));
-            d2 += dest_stride;
-            vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d30u8));
-            vst1_u64((uint64_t *)(d2 + 8), vreinterpret_u64_u8(d31u8));
-            d2 += dest_stride;
-        }
+void vpx_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+  uint8x8_t d2u8, d3u8, d30u8, d31u8;
+  uint64x1_t d2u64, d3u64, d4u64, d5u64;
+  uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
+  int16x8_t q0s16;
+  uint8_t *d1, *d2;
+  int16_t i, j, a1, cospi_16_64 = 11585;
+  int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+  out = dct_const_round_shift(out * cospi_16_64);
+  a1 = ROUND_POWER_OF_TWO(out, 6);
+
+  q0s16 = vdupq_n_s16(a1);
+  q0u16 = vreinterpretq_u16_s16(q0s16);
+
+  for (d1 = d2 = dest, i = 0; i < 4; i++) {
+    for (j = 0; j < 2; j++) {
+      d2u64 = vld1_u64((const uint64_t *)d1);
+      d3u64 = vld1_u64((const uint64_t *)(d1 + 8));
+      d1 += dest_stride;
+      d4u64 = vld1_u64((const uint64_t *)d1);
+      d5u64 = vld1_u64((const uint64_t *)(d1 + 8));
+      d1 += dest_stride;
+
+      q9u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d2u64));
+      q10u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d3u64));
+      q11u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d4u64));
+      q12u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d5u64));
+
+      d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+      d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
+      d30u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
+      d31u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
+
+      vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
+      vst1_u64((uint64_t *)(d2 + 8), vreinterpret_u64_u8(d3u8));
+      d2 += dest_stride;
+      vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d30u8));
+      vst1_u64((uint64_t *)(d2 + 8), vreinterpret_u64_u8(d31u8));
+      d2 += dest_stride;
     }
-    return;
+  }
+  return;
 }
diff --git a/vpx_dsp/arm/idct16x16_add_neon.c b/vpx_dsp/arm/idct16x16_add_neon.c
index 651ebb21f9967d4ac37380f403e7907ce8f3a2d9..6c03aff609b911e62c55b744ee8e1c9b1fdad24d 100644
--- a/vpx_dsp/arm/idct16x16_add_neon.c
+++ b/vpx_dsp/arm/idct16x16_add_neon.c
@@ -13,1175 +13,736 @@
 #include "./vpx_config.h"
 #include "vpx_dsp/txfm_common.h"
 
-static INLINE void TRANSPOSE8X8(
-        int16x8_t *q8s16,
-        int16x8_t *q9s16,
-        int16x8_t *q10s16,
-        int16x8_t *q11s16,
-        int16x8_t *q12s16,
-        int16x8_t *q13s16,
-        int16x8_t *q14s16,
-        int16x8_t *q15s16) {
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
-    int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
-
-    d16s16 = vget_low_s16(*q8s16);
-    d17s16 = vget_high_s16(*q8s16);
-    d18s16 = vget_low_s16(*q9s16);
-    d19s16 = vget_high_s16(*q9s16);
-    d20s16 = vget_low_s16(*q10s16);
-    d21s16 = vget_high_s16(*q10s16);
-    d22s16 = vget_low_s16(*q11s16);
-    d23s16 = vget_high_s16(*q11s16);
-    d24s16 = vget_low_s16(*q12s16);
-    d25s16 = vget_high_s16(*q12s16);
-    d26s16 = vget_low_s16(*q13s16);
-    d27s16 = vget_high_s16(*q13s16);
-    d28s16 = vget_low_s16(*q14s16);
-    d29s16 = vget_high_s16(*q14s16);
-    d30s16 = vget_low_s16(*q15s16);
-    d31s16 = vget_high_s16(*q15s16);
-
-    *q8s16  = vcombine_s16(d16s16, d24s16);  // vswp d17, d24
-    *q9s16  = vcombine_s16(d18s16, d26s16);  // vswp d19, d26
-    *q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
-    *q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
-    *q12s16 = vcombine_s16(d17s16, d25s16);
-    *q13s16 = vcombine_s16(d19s16, d27s16);
-    *q14s16 = vcombine_s16(d21s16, d29s16);
-    *q15s16 = vcombine_s16(d23s16, d31s16);
-
-    q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q8s16),
-                        vreinterpretq_s32_s16(*q10s16));
-    q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q9s16),
-                        vreinterpretq_s32_s16(*q11s16));
-    q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q12s16),
-                        vreinterpretq_s32_s16(*q14s16));
-    q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q13s16),
-                        vreinterpretq_s32_s16(*q15s16));
-
-    q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
-                        vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
-    q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
-                        vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
-    q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
-                        vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
-    q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
-                        vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
-
-    *q8s16  = q0x2s16.val[0];
-    *q9s16  = q0x2s16.val[1];
-    *q10s16 = q1x2s16.val[0];
-    *q11s16 = q1x2s16.val[1];
-    *q12s16 = q2x2s16.val[0];
-    *q13s16 = q2x2s16.val[1];
-    *q14s16 = q3x2s16.val[0];
-    *q15s16 = q3x2s16.val[1];
-    return;
+static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16,
+                                int16x8_t *q10s16, int16x8_t *q11s16,
+                                int16x8_t *q12s16, int16x8_t *q13s16,
+                                int16x8_t *q14s16, int16x8_t *q15s16) {
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
+  int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
+
+  d16s16 = vget_low_s16(*q8s16);
+  d17s16 = vget_high_s16(*q8s16);
+  d18s16 = vget_low_s16(*q9s16);
+  d19s16 = vget_high_s16(*q9s16);
+  d20s16 = vget_low_s16(*q10s16);
+  d21s16 = vget_high_s16(*q10s16);
+  d22s16 = vget_low_s16(*q11s16);
+  d23s16 = vget_high_s16(*q11s16);
+  d24s16 = vget_low_s16(*q12s16);
+  d25s16 = vget_high_s16(*q12s16);
+  d26s16 = vget_low_s16(*q13s16);
+  d27s16 = vget_high_s16(*q13s16);
+  d28s16 = vget_low_s16(*q14s16);
+  d29s16 = vget_high_s16(*q14s16);
+  d30s16 = vget_low_s16(*q15s16);
+  d31s16 = vget_high_s16(*q15s16);
+
+  *q8s16 = vcombine_s16(d16s16, d24s16);   // vswp d17, d24
+  *q9s16 = vcombine_s16(d18s16, d26s16);   // vswp d19, d26
+  *q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
+  *q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
+  *q12s16 = vcombine_s16(d17s16, d25s16);
+  *q13s16 = vcombine_s16(d19s16, d27s16);
+  *q14s16 = vcombine_s16(d21s16, d29s16);
+  *q15s16 = vcombine_s16(d23s16, d31s16);
+
+  q0x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q8s16), vreinterpretq_s32_s16(*q10s16));
+  q1x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q9s16), vreinterpretq_s32_s16(*q11s16));
+  q2x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q12s16), vreinterpretq_s32_s16(*q14s16));
+  q3x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q13s16), vreinterpretq_s32_s16(*q15s16));
+
+  q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
+                      vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
+  q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
+                      vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
+  q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
+                      vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
+  q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
+                      vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
+
+  *q8s16 = q0x2s16.val[0];
+  *q9s16 = q0x2s16.val[1];
+  *q10s16 = q1x2s16.val[0];
+  *q11s16 = q1x2s16.val[1];
+  *q12s16 = q2x2s16.val[0];
+  *q13s16 = q2x2s16.val[1];
+  *q14s16 = q3x2s16.val[0];
+  *q15s16 = q3x2s16.val[1];
+  return;
 }
 
-void vpx_idct16x16_256_add_neon_pass1(
-        int16_t *in,
-        int16_t *out,
-        int output_stride) {
-    int16x4_t d0s16, d1s16, d2s16, d3s16;
-    int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    uint64x1_t d16u64, d17u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64;
-    uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
-    int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    int32x4_t q0s32, q1s32, q2s32, q3s32, q5s32, q6s32, q9s32;
-    int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;
-    int16x8x2_t q0x2s16;
-
-    q0x2s16 = vld2q_s16(in);
-    q8s16  = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q9s16  = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q10s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q11s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q12s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q13s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q14s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q15s16 = q0x2s16.val[0];
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    d16s16 = vget_low_s16(q8s16);
-    d17s16 = vget_high_s16(q8s16);
-    d18s16 = vget_low_s16(q9s16);
-    d19s16 = vget_high_s16(q9s16);
-    d20s16 = vget_low_s16(q10s16);
-    d21s16 = vget_high_s16(q10s16);
-    d22s16 = vget_low_s16(q11s16);
-    d23s16 = vget_high_s16(q11s16);
-    d24s16 = vget_low_s16(q12s16);
-    d25s16 = vget_high_s16(q12s16);
-    d26s16 = vget_low_s16(q13s16);
-    d27s16 = vget_high_s16(q13s16);
-    d28s16 = vget_low_s16(q14s16);
-    d29s16 = vget_high_s16(q14s16);
-    d30s16 = vget_low_s16(q15s16);
-    d31s16 = vget_high_s16(q15s16);
-
-    // stage 3
-    d0s16 = vdup_n_s16(cospi_28_64);
-    d1s16 = vdup_n_s16(cospi_4_64);
-
-    q2s32 = vmull_s16(d18s16, d0s16);
-    q3s32 = vmull_s16(d19s16, d0s16);
-    q5s32 = vmull_s16(d18s16, d1s16);
-    q6s32 = vmull_s16(d19s16, d1s16);
-
-    q2s32 = vmlsl_s16(q2s32, d30s16, d1s16);
-    q3s32 = vmlsl_s16(q3s32, d31s16, d1s16);
-    q5s32 = vmlal_s16(q5s32, d30s16, d0s16);
-    q6s32 = vmlal_s16(q6s32, d31s16, d0s16);
-
-    d2s16 = vdup_n_s16(cospi_12_64);
-    d3s16 = vdup_n_s16(cospi_20_64);
-
-    d8s16 = vqrshrn_n_s32(q2s32, 14);
-    d9s16 = vqrshrn_n_s32(q3s32, 14);
-    d14s16 = vqrshrn_n_s32(q5s32, 14);
-    d15s16 = vqrshrn_n_s32(q6s32, 14);
-    q4s16 = vcombine_s16(d8s16, d9s16);
-    q7s16 = vcombine_s16(d14s16, d15s16);
-
-    q2s32 = vmull_s16(d26s16, d2s16);
-    q3s32 = vmull_s16(d27s16, d2s16);
-    q9s32 = vmull_s16(d26s16, d3s16);
-    q15s32 = vmull_s16(d27s16, d3s16);
-
-    q2s32 = vmlsl_s16(q2s32, d22s16, d3s16);
-    q3s32 = vmlsl_s16(q3s32, d23s16, d3s16);
-    q9s32 = vmlal_s16(q9s32, d22s16, d2s16);
-    q15s32 = vmlal_s16(q15s32, d23s16, d2s16);
-
-    d10s16 = vqrshrn_n_s32(q2s32, 14);
-    d11s16 = vqrshrn_n_s32(q3s32, 14);
-    d12s16 = vqrshrn_n_s32(q9s32, 14);
-    d13s16 = vqrshrn_n_s32(q15s32, 14);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    // stage 4
-    d30s16 = vdup_n_s16(cospi_16_64);
-
-    q2s32 = vmull_s16(d16s16, d30s16);
-    q11s32 = vmull_s16(d17s16, d30s16);
-    q0s32 = vmull_s16(d24s16, d30s16);
-    q1s32 = vmull_s16(d25s16, d30s16);
-
-    d30s16 = vdup_n_s16(cospi_24_64);
-    d31s16 = vdup_n_s16(cospi_8_64);
-
-    q3s32 = vaddq_s32(q2s32, q0s32);
-    q12s32 = vaddq_s32(q11s32, q1s32);
-    q13s32 = vsubq_s32(q2s32, q0s32);
-    q1s32 = vsubq_s32(q11s32, q1s32);
-
-    d16s16 = vqrshrn_n_s32(q3s32, 14);
-    d17s16 = vqrshrn_n_s32(q12s32, 14);
-    d18s16 = vqrshrn_n_s32(q13s32, 14);
-    d19s16 = vqrshrn_n_s32(q1s32, 14);
-    q8s16 = vcombine_s16(d16s16, d17s16);
-    q9s16 = vcombine_s16(d18s16, d19s16);
-
-    q0s32 = vmull_s16(d20s16, d31s16);
-    q1s32 = vmull_s16(d21s16, d31s16);
-    q12s32 = vmull_s16(d20s16, d30s16);
-    q13s32 = vmull_s16(d21s16, d30s16);
-
-    q0s32 = vmlal_s16(q0s32, d28s16, d30s16);
-    q1s32 = vmlal_s16(q1s32, d29s16, d30s16);
-    q12s32 = vmlsl_s16(q12s32, d28s16, d31s16);
-    q13s32 = vmlsl_s16(q13s32, d29s16, d31s16);
-
-    d22s16 = vqrshrn_n_s32(q0s32, 14);
-    d23s16 = vqrshrn_n_s32(q1s32, 14);
-    d20s16 = vqrshrn_n_s32(q12s32, 14);
-    d21s16 = vqrshrn_n_s32(q13s32, 14);
-    q10s16 = vcombine_s16(d20s16, d21s16);
-    q11s16 = vcombine_s16(d22s16, d23s16);
-
-    q13s16 = vsubq_s16(q4s16, q5s16);
-    q4s16 = vaddq_s16(q4s16, q5s16);
-    q14s16 = vsubq_s16(q7s16, q6s16);
-    q15s16 = vaddq_s16(q6s16, q7s16);
-    d26s16 = vget_low_s16(q13s16);
-    d27s16 = vget_high_s16(q13s16);
-    d28s16 = vget_low_s16(q14s16);
-    d29s16 = vget_high_s16(q14s16);
-
-    // stage 5
-    q0s16 = vaddq_s16(q8s16, q11s16);
-    q1s16 = vaddq_s16(q9s16, q10s16);
-    q2s16 = vsubq_s16(q9s16, q10s16);
-    q3s16 = vsubq_s16(q8s16, q11s16);
-
-    d16s16 = vdup_n_s16(cospi_16_64);
-
-    q11s32 = vmull_s16(d26s16, d16s16);
-    q12s32 = vmull_s16(d27s16, d16s16);
-    q9s32 = vmull_s16(d28s16, d16s16);
-    q10s32 = vmull_s16(d29s16, d16s16);
-
-    q6s32 = vsubq_s32(q9s32, q11s32);
-    q13s32 = vsubq_s32(q10s32, q12s32);
-    q9s32 = vaddq_s32(q9s32, q11s32);
-    q10s32 = vaddq_s32(q10s32, q12s32);
-
-    d10s16 = vqrshrn_n_s32(q6s32, 14);
-    d11s16 = vqrshrn_n_s32(q13s32, 14);
-    d12s16 = vqrshrn_n_s32(q9s32, 14);
-    d13s16 = vqrshrn_n_s32(q10s32, 14);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    // stage 6
-    q8s16 = vaddq_s16(q0s16, q15s16);
-    q9s16 = vaddq_s16(q1s16, q6s16);
-    q10s16 = vaddq_s16(q2s16, q5s16);
-    q11s16 = vaddq_s16(q3s16, q4s16);
-    q12s16 = vsubq_s16(q3s16, q4s16);
-    q13s16 = vsubq_s16(q2s16, q5s16);
-    q14s16 = vsubq_s16(q1s16, q6s16);
+void vpx_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
+                                      int output_stride) {
+  int16x4_t d0s16, d1s16, d2s16, d3s16;
+  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  uint64x1_t d16u64, d17u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64;
+  uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
+  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  int32x4_t q0s32, q1s32, q2s32, q3s32, q5s32, q6s32, q9s32;
+  int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;
+  int16x8x2_t q0x2s16;
+
+  q0x2s16 = vld2q_s16(in);
+  q8s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q9s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q10s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q11s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q12s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q13s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q14s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q15s16 = q0x2s16.val[0];
+
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
+
+  d16s16 = vget_low_s16(q8s16);
+  d17s16 = vget_high_s16(q8s16);
+  d18s16 = vget_low_s16(q9s16);
+  d19s16 = vget_high_s16(q9s16);
+  d20s16 = vget_low_s16(q10s16);
+  d21s16 = vget_high_s16(q10s16);
+  d22s16 = vget_low_s16(q11s16);
+  d23s16 = vget_high_s16(q11s16);
+  d24s16 = vget_low_s16(q12s16);
+  d25s16 = vget_high_s16(q12s16);
+  d26s16 = vget_low_s16(q13s16);
+  d27s16 = vget_high_s16(q13s16);
+  d28s16 = vget_low_s16(q14s16);
+  d29s16 = vget_high_s16(q14s16);
+  d30s16 = vget_low_s16(q15s16);
+  d31s16 = vget_high_s16(q15s16);
+
+  // stage 3
+  d0s16 = vdup_n_s16(cospi_28_64);
+  d1s16 = vdup_n_s16(cospi_4_64);
+
+  q2s32 = vmull_s16(d18s16, d0s16);
+  q3s32 = vmull_s16(d19s16, d0s16);
+  q5s32 = vmull_s16(d18s16, d1s16);
+  q6s32 = vmull_s16(d19s16, d1s16);
+
+  q2s32 = vmlsl_s16(q2s32, d30s16, d1s16);
+  q3s32 = vmlsl_s16(q3s32, d31s16, d1s16);
+  q5s32 = vmlal_s16(q5s32, d30s16, d0s16);
+  q6s32 = vmlal_s16(q6s32, d31s16, d0s16);
+
+  d2s16 = vdup_n_s16(cospi_12_64);
+  d3s16 = vdup_n_s16(cospi_20_64);
+
+  d8s16 = vqrshrn_n_s32(q2s32, 14);
+  d9s16 = vqrshrn_n_s32(q3s32, 14);
+  d14s16 = vqrshrn_n_s32(q5s32, 14);
+  d15s16 = vqrshrn_n_s32(q6s32, 14);
+  q4s16 = vcombine_s16(d8s16, d9s16);
+  q7s16 = vcombine_s16(d14s16, d15s16);
+
+  q2s32 = vmull_s16(d26s16, d2s16);
+  q3s32 = vmull_s16(d27s16, d2s16);
+  q9s32 = vmull_s16(d26s16, d3s16);
+  q15s32 = vmull_s16(d27s16, d3s16);
+
+  q2s32 = vmlsl_s16(q2s32, d22s16, d3s16);
+  q3s32 = vmlsl_s16(q3s32, d23s16, d3s16);
+  q9s32 = vmlal_s16(q9s32, d22s16, d2s16);
+  q15s32 = vmlal_s16(q15s32, d23s16, d2s16);
+
+  d10s16 = vqrshrn_n_s32(q2s32, 14);
+  d11s16 = vqrshrn_n_s32(q3s32, 14);
+  d12s16 = vqrshrn_n_s32(q9s32, 14);
+  d13s16 = vqrshrn_n_s32(q15s32, 14);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  // stage 4
+  d30s16 = vdup_n_s16(cospi_16_64);
+
+  q2s32 = vmull_s16(d16s16, d30s16);
+  q11s32 = vmull_s16(d17s16, d30s16);
+  q0s32 = vmull_s16(d24s16, d30s16);
+  q1s32 = vmull_s16(d25s16, d30s16);
+
+  d30s16 = vdup_n_s16(cospi_24_64);
+  d31s16 = vdup_n_s16(cospi_8_64);
+
+  q3s32 = vaddq_s32(q2s32, q0s32);
+  q12s32 = vaddq_s32(q11s32, q1s32);
+  q13s32 = vsubq_s32(q2s32, q0s32);
+  q1s32 = vsubq_s32(q11s32, q1s32);
+
+  d16s16 = vqrshrn_n_s32(q3s32, 14);
+  d17s16 = vqrshrn_n_s32(q12s32, 14);
+  d18s16 = vqrshrn_n_s32(q13s32, 14);
+  d19s16 = vqrshrn_n_s32(q1s32, 14);
+  q8s16 = vcombine_s16(d16s16, d17s16);
+  q9s16 = vcombine_s16(d18s16, d19s16);
+
+  q0s32 = vmull_s16(d20s16, d31s16);
+  q1s32 = vmull_s16(d21s16, d31s16);
+  q12s32 = vmull_s16(d20s16, d30s16);
+  q13s32 = vmull_s16(d21s16, d30s16);
+
+  q0s32 = vmlal_s16(q0s32, d28s16, d30s16);
+  q1s32 = vmlal_s16(q1s32, d29s16, d30s16);
+  q12s32 = vmlsl_s16(q12s32, d28s16, d31s16);
+  q13s32 = vmlsl_s16(q13s32, d29s16, d31s16);
+
+  d22s16 = vqrshrn_n_s32(q0s32, 14);
+  d23s16 = vqrshrn_n_s32(q1s32, 14);
+  d20s16 = vqrshrn_n_s32(q12s32, 14);
+  d21s16 = vqrshrn_n_s32(q13s32, 14);
+  q10s16 = vcombine_s16(d20s16, d21s16);
+  q11s16 = vcombine_s16(d22s16, d23s16);
+
+  q13s16 = vsubq_s16(q4s16, q5s16);
+  q4s16 = vaddq_s16(q4s16, q5s16);
+  q14s16 = vsubq_s16(q7s16, q6s16);
+  q15s16 = vaddq_s16(q6s16, q7s16);
+  d26s16 = vget_low_s16(q13s16);
+  d27s16 = vget_high_s16(q13s16);
+  d28s16 = vget_low_s16(q14s16);
+  d29s16 = vget_high_s16(q14s16);
+
+  // stage 5
+  q0s16 = vaddq_s16(q8s16, q11s16);
+  q1s16 = vaddq_s16(q9s16, q10s16);
+  q2s16 = vsubq_s16(q9s16, q10s16);
+  q3s16 = vsubq_s16(q8s16, q11s16);
+
+  d16s16 = vdup_n_s16(cospi_16_64);
+
+  q11s32 = vmull_s16(d26s16, d16s16);
+  q12s32 = vmull_s16(d27s16, d16s16);
+  q9s32 = vmull_s16(d28s16, d16s16);
+  q10s32 = vmull_s16(d29s16, d16s16);
+
+  q6s32 = vsubq_s32(q9s32, q11s32);
+  q13s32 = vsubq_s32(q10s32, q12s32);
+  q9s32 = vaddq_s32(q9s32, q11s32);
+  q10s32 = vaddq_s32(q10s32, q12s32);
+
+  d10s16 = vqrshrn_n_s32(q6s32, 14);
+  d11s16 = vqrshrn_n_s32(q13s32, 14);
+  d12s16 = vqrshrn_n_s32(q9s32, 14);
+  d13s16 = vqrshrn_n_s32(q10s32, 14);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  // stage 6
+  q8s16 = vaddq_s16(q0s16, q15s16);
+  q9s16 = vaddq_s16(q1s16, q6s16);
+  q10s16 = vaddq_s16(q2s16, q5s16);
+  q11s16 = vaddq_s16(q3s16, q4s16);
+  q12s16 = vsubq_s16(q3s16, q4s16);
+  q13s16 = vsubq_s16(q2s16, q5s16);
+  q14s16 = vsubq_s16(q1s16, q6s16);
+  q15s16 = vsubq_s16(q0s16, q15s16);
+
+  d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16));
+  d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16));
+  d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
+  d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
+  d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16));
+  d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16));
+  d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16));
+  d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16));
+  d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
+  d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
+  d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
+  d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
+  d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
+  d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
+  d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
+  d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
+
+  // store the data
+  output_stride >>= 1;  // output_stride / 2, out is int16_t
+  vst1_u64((uint64_t *)out, d16u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d17u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d18u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d19u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d20u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d21u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d22u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d23u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d24u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d25u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d26u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d27u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d28u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d29u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d30u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d31u64);
+  return;
+}
+
+void vpx_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
+                                      int16_t *pass1Output, int16_t skip_adding,
+                                      uint8_t *dest, int dest_stride) {
+  uint8_t *d;
+  uint8x8_t d12u8, d13u8;
+  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
+  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  uint64x1_t d24u64, d25u64, d26u64, d27u64;
+  int64x1_t d12s64, d13s64;
+  uint16x8_t q2u16, q3u16, q4u16, q5u16, q8u16;
+  uint16x8_t q9u16, q12u16, q13u16, q14u16, q15u16;
+  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32;
+  int32x4_t q10s32, q11s32, q12s32, q13s32;
+  int16x8x2_t q0x2s16;
+
+  q0x2s16 = vld2q_s16(src);
+  q8s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q9s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q10s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q11s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q12s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q13s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q14s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q15s16 = q0x2s16.val[0];
+
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
+
+  d16s16 = vget_low_s16(q8s16);
+  d17s16 = vget_high_s16(q8s16);
+  d18s16 = vget_low_s16(q9s16);
+  d19s16 = vget_high_s16(q9s16);
+  d20s16 = vget_low_s16(q10s16);
+  d21s16 = vget_high_s16(q10s16);
+  d22s16 = vget_low_s16(q11s16);
+  d23s16 = vget_high_s16(q11s16);
+  d24s16 = vget_low_s16(q12s16);
+  d25s16 = vget_high_s16(q12s16);
+  d26s16 = vget_low_s16(q13s16);
+  d27s16 = vget_high_s16(q13s16);
+  d28s16 = vget_low_s16(q14s16);
+  d29s16 = vget_high_s16(q14s16);
+  d30s16 = vget_low_s16(q15s16);
+  d31s16 = vget_high_s16(q15s16);
+
+  // stage 3
+  d12s16 = vdup_n_s16(cospi_30_64);
+  d13s16 = vdup_n_s16(cospi_2_64);
+
+  q2s32 = vmull_s16(d16s16, d12s16);
+  q3s32 = vmull_s16(d17s16, d12s16);
+  q1s32 = vmull_s16(d16s16, d13s16);
+  q4s32 = vmull_s16(d17s16, d13s16);
+
+  q2s32 = vmlsl_s16(q2s32, d30s16, d13s16);
+  q3s32 = vmlsl_s16(q3s32, d31s16, d13s16);
+  q1s32 = vmlal_s16(q1s32, d30s16, d12s16);
+  q4s32 = vmlal_s16(q4s32, d31s16, d12s16);
+
+  d0s16 = vqrshrn_n_s32(q2s32, 14);
+  d1s16 = vqrshrn_n_s32(q3s32, 14);
+  d14s16 = vqrshrn_n_s32(q1s32, 14);
+  d15s16 = vqrshrn_n_s32(q4s32, 14);
+  q0s16 = vcombine_s16(d0s16, d1s16);
+  q7s16 = vcombine_s16(d14s16, d15s16);
+
+  d30s16 = vdup_n_s16(cospi_14_64);
+  d31s16 = vdup_n_s16(cospi_18_64);
+
+  q2s32 = vmull_s16(d24s16, d30s16);
+  q3s32 = vmull_s16(d25s16, d30s16);
+  q4s32 = vmull_s16(d24s16, d31s16);
+  q5s32 = vmull_s16(d25s16, d31s16);
+
+  q2s32 = vmlsl_s16(q2s32, d22s16, d31s16);
+  q3s32 = vmlsl_s16(q3s32, d23s16, d31s16);
+  q4s32 = vmlal_s16(q4s32, d22s16, d30s16);
+  q5s32 = vmlal_s16(q5s32, d23s16, d30s16);
+
+  d2s16 = vqrshrn_n_s32(q2s32, 14);
+  d3s16 = vqrshrn_n_s32(q3s32, 14);
+  d12s16 = vqrshrn_n_s32(q4s32, 14);
+  d13s16 = vqrshrn_n_s32(q5s32, 14);
+  q1s16 = vcombine_s16(d2s16, d3s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  d30s16 = vdup_n_s16(cospi_22_64);
+  d31s16 = vdup_n_s16(cospi_10_64);
+
+  q11s32 = vmull_s16(d20s16, d30s16);
+  q12s32 = vmull_s16(d21s16, d30s16);
+  q4s32 = vmull_s16(d20s16, d31s16);
+  q5s32 = vmull_s16(d21s16, d31s16);
+
+  q11s32 = vmlsl_s16(q11s32, d26s16, d31s16);
+  q12s32 = vmlsl_s16(q12s32, d27s16, d31s16);
+  q4s32 = vmlal_s16(q4s32, d26s16, d30s16);
+  q5s32 = vmlal_s16(q5s32, d27s16, d30s16);
+
+  d4s16 = vqrshrn_n_s32(q11s32, 14);
+  d5s16 = vqrshrn_n_s32(q12s32, 14);
+  d11s16 = vqrshrn_n_s32(q5s32, 14);
+  d10s16 = vqrshrn_n_s32(q4s32, 14);
+  q2s16 = vcombine_s16(d4s16, d5s16);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+
+  d30s16 = vdup_n_s16(cospi_6_64);
+  d31s16 = vdup_n_s16(cospi_26_64);
+
+  q10s32 = vmull_s16(d28s16, d30s16);
+  q11s32 = vmull_s16(d29s16, d30s16);
+  q12s32 = vmull_s16(d28s16, d31s16);
+  q13s32 = vmull_s16(d29s16, d31s16);
+
+  q10s32 = vmlsl_s16(q10s32, d18s16, d31s16);
+  q11s32 = vmlsl_s16(q11s32, d19s16, d31s16);
+  q12s32 = vmlal_s16(q12s32, d18s16, d30s16);
+  q13s32 = vmlal_s16(q13s32, d19s16, d30s16);
+
+  d6s16 = vqrshrn_n_s32(q10s32, 14);
+  d7s16 = vqrshrn_n_s32(q11s32, 14);
+  d8s16 = vqrshrn_n_s32(q12s32, 14);
+  d9s16 = vqrshrn_n_s32(q13s32, 14);
+  q3s16 = vcombine_s16(d6s16, d7s16);
+  q4s16 = vcombine_s16(d8s16, d9s16);
+
+  // stage 3
+  q9s16 = vsubq_s16(q0s16, q1s16);
+  q0s16 = vaddq_s16(q0s16, q1s16);
+  q10s16 = vsubq_s16(q3s16, q2s16);
+  q11s16 = vaddq_s16(q2s16, q3s16);
+  q12s16 = vaddq_s16(q4s16, q5s16);
+  q13s16 = vsubq_s16(q4s16, q5s16);
+  q14s16 = vsubq_s16(q7s16, q6s16);
+  q7s16 = vaddq_s16(q6s16, q7s16);
+
+  // stage 4
+  d18s16 = vget_low_s16(q9s16);
+  d19s16 = vget_high_s16(q9s16);
+  d20s16 = vget_low_s16(q10s16);
+  d21s16 = vget_high_s16(q10s16);
+  d26s16 = vget_low_s16(q13s16);
+  d27s16 = vget_high_s16(q13s16);
+  d28s16 = vget_low_s16(q14s16);
+  d29s16 = vget_high_s16(q14s16);
+
+  d30s16 = vdup_n_s16(cospi_8_64);
+  d31s16 = vdup_n_s16(cospi_24_64);
+
+  q2s32 = vmull_s16(d18s16, d31s16);
+  q3s32 = vmull_s16(d19s16, d31s16);
+  q4s32 = vmull_s16(d28s16, d31s16);
+  q5s32 = vmull_s16(d29s16, d31s16);
+
+  q2s32 = vmlal_s16(q2s32, d28s16, d30s16);
+  q3s32 = vmlal_s16(q3s32, d29s16, d30s16);
+  q4s32 = vmlsl_s16(q4s32, d18s16, d30s16);
+  q5s32 = vmlsl_s16(q5s32, d19s16, d30s16);
+
+  d12s16 = vqrshrn_n_s32(q2s32, 14);
+  d13s16 = vqrshrn_n_s32(q3s32, 14);
+  d2s16 = vqrshrn_n_s32(q4s32, 14);
+  d3s16 = vqrshrn_n_s32(q5s32, 14);
+  q1s16 = vcombine_s16(d2s16, d3s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  q3s16 = q11s16;
+  q4s16 = q12s16;
+
+  d30s16 = vdup_n_s16(-cospi_8_64);
+  q11s32 = vmull_s16(d26s16, d30s16);
+  q12s32 = vmull_s16(d27s16, d30s16);
+  q8s32 = vmull_s16(d20s16, d30s16);
+  q9s32 = vmull_s16(d21s16, d30s16);
+
+  q11s32 = vmlsl_s16(q11s32, d20s16, d31s16);
+  q12s32 = vmlsl_s16(q12s32, d21s16, d31s16);
+  q8s32 = vmlal_s16(q8s32, d26s16, d31s16);
+  q9s32 = vmlal_s16(q9s32, d27s16, d31s16);
+
+  d4s16 = vqrshrn_n_s32(q11s32, 14);
+  d5s16 = vqrshrn_n_s32(q12s32, 14);
+  d10s16 = vqrshrn_n_s32(q8s32, 14);
+  d11s16 = vqrshrn_n_s32(q9s32, 14);
+  q2s16 = vcombine_s16(d4s16, d5s16);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+
+  // stage 5
+  q8s16 = vaddq_s16(q0s16, q3s16);
+  q9s16 = vaddq_s16(q1s16, q2s16);
+  q10s16 = vsubq_s16(q1s16, q2s16);
+  q11s16 = vsubq_s16(q0s16, q3s16);
+  q12s16 = vsubq_s16(q7s16, q4s16);
+  q13s16 = vsubq_s16(q6s16, q5s16);
+  q14s16 = vaddq_s16(q6s16, q5s16);
+  q15s16 = vaddq_s16(q7s16, q4s16);
+
+  // stage 6
+  d20s16 = vget_low_s16(q10s16);
+  d21s16 = vget_high_s16(q10s16);
+  d22s16 = vget_low_s16(q11s16);
+  d23s16 = vget_high_s16(q11s16);
+  d24s16 = vget_low_s16(q12s16);
+  d25s16 = vget_high_s16(q12s16);
+  d26s16 = vget_low_s16(q13s16);
+  d27s16 = vget_high_s16(q13s16);
+
+  d14s16 = vdup_n_s16(cospi_16_64);
+
+  q3s32 = vmull_s16(d26s16, d14s16);
+  q4s32 = vmull_s16(d27s16, d14s16);
+  q0s32 = vmull_s16(d20s16, d14s16);
+  q1s32 = vmull_s16(d21s16, d14s16);
+
+  q5s32 = vsubq_s32(q3s32, q0s32);
+  q6s32 = vsubq_s32(q4s32, q1s32);
+  q10s32 = vaddq_s32(q3s32, q0s32);
+  q4s32 = vaddq_s32(q4s32, q1s32);
+
+  d4s16 = vqrshrn_n_s32(q5s32, 14);
+  d5s16 = vqrshrn_n_s32(q6s32, 14);
+  d10s16 = vqrshrn_n_s32(q10s32, 14);
+  d11s16 = vqrshrn_n_s32(q4s32, 14);
+  q2s16 = vcombine_s16(d4s16, d5s16);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+
+  q0s32 = vmull_s16(d22s16, d14s16);
+  q1s32 = vmull_s16(d23s16, d14s16);
+  q13s32 = vmull_s16(d24s16, d14s16);
+  q6s32 = vmull_s16(d25s16, d14s16);
+
+  q10s32 = vsubq_s32(q13s32, q0s32);
+  q4s32 = vsubq_s32(q6s32, q1s32);
+  q13s32 = vaddq_s32(q13s32, q0s32);
+  q6s32 = vaddq_s32(q6s32, q1s32);
+
+  d6s16 = vqrshrn_n_s32(q10s32, 14);
+  d7s16 = vqrshrn_n_s32(q4s32, 14);
+  d8s16 = vqrshrn_n_s32(q13s32, 14);
+  d9s16 = vqrshrn_n_s32(q6s32, 14);
+  q3s16 = vcombine_s16(d6s16, d7s16);
+  q4s16 = vcombine_s16(d8s16, d9s16);
+
+  // stage 7
+  if (skip_adding != 0) {
+    d = dest;
+    // load the data in pass1
+    q0s16 = vld1q_s16(pass1Output);
+    pass1Output += 8;
+    q1s16 = vld1q_s16(pass1Output);
+    pass1Output += 8;
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    d13s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+
+    q12s16 = vaddq_s16(q0s16, q15s16);
+    q13s16 = vaddq_s16(q1s16, q14s16);
+    q12s16 = vrshrq_n_s16(q12s16, 6);
+    q13s16 = vrshrq_n_s16(q13s16, 6);
+    q12u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q12s16), vreinterpret_u8_s64(d12s64));
+    q13u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q13s16), vreinterpret_u8_s64(d13s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
+    d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8));
+    d += dest_stride;
+    q14s16 = vsubq_s16(q1s16, q14s16);
     q15s16 = vsubq_s16(q0s16, q15s16);
 
-    d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16));
-    d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16));
-    d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
-    d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
-    d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16));
-    d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16));
-    d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16));
-    d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16));
-    d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
-    d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
-    d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
-    d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
-    d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
-    d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
-    d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
-    d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
-
-    // store the data
-    output_stride >>= 1;  // output_stride / 2, out is int16_t
-    vst1_u64((uint64_t *)out, d16u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d17u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d18u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d19u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d20u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d21u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d22u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d23u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d24u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d25u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d26u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d27u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d28u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d29u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d30u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d31u64);
-    return;
-}
+    q10s16 = vld1q_s16(pass1Output);
+    pass1Output += 8;
+    q11s16 = vld1q_s16(pass1Output);
+    pass1Output += 8;
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    d13s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q12s16 = vaddq_s16(q10s16, q5s16);
+    q13s16 = vaddq_s16(q11s16, q4s16);
+    q12s16 = vrshrq_n_s16(q12s16, 6);
+    q13s16 = vrshrq_n_s16(q13s16, 6);
+    q12u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q12s16), vreinterpret_u8_s64(d12s64));
+    q13u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q13s16), vreinterpret_u8_s64(d13s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
+    d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8));
+    d += dest_stride;
+    q4s16 = vsubq_s16(q11s16, q4s16);
+    q5s16 = vsubq_s16(q10s16, q5s16);
 
-void vpx_idct16x16_256_add_neon_pass2(
-        int16_t *src,
-        int16_t *out,
-        int16_t *pass1Output,
-        int16_t skip_adding,
-        uint8_t *dest,
-        int dest_stride) {
-    uint8_t *d;
-    uint8x8_t d12u8, d13u8;
-    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
-    int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    uint64x1_t d24u64, d25u64, d26u64, d27u64;
-    int64x1_t d12s64, d13s64;
-    uint16x8_t q2u16, q3u16, q4u16, q5u16, q8u16;
-    uint16x8_t q9u16, q12u16, q13u16, q14u16, q15u16;
-    int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32;
-    int32x4_t q10s32, q11s32, q12s32, q13s32;
-    int16x8x2_t q0x2s16;
-
-    q0x2s16 = vld2q_s16(src);
-    q8s16  = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q9s16  = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q10s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q11s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q12s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q13s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q14s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q15s16 = q0x2s16.val[0];
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    d16s16 = vget_low_s16(q8s16);
-    d17s16 = vget_high_s16(q8s16);
-    d18s16 = vget_low_s16(q9s16);
-    d19s16 = vget_high_s16(q9s16);
-    d20s16 = vget_low_s16(q10s16);
-    d21s16 = vget_high_s16(q10s16);
-    d22s16 = vget_low_s16(q11s16);
-    d23s16 = vget_high_s16(q11s16);
-    d24s16 = vget_low_s16(q12s16);
-    d25s16 = vget_high_s16(q12s16);
-    d26s16 = vget_low_s16(q13s16);
-    d27s16 = vget_high_s16(q13s16);
-    d28s16 = vget_low_s16(q14s16);
-    d29s16 = vget_high_s16(q14s16);
-    d30s16 = vget_low_s16(q15s16);
-    d31s16 = vget_high_s16(q15s16);
-
-    // stage 3
-    d12s16 = vdup_n_s16(cospi_30_64);
-    d13s16 = vdup_n_s16(cospi_2_64);
-
-    q2s32 = vmull_s16(d16s16, d12s16);
-    q3s32 = vmull_s16(d17s16, d12s16);
-    q1s32 = vmull_s16(d16s16, d13s16);
-    q4s32 = vmull_s16(d17s16, d13s16);
-
-    q2s32 = vmlsl_s16(q2s32, d30s16, d13s16);
-    q3s32 = vmlsl_s16(q3s32, d31s16, d13s16);
-    q1s32 = vmlal_s16(q1s32, d30s16, d12s16);
-    q4s32 = vmlal_s16(q4s32, d31s16, d12s16);
-
-    d0s16 = vqrshrn_n_s32(q2s32, 14);
-    d1s16 = vqrshrn_n_s32(q3s32, 14);
-    d14s16 = vqrshrn_n_s32(q1s32, 14);
-    d15s16 = vqrshrn_n_s32(q4s32, 14);
-    q0s16 = vcombine_s16(d0s16, d1s16);
-    q7s16 = vcombine_s16(d14s16, d15s16);
-
-    d30s16 = vdup_n_s16(cospi_14_64);
-    d31s16 = vdup_n_s16(cospi_18_64);
-
-    q2s32 = vmull_s16(d24s16, d30s16);
-    q3s32 = vmull_s16(d25s16, d30s16);
-    q4s32 = vmull_s16(d24s16, d31s16);
-    q5s32 = vmull_s16(d25s16, d31s16);
-
-    q2s32 = vmlsl_s16(q2s32, d22s16, d31s16);
-    q3s32 = vmlsl_s16(q3s32, d23s16, d31s16);
-    q4s32 = vmlal_s16(q4s32, d22s16, d30s16);
-    q5s32 = vmlal_s16(q5s32, d23s16, d30s16);
-
-    d2s16 = vqrshrn_n_s32(q2s32, 14);
-    d3s16 = vqrshrn_n_s32(q3s32, 14);
-    d12s16 = vqrshrn_n_s32(q4s32, 14);
-    d13s16 = vqrshrn_n_s32(q5s32, 14);
-    q1s16 = vcombine_s16(d2s16, d3s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    d30s16 = vdup_n_s16(cospi_22_64);
-    d31s16 = vdup_n_s16(cospi_10_64);
-
-    q11s32 = vmull_s16(d20s16, d30s16);
-    q12s32 = vmull_s16(d21s16, d30s16);
-    q4s32 = vmull_s16(d20s16, d31s16);
-    q5s32 = vmull_s16(d21s16, d31s16);
-
-    q11s32 = vmlsl_s16(q11s32, d26s16, d31s16);
-    q12s32 = vmlsl_s16(q12s32, d27s16, d31s16);
-    q4s32 = vmlal_s16(q4s32, d26s16, d30s16);
-    q5s32 = vmlal_s16(q5s32, d27s16, d30s16);
-
-    d4s16 = vqrshrn_n_s32(q11s32, 14);
-    d5s16 = vqrshrn_n_s32(q12s32, 14);
-    d11s16 = vqrshrn_n_s32(q5s32, 14);
-    d10s16 = vqrshrn_n_s32(q4s32, 14);
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-
-    d30s16 = vdup_n_s16(cospi_6_64);
-    d31s16 = vdup_n_s16(cospi_26_64);
-
-    q10s32 = vmull_s16(d28s16, d30s16);
-    q11s32 = vmull_s16(d29s16, d30s16);
-    q12s32 = vmull_s16(d28s16, d31s16);
-    q13s32 = vmull_s16(d29s16, d31s16);
-
-    q10s32 = vmlsl_s16(q10s32, d18s16, d31s16);
-    q11s32 = vmlsl_s16(q11s32, d19s16, d31s16);
-    q12s32 = vmlal_s16(q12s32, d18s16, d30s16);
-    q13s32 = vmlal_s16(q13s32, d19s16, d30s16);
-
-    d6s16 = vqrshrn_n_s32(q10s32, 14);
-    d7s16 = vqrshrn_n_s32(q11s32, 14);
-    d8s16 = vqrshrn_n_s32(q12s32, 14);
-    d9s16 = vqrshrn_n_s32(q13s32, 14);
-    q3s16 = vcombine_s16(d6s16, d7s16);
-    q4s16 = vcombine_s16(d8s16, d9s16);
-
-    // stage 3
-    q9s16  = vsubq_s16(q0s16, q1s16);
-    q0s16  = vaddq_s16(q0s16, q1s16);
-    q10s16 = vsubq_s16(q3s16, q2s16);
-    q11s16 = vaddq_s16(q2s16, q3s16);
-    q12s16 = vaddq_s16(q4s16, q5s16);
-    q13s16 = vsubq_s16(q4s16, q5s16);
-    q14s16 = vsubq_s16(q7s16, q6s16);
-    q7s16  = vaddq_s16(q6s16, q7s16);
-
-    // stage 4
-    d18s16 = vget_low_s16(q9s16);
-    d19s16 = vget_high_s16(q9s16);
-    d20s16 = vget_low_s16(q10s16);
-    d21s16 = vget_high_s16(q10s16);
-    d26s16 = vget_low_s16(q13s16);
-    d27s16 = vget_high_s16(q13s16);
-    d28s16 = vget_low_s16(q14s16);
-    d29s16 = vget_high_s16(q14s16);
-
-    d30s16 = vdup_n_s16(cospi_8_64);
-    d31s16 = vdup_n_s16(cospi_24_64);
-
-    q2s32 = vmull_s16(d18s16, d31s16);
-    q3s32 = vmull_s16(d19s16, d31s16);
-    q4s32 = vmull_s16(d28s16, d31s16);
-    q5s32 = vmull_s16(d29s16, d31s16);
-
-    q2s32 = vmlal_s16(q2s32, d28s16, d30s16);
-    q3s32 = vmlal_s16(q3s32, d29s16, d30s16);
-    q4s32 = vmlsl_s16(q4s32, d18s16, d30s16);
-    q5s32 = vmlsl_s16(q5s32, d19s16, d30s16);
-
-    d12s16 = vqrshrn_n_s32(q2s32, 14);
-    d13s16 = vqrshrn_n_s32(q3s32, 14);
-    d2s16 = vqrshrn_n_s32(q4s32, 14);
-    d3s16 = vqrshrn_n_s32(q5s32, 14);
-    q1s16 = vcombine_s16(d2s16, d3s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    q3s16 = q11s16;
-    q4s16 = q12s16;
-
-    d30s16 = vdup_n_s16(-cospi_8_64);
-    q11s32 = vmull_s16(d26s16, d30s16);
-    q12s32 = vmull_s16(d27s16, d30s16);
-    q8s32 = vmull_s16(d20s16, d30s16);
-    q9s32 = vmull_s16(d21s16, d30s16);
-
-    q11s32 = vmlsl_s16(q11s32, d20s16, d31s16);
-    q12s32 = vmlsl_s16(q12s32, d21s16, d31s16);
-    q8s32 = vmlal_s16(q8s32, d26s16, d31s16);
-    q9s32 = vmlal_s16(q9s32, d27s16, d31s16);
-
-    d4s16 = vqrshrn_n_s32(q11s32, 14);
-    d5s16 = vqrshrn_n_s32(q12s32, 14);
-    d10s16 = vqrshrn_n_s32(q8s32, 14);
-    d11s16 = vqrshrn_n_s32(q9s32, 14);
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-
-    // stage 5
-    q8s16  = vaddq_s16(q0s16, q3s16);
-    q9s16  = vaddq_s16(q1s16, q2s16);
-    q10s16 = vsubq_s16(q1s16, q2s16);
-    q11s16 = vsubq_s16(q0s16, q3s16);
-    q12s16 = vsubq_s16(q7s16, q4s16);
-    q13s16 = vsubq_s16(q6s16, q5s16);
-    q14s16 = vaddq_s16(q6s16, q5s16);
-    q15s16 = vaddq_s16(q7s16, q4s16);
-
-    // stage 6
-    d20s16 = vget_low_s16(q10s16);
-    d21s16 = vget_high_s16(q10s16);
-    d22s16 = vget_low_s16(q11s16);
-    d23s16 = vget_high_s16(q11s16);
-    d24s16 = vget_low_s16(q12s16);
-    d25s16 = vget_high_s16(q12s16);
-    d26s16 = vget_low_s16(q13s16);
-    d27s16 = vget_high_s16(q13s16);
-
-    d14s16 = vdup_n_s16(cospi_16_64);
-
-    q3s32 = vmull_s16(d26s16, d14s16);
-    q4s32 = vmull_s16(d27s16, d14s16);
-    q0s32 = vmull_s16(d20s16, d14s16);
-    q1s32 = vmull_s16(d21s16, d14s16);
-
-    q5s32 = vsubq_s32(q3s32, q0s32);
-    q6s32 = vsubq_s32(q4s32, q1s32);
-    q10s32 = vaddq_s32(q3s32, q0s32);
-    q4s32 = vaddq_s32(q4s32, q1s32);
-
-    d4s16 = vqrshrn_n_s32(q5s32, 14);
-    d5s16 = vqrshrn_n_s32(q6s32, 14);
-    d10s16 = vqrshrn_n_s32(q10s32, 14);
-    d11s16 = vqrshrn_n_s32(q4s32, 14);
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-
-    q0s32 = vmull_s16(d22s16, d14s16);
-    q1s32 = vmull_s16(d23s16, d14s16);
-    q13s32 = vmull_s16(d24s16, d14s16);
-    q6s32 = vmull_s16(d25s16, d14s16);
-
-    q10s32 = vsubq_s32(q13s32, q0s32);
-    q4s32 = vsubq_s32(q6s32, q1s32);
-    q13s32 = vaddq_s32(q13s32, q0s32);
-    q6s32 = vaddq_s32(q6s32, q1s32);
-
-    d6s16 = vqrshrn_n_s32(q10s32, 14);
-    d7s16 = vqrshrn_n_s32(q4s32, 14);
-    d8s16 = vqrshrn_n_s32(q13s32, 14);
-    d9s16 = vqrshrn_n_s32(q6s32, 14);
-    q3s16 = vcombine_s16(d6s16, d7s16);
-    q4s16 = vcombine_s16(d8s16, d9s16);
-
-    // stage 7
-    if (skip_adding != 0) {
-        d = dest;
-        // load the data in pass1
-        q0s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q1s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        d13s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-
-        q12s16 = vaddq_s16(q0s16, q15s16);
-        q13s16 = vaddq_s16(q1s16, q14s16);
-        q12s16 = vrshrq_n_s16(q12s16, 6);
-        q13s16 = vrshrq_n_s16(q13s16, 6);
-        q12u16 = vaddw_u8(vreinterpretq_u16_s16(q12s16),
-                          vreinterpret_u8_s64(d12s64));
-        q13u16 = vaddw_u8(vreinterpretq_u16_s16(q13s16),
-                          vreinterpret_u8_s64(d13s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
-        d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8));
-        d += dest_stride;
-        q14s16 = vsubq_s16(q1s16, q14s16);
-        q15s16 = vsubq_s16(q0s16, q15s16);
-
-        q10s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q11s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        d13s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q12s16 = vaddq_s16(q10s16, q5s16);
-        q13s16 = vaddq_s16(q11s16, q4s16);
-        q12s16 = vrshrq_n_s16(q12s16, 6);
-        q13s16 = vrshrq_n_s16(q13s16, 6);
-        q12u16 = vaddw_u8(vreinterpretq_u16_s16(q12s16),
-                          vreinterpret_u8_s64(d12s64));
-        q13u16 = vaddw_u8(vreinterpretq_u16_s16(q13s16),
-                          vreinterpret_u8_s64(d13s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
-        d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8));
-        d += dest_stride;
-        q4s16 = vsubq_s16(q11s16, q4s16);
-        q5s16 = vsubq_s16(q10s16, q5s16);
-
-        q0s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q1s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        d13s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q12s16 = vaddq_s16(q0s16, q3s16);
-        q13s16 = vaddq_s16(q1s16, q2s16);
-        q12s16 = vrshrq_n_s16(q12s16, 6);
-        q13s16 = vrshrq_n_s16(q13s16, 6);
-        q12u16 = vaddw_u8(vreinterpretq_u16_s16(q12s16),
-                          vreinterpret_u8_s64(d12s64));
-        q13u16 = vaddw_u8(vreinterpretq_u16_s16(q13s16),
-                          vreinterpret_u8_s64(d13s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
-        d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8));
-        d += dest_stride;
-        q2s16 = vsubq_s16(q1s16, q2s16);
-        q3s16 = vsubq_s16(q0s16, q3s16);
-
-        q10s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q11s16 = vld1q_s16(pass1Output);
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        d13s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q12s16 = vaddq_s16(q10s16, q9s16);
-        q13s16 = vaddq_s16(q11s16, q8s16);
-        q12s16 = vrshrq_n_s16(q12s16, 6);
-        q13s16 = vrshrq_n_s16(q13s16, 6);
-        q12u16 = vaddw_u8(vreinterpretq_u16_s16(q12s16),
-                          vreinterpret_u8_s64(d12s64));
-        q13u16 = vaddw_u8(vreinterpretq_u16_s16(q13s16),
-                          vreinterpret_u8_s64(d13s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
-        d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8));
-        d += dest_stride;
-        q8s16 = vsubq_s16(q11s16, q8s16);
-        q9s16 = vsubq_s16(q10s16, q9s16);
-
-        // store the data  out 8,9,10,11,12,13,14,15
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q8s16 = vrshrq_n_s16(q8s16, 6);
-        q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16),
-                         vreinterpret_u8_s64(d12s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q9s16 = vrshrq_n_s16(q9s16, 6);
-        q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16),
-                          vreinterpret_u8_s64(d12s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q2s16 = vrshrq_n_s16(q2s16, 6);
-        q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2s16),
-                          vreinterpret_u8_s64(d12s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q3s16 = vrshrq_n_s16(q3s16, 6);
-        q3u16 = vaddw_u8(vreinterpretq_u16_s16(q3s16),
-                         vreinterpret_u8_s64(d12s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q3u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q4s16 = vrshrq_n_s16(q4s16, 6);
-        q4u16 = vaddw_u8(vreinterpretq_u16_s16(q4s16),
-                         vreinterpret_u8_s64(d12s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q4u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q5s16 = vrshrq_n_s16(q5s16, 6);
-        q5u16 = vaddw_u8(vreinterpretq_u16_s16(q5s16),
-                         vreinterpret_u8_s64(d12s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q5u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-
-        d12s64 = vld1_s64((int64_t *)dest);
-        dest += dest_stride;
-        q14s16 = vrshrq_n_s16(q14s16, 6);
-        q14u16 = vaddw_u8(vreinterpretq_u16_s16(q14s16),
-                          vreinterpret_u8_s64(d12s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q14u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-        d += dest_stride;
-
-        d12s64 = vld1_s64((int64_t *)dest);
-        q15s16 = vrshrq_n_s16(q15s16, 6);
-        q15u16 = vaddw_u8(vreinterpretq_u16_s16(q15s16),
-                          vreinterpret_u8_s64(d12s64));
-        d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q15u16));
-        vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
-    } else {  // skip_adding_dest
-        q0s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q1s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q12s16 = vaddq_s16(q0s16, q15s16);
-        q13s16 = vaddq_s16(q1s16, q14s16);
-        d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
-        d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
-        d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
-        d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
-        vst1_u64((uint64_t *)out, d24u64);
-        out += 4;
-        vst1_u64((uint64_t *)out, d25u64);
-        out += 12;
-        vst1_u64((uint64_t *)out, d26u64);
-        out += 4;
-        vst1_u64((uint64_t *)out, d27u64);
-        out += 12;
-        q14s16 = vsubq_s16(q1s16, q14s16);
-        q15s16 = vsubq_s16(q0s16, q15s16);
-
-        q10s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q11s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q12s16 = vaddq_s16(q10s16, q5s16);
-        q13s16 = vaddq_s16(q11s16, q4s16);
-        d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
-        d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
-        d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
-        d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
-        vst1_u64((uint64_t *)out, d24u64);
-        out += 4;
-        vst1_u64((uint64_t *)out, d25u64);
-        out += 12;
-        vst1_u64((uint64_t *)out, d26u64);
-        out += 4;
-        vst1_u64((uint64_t *)out, d27u64);
-        out += 12;
-        q4s16 = vsubq_s16(q11s16, q4s16);
-        q5s16 = vsubq_s16(q10s16, q5s16);
-
-        q0s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q1s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q12s16 = vaddq_s16(q0s16, q3s16);
-        q13s16 = vaddq_s16(q1s16, q2s16);
-        d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
-        d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
-        d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
-        d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
-        vst1_u64((uint64_t *)out, d24u64);
-        out += 4;
-        vst1_u64((uint64_t *)out, d25u64);
-        out += 12;
-        vst1_u64((uint64_t *)out, d26u64);
-        out += 4;
-        vst1_u64((uint64_t *)out, d27u64);
-        out += 12;
-        q2s16 = vsubq_s16(q1s16, q2s16);
-        q3s16 = vsubq_s16(q0s16, q3s16);
-
-        q10s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q11s16 = vld1q_s16(pass1Output);
-        pass1Output += 8;
-        q12s16 = vaddq_s16(q10s16, q9s16);
-        q13s16 = vaddq_s16(q11s16, q8s16);
-        d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
-        d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
-        d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
-        d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
-        vst1_u64((uint64_t *)out, d24u64);
-        out += 4;
-        vst1_u64((uint64_t *)out, d25u64);
-        out += 12;
-        vst1_u64((uint64_t *)out, d26u64);
-        out += 4;
-        vst1_u64((uint64_t *)out, d27u64);
-        out += 12;
-        q8s16 = vsubq_s16(q11s16, q8s16);
-        q9s16 = vsubq_s16(q10s16, q9s16);
-
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q8s16)));
-        out += 4;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q8s16)));
-        out += 12;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q9s16)));
-        out += 4;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q9s16)));
-        out += 12;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q2s16)));
-        out += 4;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q2s16)));
-        out += 12;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q3s16)));
-        out += 4;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q3s16)));
-        out += 12;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q4s16)));
-        out += 4;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q4s16)));
-        out += 12;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q5s16)));
-        out += 4;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q5s16)));
-        out += 12;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q14s16)));
-        out += 4;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q14s16)));
-        out += 12;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q15s16)));
-        out += 4;
-        vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q15s16)));
-    }
-    return;
-}
+    q0s16 = vld1q_s16(pass1Output);
+    pass1Output += 8;
+    q1s16 = vld1q_s16(pass1Output);
+    pass1Output += 8;
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    d13s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q12s16 = vaddq_s16(q0s16, q3s16);
+    q13s16 = vaddq_s16(q1s16, q2s16);
+    q12s16 = vrshrq_n_s16(q12s16, 6);
+    q13s16 = vrshrq_n_s16(q13s16, 6);
+    q12u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q12s16), vreinterpret_u8_s64(d12s64));
+    q13u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q13s16), vreinterpret_u8_s64(d13s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
+    d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8));
+    d += dest_stride;
+    q2s16 = vsubq_s16(q1s16, q2s16);
+    q3s16 = vsubq_s16(q0s16, q3s16);
 
-void vpx_idct16x16_10_add_neon_pass1(
-        int16_t *in,
-        int16_t *out,
-        int output_stride) {
-    int16x4_t d4s16;
-    int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-    uint64x1_t d4u64, d5u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64;
-    uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
-    int16x8_t q0s16, q1s16, q2s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    int32x4_t q6s32, q9s32;
-    int32x4_t q10s32, q11s32, q12s32, q15s32;
-    int16x8x2_t q0x2s16;
-
-    q0x2s16 = vld2q_s16(in);
-    q8s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q9s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q10s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q11s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q12s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q13s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q14s16 = q0x2s16.val[0];
-    in += 16;
-    q0x2s16 = vld2q_s16(in);
-    q15s16 = q0x2s16.val[0];
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    // stage 3
-    q0s16 = vdupq_n_s16(cospi_28_64 * 2);
-    q1s16 = vdupq_n_s16(cospi_4_64 * 2);
-
-    q4s16 = vqrdmulhq_s16(q9s16, q0s16);
-    q7s16 = vqrdmulhq_s16(q9s16, q1s16);
-
-    // stage 4
-    q1s16 = vdupq_n_s16(cospi_16_64 * 2);
-    d4s16 = vdup_n_s16(cospi_16_64);
-
-    q8s16 = vqrdmulhq_s16(q8s16, q1s16);
-
-    d8s16 = vget_low_s16(q4s16);
-    d9s16 = vget_high_s16(q4s16);
-    d14s16 = vget_low_s16(q7s16);
-    d15s16 = vget_high_s16(q7s16);
-    q9s32  = vmull_s16(d14s16, d4s16);
-    q10s32 = vmull_s16(d15s16, d4s16);
-    q12s32 = vmull_s16(d9s16, d4s16);
-    q11s32 = vmull_s16(d8s16, d4s16);
-
-    q15s32 = vsubq_s32(q10s32, q12s32);
-    q6s32 = vsubq_s32(q9s32, q11s32);
-    q9s32 = vaddq_s32(q9s32, q11s32);
-    q10s32 = vaddq_s32(q10s32, q12s32);
-
-    d11s16 = vqrshrn_n_s32(q15s32, 14);
-    d10s16 = vqrshrn_n_s32(q6s32, 14);
-    d12s16 = vqrshrn_n_s32(q9s32, 14);
-    d13s16 = vqrshrn_n_s32(q10s32, 14);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    // stage 6
-    q2s16 = vaddq_s16(q8s16, q7s16);
-    q9s16 = vaddq_s16(q8s16, q6s16);
-    q10s16 = vaddq_s16(q8s16, q5s16);
-    q11s16 = vaddq_s16(q8s16, q4s16);
-    q12s16 = vsubq_s16(q8s16, q4s16);
-    q13s16 = vsubq_s16(q8s16, q5s16);
-    q14s16 = vsubq_s16(q8s16, q6s16);
-    q15s16 = vsubq_s16(q8s16, q7s16);
-
-    d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16));
-    d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16));
-    d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
-    d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
-    d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16));
-    d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16));
-    d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16));
-    d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16));
-    d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
-    d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
-    d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
-    d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
-    d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
-    d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
-    d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
-    d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
-
-    // store the data
-    output_stride >>= 1;  // output_stride / 2, out is int16_t
-    vst1_u64((uint64_t *)out, d4u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d5u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d18u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d19u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d20u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d21u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d22u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d23u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d24u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d25u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d26u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d27u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d28u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d29u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d30u64);
-    out += output_stride;
-    vst1_u64((uint64_t *)out, d31u64);
-    return;
-}
+    q10s16 = vld1q_s16(pass1Output);
+    pass1Output += 8;
+    q11s16 = vld1q_s16(pass1Output);
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    d13s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q12s16 = vaddq_s16(q10s16, q9s16);
+    q13s16 = vaddq_s16(q11s16, q8s16);
+    q12s16 = vrshrq_n_s16(q12s16, 6);
+    q13s16 = vrshrq_n_s16(q13s16, 6);
+    q12u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q12s16), vreinterpret_u8_s64(d12s64));
+    q13u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q13s16), vreinterpret_u8_s64(d13s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
+    d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8));
+    d += dest_stride;
+    q8s16 = vsubq_s16(q11s16, q8s16);
+    q9s16 = vsubq_s16(q10s16, q9s16);
 
-void vpx_idct16x16_10_add_neon_pass2(
-        int16_t *src,
-        int16_t *out,
-        int16_t *pass1Output,
-        int16_t skip_adding,
-        uint8_t *dest,
-        int dest_stride) {
-    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
-    int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-    int16x4_t d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d30s16, d31s16;
-    uint64x1_t d4u64, d5u64, d6u64, d7u64, d8u64, d9u64, d10u64, d11u64;
-    uint64x1_t d16u64, d17u64, d18u64, d19u64;
-    uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
-    int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32;
-    int32x4_t q10s32, q11s32, q12s32, q13s32;
-    int16x8x2_t q0x2s16;
-    (void)skip_adding;
-    (void)dest;
-    (void)dest_stride;
-
-    q0x2s16 = vld2q_s16(src);
-    q8s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q9s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q10s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q11s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q12s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q13s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q14s16 = q0x2s16.val[0];
-    src += 16;
-    q0x2s16 = vld2q_s16(src);
-    q15s16 = q0x2s16.val[0];
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    // stage 3
-    q6s16 = vdupq_n_s16(cospi_30_64 * 2);
-    q0s16 = vqrdmulhq_s16(q8s16, q6s16);
-    q6s16 = vdupq_n_s16(cospi_2_64 * 2);
-    q7s16 = vqrdmulhq_s16(q8s16, q6s16);
-
-    q15s16 = vdupq_n_s16(-cospi_26_64 * 2);
-    q14s16 = vdupq_n_s16(cospi_6_64 * 2);
-    q3s16 = vqrdmulhq_s16(q9s16, q15s16);
-    q4s16 = vqrdmulhq_s16(q9s16, q14s16);
-
-    // stage 4
-    d0s16 = vget_low_s16(q0s16);
-    d1s16 = vget_high_s16(q0s16);
-    d6s16 = vget_low_s16(q3s16);
-    d7s16 = vget_high_s16(q3s16);
-    d8s16 = vget_low_s16(q4s16);
-    d9s16 = vget_high_s16(q4s16);
-    d14s16 = vget_low_s16(q7s16);
-    d15s16 = vget_high_s16(q7s16);
-
-    d30s16 = vdup_n_s16(cospi_8_64);
-    d31s16 = vdup_n_s16(cospi_24_64);
-
-    q12s32 = vmull_s16(d14s16, d31s16);
-    q5s32 = vmull_s16(d15s16, d31s16);
-    q2s32 = vmull_s16(d0s16, d31s16);
-    q11s32 = vmull_s16(d1s16, d31s16);
-
-    q12s32 = vmlsl_s16(q12s32, d0s16, d30s16);
-    q5s32 = vmlsl_s16(q5s32, d1s16, d30s16);
-    q2s32 = vmlal_s16(q2s32, d14s16, d30s16);
-    q11s32 = vmlal_s16(q11s32, d15s16, d30s16);
-
-    d2s16 = vqrshrn_n_s32(q12s32, 14);
-    d3s16 = vqrshrn_n_s32(q5s32, 14);
-    d12s16 = vqrshrn_n_s32(q2s32, 14);
-    d13s16 = vqrshrn_n_s32(q11s32, 14);
-    q1s16 = vcombine_s16(d2s16, d3s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    d30s16 = vdup_n_s16(-cospi_8_64);
-    q10s32 = vmull_s16(d8s16, d30s16);
-    q13s32 = vmull_s16(d9s16, d30s16);
-    q8s32 = vmull_s16(d6s16, d30s16);
-    q9s32 = vmull_s16(d7s16, d30s16);
-
-    q10s32 = vmlsl_s16(q10s32, d6s16, d31s16);
-    q13s32 = vmlsl_s16(q13s32, d7s16, d31s16);
-    q8s32 = vmlal_s16(q8s32, d8s16, d31s16);
-    q9s32 = vmlal_s16(q9s32, d9s16, d31s16);
-
-    d4s16 = vqrshrn_n_s32(q10s32, 14);
-    d5s16 = vqrshrn_n_s32(q13s32, 14);
-    d10s16 = vqrshrn_n_s32(q8s32, 14);
-    d11s16 = vqrshrn_n_s32(q9s32, 14);
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-
-    // stage 5
-    q8s16  = vaddq_s16(q0s16, q3s16);
-    q9s16  = vaddq_s16(q1s16, q2s16);
-    q10s16 = vsubq_s16(q1s16, q2s16);
-    q11s16 = vsubq_s16(q0s16, q3s16);
-    q12s16 = vsubq_s16(q7s16, q4s16);
-    q13s16 = vsubq_s16(q6s16, q5s16);
-    q14s16 = vaddq_s16(q6s16, q5s16);
-    q15s16 = vaddq_s16(q7s16, q4s16);
-
-    // stage 6
-    d20s16 = vget_low_s16(q10s16);
-    d21s16 = vget_high_s16(q10s16);
-    d22s16 = vget_low_s16(q11s16);
-    d23s16 = vget_high_s16(q11s16);
-    d24s16 = vget_low_s16(q12s16);
-    d25s16 = vget_high_s16(q12s16);
-    d26s16 = vget_low_s16(q13s16);
-    d27s16 = vget_high_s16(q13s16);
-
-    d14s16 = vdup_n_s16(cospi_16_64);
-    q3s32 = vmull_s16(d26s16, d14s16);
-    q4s32 = vmull_s16(d27s16, d14s16);
-    q0s32 = vmull_s16(d20s16, d14s16);
-    q1s32 = vmull_s16(d21s16, d14s16);
-
-    q5s32 = vsubq_s32(q3s32, q0s32);
-    q6s32 = vsubq_s32(q4s32, q1s32);
-    q0s32 = vaddq_s32(q3s32, q0s32);
-    q4s32 = vaddq_s32(q4s32, q1s32);
-
-    d4s16 = vqrshrn_n_s32(q5s32, 14);
-    d5s16 = vqrshrn_n_s32(q6s32, 14);
-    d10s16 = vqrshrn_n_s32(q0s32, 14);
-    d11s16 = vqrshrn_n_s32(q4s32, 14);
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-
-    q0s32 = vmull_s16(d22s16, d14s16);
-    q1s32 = vmull_s16(d23s16, d14s16);
-    q13s32 = vmull_s16(d24s16, d14s16);
-    q6s32 = vmull_s16(d25s16, d14s16);
-
-    q10s32 = vsubq_s32(q13s32, q0s32);
-    q4s32 = vsubq_s32(q6s32, q1s32);
-    q13s32 = vaddq_s32(q13s32, q0s32);
-    q6s32 = vaddq_s32(q6s32, q1s32);
-
-    d6s16 = vqrshrn_n_s32(q10s32, 14);
-    d7s16 = vqrshrn_n_s32(q4s32, 14);
-    d8s16 = vqrshrn_n_s32(q13s32, 14);
-    d9s16 = vqrshrn_n_s32(q6s32, 14);
-    q3s16 = vcombine_s16(d6s16, d7s16);
-    q4s16 = vcombine_s16(d8s16, d9s16);
-
-    // stage 7
+    // store the data: output rows 8-15
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q8s16 = vrshrq_n_s16(q8s16, 6);
+    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_s64(d12s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q9s16 = vrshrq_n_s16(q9s16, 6);
+    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_s64(d12s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q2s16 = vrshrq_n_s16(q2s16, 6);
+    q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2s16), vreinterpret_u8_s64(d12s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q3s16 = vrshrq_n_s16(q3s16, 6);
+    q3u16 = vaddw_u8(vreinterpretq_u16_s16(q3s16), vreinterpret_u8_s64(d12s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q3u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q4s16 = vrshrq_n_s16(q4s16, 6);
+    q4u16 = vaddw_u8(vreinterpretq_u16_s16(q4s16), vreinterpret_u8_s64(d12s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q4u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q5s16 = vrshrq_n_s16(q5s16, 6);
+    q5u16 = vaddw_u8(vreinterpretq_u16_s16(q5s16), vreinterpret_u8_s64(d12s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q5u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+
+    d12s64 = vld1_s64((int64_t *)dest);
+    dest += dest_stride;
+    q14s16 = vrshrq_n_s16(q14s16, 6);
+    q14u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q14s16), vreinterpret_u8_s64(d12s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q14u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+    d += dest_stride;
+
+    d12s64 = vld1_s64((int64_t *)dest);
+    q15s16 = vrshrq_n_s16(q15s16, 6);
+    q15u16 =
+        vaddw_u8(vreinterpretq_u16_s16(q15s16), vreinterpret_u8_s64(d12s64));
+    d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q15u16));
+    vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8));
+  } else {  // skip_adding == 0: store the intermediate result instead
     q0s16 = vld1q_s16(pass1Output);
     pass1Output += 8;
     q1s16 = vld1q_s16(pass1Output);
@@ -1248,6 +809,7 @@ void vpx_idct16x16_10_add_neon_pass2(
     q10s16 = vld1q_s16(pass1Output);
     pass1Output += 8;
     q11s16 = vld1q_s16(pass1Output);
+    pass1Output += 8;
     q12s16 = vaddq_s16(q10s16, q9s16);
     q13s16 = vaddq_s16(q11s16, q8s16);
     d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
@@ -1265,53 +827,468 @@ void vpx_idct16x16_10_add_neon_pass2(
     q8s16 = vsubq_s16(q11s16, q8s16);
     q9s16 = vsubq_s16(q10s16, q9s16);
 
-    d4u64  = vreinterpret_u64_s16(vget_low_s16(q2s16));
-    d5u64  = vreinterpret_u64_s16(vget_high_s16(q2s16));
-    d6u64  = vreinterpret_u64_s16(vget_low_s16(q3s16));
-    d7u64  = vreinterpret_u64_s16(vget_high_s16(q3s16));
-    d8u64  = vreinterpret_u64_s16(vget_low_s16(q4s16));
-    d9u64  = vreinterpret_u64_s16(vget_high_s16(q4s16));
-    d10u64 = vreinterpret_u64_s16(vget_low_s16(q5s16));
-    d11u64 = vreinterpret_u64_s16(vget_high_s16(q5s16));
-    d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16));
-    d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16));
-    d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
-    d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
-    d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
-    d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
-    d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
-    d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
-
-    vst1_u64((uint64_t *)out, d16u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q8s16)));
     out += 4;
-    vst1_u64((uint64_t *)out, d17u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q8s16)));
     out += 12;
-    vst1_u64((uint64_t *)out, d18u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q9s16)));
     out += 4;
-    vst1_u64((uint64_t *)out, d19u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q9s16)));
     out += 12;
-    vst1_u64((uint64_t *)out, d4u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q2s16)));
     out += 4;
-    vst1_u64((uint64_t *)out, d5u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q2s16)));
     out += 12;
-    vst1_u64((uint64_t *)out, d6u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q3s16)));
     out += 4;
-    vst1_u64((uint64_t *)out, d7u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q3s16)));
     out += 12;
-    vst1_u64((uint64_t *)out, d8u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q4s16)));
     out += 4;
-    vst1_u64((uint64_t *)out, d9u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q4s16)));
     out += 12;
-    vst1_u64((uint64_t *)out, d10u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q5s16)));
     out += 4;
-    vst1_u64((uint64_t *)out, d11u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q5s16)));
     out += 12;
-    vst1_u64((uint64_t *)out, d28u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q14s16)));
     out += 4;
-    vst1_u64((uint64_t *)out, d29u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q14s16)));
     out += 12;
-    vst1_u64((uint64_t *)out, d30u64);
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q15s16)));
     out += 4;
-    vst1_u64((uint64_t *)out, d31u64);
-    return;
+    vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q15s16)));
+  }
+  return;
+}
+
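+// The _10 variants are a shortcut for blocks where only the low-frequency
+// coefficients (the top-left 4x4 of the 16x16 block) are non-zero, so the
+// full butterfly network collapses to a handful of multiplies.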
+void vpx_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out,
+                                     int output_stride) {
+  int16x4_t d4s16;
+  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
+  uint64x1_t d4u64, d5u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64;
+  uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
+  int16x8_t q0s16, q1s16, q2s16, q4s16, q5s16, q6s16, q7s16;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  int32x4_t q6s32, q9s32;
+  int32x4_t q10s32, q11s32, q12s32, q15s32;
+  int16x8x2_t q0x2s16;
+
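+  // vld2q_s16 de-interleaves each 16-element row; only val[0] (every other
+  // coefficient) is kept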
+  q0x2s16 = vld2q_s16(in);
+  q8s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q9s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q10s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q11s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q12s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q13s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q14s16 = q0x2s16.val[0];
+  in += 16;
+  q0x2s16 = vld2q_s16(in);
+  q15s16 = q0x2s16.val[0];
+
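+  // 8x8 in-register transpose so the 1-D transform below runs down columns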
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
+
+  // stage 3
+  q0s16 = vdupq_n_s16(cospi_28_64 * 2);
+  q1s16 = vdupq_n_s16(cospi_4_64 * 2);
+
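+  // vqrdmulhq_s16 with (2 * cospi) gives a multiply by cospi plus a 14-bit
+  // rounding shift in a single instruction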
+  q4s16 = vqrdmulhq_s16(q9s16, q0s16);
+  q7s16 = vqrdmulhq_s16(q9s16, q1s16);
+
+  // stage 4
+  q1s16 = vdupq_n_s16(cospi_16_64 * 2);
+  d4s16 = vdup_n_s16(cospi_16_64);
+
+  q8s16 = vqrdmulhq_s16(q8s16, q1s16);
+
+  d8s16 = vget_low_s16(q4s16);
+  d9s16 = vget_high_s16(q4s16);
+  d14s16 = vget_low_s16(q7s16);
+  d15s16 = vget_high_s16(q7s16);
+  q9s32 = vmull_s16(d14s16, d4s16);
+  q10s32 = vmull_s16(d15s16, d4s16);
+  q12s32 = vmull_s16(d9s16, d4s16);
+  q11s32 = vmull_s16(d8s16, d4s16);
+
+  q15s32 = vsubq_s32(q10s32, q12s32);
+  q6s32 = vsubq_s32(q9s32, q11s32);
+  q9s32 = vaddq_s32(q9s32, q11s32);
+  q10s32 = vaddq_s32(q10s32, q12s32);
+
+  d11s16 = vqrshrn_n_s32(q15s32, 14);
+  d10s16 = vqrshrn_n_s32(q6s32, 14);
+  d12s16 = vqrshrn_n_s32(q9s32, 14);
+  d13s16 = vqrshrn_n_s32(q10s32, 14);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  // stage 6
+  q2s16 = vaddq_s16(q8s16, q7s16);
+  q9s16 = vaddq_s16(q8s16, q6s16);
+  q10s16 = vaddq_s16(q8s16, q5s16);
+  q11s16 = vaddq_s16(q8s16, q4s16);
+  q12s16 = vsubq_s16(q8s16, q4s16);
+  q13s16 = vsubq_s16(q8s16, q5s16);
+  q14s16 = vsubq_s16(q8s16, q6s16);
+  q15s16 = vsubq_s16(q8s16, q7s16);
+
+  d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16));
+  d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16));
+  d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
+  d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
+  d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16));
+  d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16));
+  d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16));
+  d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16));
+  d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
+  d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
+  d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
+  d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
+  d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
+  d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
+  d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
+  d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
+
+  // store the data
+  output_stride >>= 1;  // output_stride / 2, out is int16_t
+  vst1_u64((uint64_t *)out, d4u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d5u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d18u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d19u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d20u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d21u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d22u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d23u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d24u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d25u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d26u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d27u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d28u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d29u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d30u64);
+  out += output_stride;
+  vst1_u64((uint64_t *)out, d31u64);
+  return;
+}
+
+void vpx_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
+                                     int16_t *pass1Output, int16_t skip_adding,
+                                     uint8_t *dest, int dest_stride) {
+  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
+  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
+  int16x4_t d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d30s16, d31s16;
+  uint64x1_t d4u64, d5u64, d6u64, d7u64, d8u64, d9u64, d10u64, d11u64;
+  uint64x1_t d16u64, d17u64, d18u64, d19u64;
+  uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
+  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32;
+  int32x4_t q10s32, q11s32, q12s32, q13s32;
+  int16x8x2_t q0x2s16;
+  (void)skip_adding;
+  (void)dest;
+  (void)dest_stride;
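+  // this variant only stores the intermediate rows to out, so the
+  // destination arguments are unused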
+
+  q0x2s16 = vld2q_s16(src);
+  q8s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q9s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q10s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q11s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q12s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q13s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q14s16 = q0x2s16.val[0];
+  src += 16;
+  q0x2s16 = vld2q_s16(src);
+  q15s16 = q0x2s16.val[0];
+
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
+
+  // stage 3
+  q6s16 = vdupq_n_s16(cospi_30_64 * 2);
+  q0s16 = vqrdmulhq_s16(q8s16, q6s16);
+  q6s16 = vdupq_n_s16(cospi_2_64 * 2);
+  q7s16 = vqrdmulhq_s16(q8s16, q6s16);
+
+  q15s16 = vdupq_n_s16(-cospi_26_64 * 2);
+  q14s16 = vdupq_n_s16(cospi_6_64 * 2);
+  q3s16 = vqrdmulhq_s16(q9s16, q15s16);
+  q4s16 = vqrdmulhq_s16(q9s16, q14s16);
+
+  // stage 4
+  d0s16 = vget_low_s16(q0s16);
+  d1s16 = vget_high_s16(q0s16);
+  d6s16 = vget_low_s16(q3s16);
+  d7s16 = vget_high_s16(q3s16);
+  d8s16 = vget_low_s16(q4s16);
+  d9s16 = vget_high_s16(q4s16);
+  d14s16 = vget_low_s16(q7s16);
+  d15s16 = vget_high_s16(q7s16);
+
+  d30s16 = vdup_n_s16(cospi_8_64);
+  d31s16 = vdup_n_s16(cospi_24_64);
+
+  q12s32 = vmull_s16(d14s16, d31s16);
+  q5s32 = vmull_s16(d15s16, d31s16);
+  q2s32 = vmull_s16(d0s16, d31s16);
+  q11s32 = vmull_s16(d1s16, d31s16);
+
+  q12s32 = vmlsl_s16(q12s32, d0s16, d30s16);
+  q5s32 = vmlsl_s16(q5s32, d1s16, d30s16);
+  q2s32 = vmlal_s16(q2s32, d14s16, d30s16);
+  q11s32 = vmlal_s16(q11s32, d15s16, d30s16);
+
+  d2s16 = vqrshrn_n_s32(q12s32, 14);
+  d3s16 = vqrshrn_n_s32(q5s32, 14);
+  d12s16 = vqrshrn_n_s32(q2s32, 14);
+  d13s16 = vqrshrn_n_s32(q11s32, 14);
+  q1s16 = vcombine_s16(d2s16, d3s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  d30s16 = vdup_n_s16(-cospi_8_64);
+  q10s32 = vmull_s16(d8s16, d30s16);
+  q13s32 = vmull_s16(d9s16, d30s16);
+  q8s32 = vmull_s16(d6s16, d30s16);
+  q9s32 = vmull_s16(d7s16, d30s16);
+
+  q10s32 = vmlsl_s16(q10s32, d6s16, d31s16);
+  q13s32 = vmlsl_s16(q13s32, d7s16, d31s16);
+  q8s32 = vmlal_s16(q8s32, d8s16, d31s16);
+  q9s32 = vmlal_s16(q9s32, d9s16, d31s16);
+
+  d4s16 = vqrshrn_n_s32(q10s32, 14);
+  d5s16 = vqrshrn_n_s32(q13s32, 14);
+  d10s16 = vqrshrn_n_s32(q8s32, 14);
+  d11s16 = vqrshrn_n_s32(q9s32, 14);
+  q2s16 = vcombine_s16(d4s16, d5s16);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+
+  // stage 5
+  q8s16 = vaddq_s16(q0s16, q3s16);
+  q9s16 = vaddq_s16(q1s16, q2s16);
+  q10s16 = vsubq_s16(q1s16, q2s16);
+  q11s16 = vsubq_s16(q0s16, q3s16);
+  q12s16 = vsubq_s16(q7s16, q4s16);
+  q13s16 = vsubq_s16(q6s16, q5s16);
+  q14s16 = vaddq_s16(q6s16, q5s16);
+  q15s16 = vaddq_s16(q7s16, q4s16);
+
+  // stage 6
+  d20s16 = vget_low_s16(q10s16);
+  d21s16 = vget_high_s16(q10s16);
+  d22s16 = vget_low_s16(q11s16);
+  d23s16 = vget_high_s16(q11s16);
+  d24s16 = vget_low_s16(q12s16);
+  d25s16 = vget_high_s16(q12s16);
+  d26s16 = vget_low_s16(q13s16);
+  d27s16 = vget_high_s16(q13s16);
+
+  d14s16 = vdup_n_s16(cospi_16_64);
+  q3s32 = vmull_s16(d26s16, d14s16);
+  q4s32 = vmull_s16(d27s16, d14s16);
+  q0s32 = vmull_s16(d20s16, d14s16);
+  q1s32 = vmull_s16(d21s16, d14s16);
+
+  q5s32 = vsubq_s32(q3s32, q0s32);
+  q6s32 = vsubq_s32(q4s32, q1s32);
+  q0s32 = vaddq_s32(q3s32, q0s32);
+  q4s32 = vaddq_s32(q4s32, q1s32);
+
+  d4s16 = vqrshrn_n_s32(q5s32, 14);
+  d5s16 = vqrshrn_n_s32(q6s32, 14);
+  d10s16 = vqrshrn_n_s32(q0s32, 14);
+  d11s16 = vqrshrn_n_s32(q4s32, 14);
+  q2s16 = vcombine_s16(d4s16, d5s16);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+
+  q0s32 = vmull_s16(d22s16, d14s16);
+  q1s32 = vmull_s16(d23s16, d14s16);
+  q13s32 = vmull_s16(d24s16, d14s16);
+  q6s32 = vmull_s16(d25s16, d14s16);
+
+  q10s32 = vsubq_s32(q13s32, q0s32);
+  q4s32 = vsubq_s32(q6s32, q1s32);
+  q13s32 = vaddq_s32(q13s32, q0s32);
+  q6s32 = vaddq_s32(q6s32, q1s32);
+
+  d6s16 = vqrshrn_n_s32(q10s32, 14);
+  d7s16 = vqrshrn_n_s32(q4s32, 14);
+  d8s16 = vqrshrn_n_s32(q13s32, 14);
+  d9s16 = vqrshrn_n_s32(q6s32, 14);
+  q3s16 = vcombine_s16(d6s16, d7s16);
+  q4s16 = vcombine_s16(d8s16, d9s16);
+
+  // stage 7
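+  // the stores below write one half row (4 + 4 int16s), then step by 12 to
+  // reach the start of the next 16-wide output row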
+  q0s16 = vld1q_s16(pass1Output);
+  pass1Output += 8;
+  q1s16 = vld1q_s16(pass1Output);
+  pass1Output += 8;
+  q12s16 = vaddq_s16(q0s16, q15s16);
+  q13s16 = vaddq_s16(q1s16, q14s16);
+  d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
+  d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
+  d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
+  d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
+  vst1_u64((uint64_t *)out, d24u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d25u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d26u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d27u64);
+  out += 12;
+  q14s16 = vsubq_s16(q1s16, q14s16);
+  q15s16 = vsubq_s16(q0s16, q15s16);
+
+  q10s16 = vld1q_s16(pass1Output);
+  pass1Output += 8;
+  q11s16 = vld1q_s16(pass1Output);
+  pass1Output += 8;
+  q12s16 = vaddq_s16(q10s16, q5s16);
+  q13s16 = vaddq_s16(q11s16, q4s16);
+  d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
+  d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
+  d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
+  d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
+  vst1_u64((uint64_t *)out, d24u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d25u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d26u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d27u64);
+  out += 12;
+  q4s16 = vsubq_s16(q11s16, q4s16);
+  q5s16 = vsubq_s16(q10s16, q5s16);
+
+  q0s16 = vld1q_s16(pass1Output);
+  pass1Output += 8;
+  q1s16 = vld1q_s16(pass1Output);
+  pass1Output += 8;
+  q12s16 = vaddq_s16(q0s16, q3s16);
+  q13s16 = vaddq_s16(q1s16, q2s16);
+  d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
+  d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
+  d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
+  d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
+  vst1_u64((uint64_t *)out, d24u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d25u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d26u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d27u64);
+  out += 12;
+  q2s16 = vsubq_s16(q1s16, q2s16);
+  q3s16 = vsubq_s16(q0s16, q3s16);
+
+  q10s16 = vld1q_s16(pass1Output);
+  pass1Output += 8;
+  q11s16 = vld1q_s16(pass1Output);
+  q12s16 = vaddq_s16(q10s16, q9s16);
+  q13s16 = vaddq_s16(q11s16, q8s16);
+  d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
+  d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
+  d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
+  d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
+  vst1_u64((uint64_t *)out, d24u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d25u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d26u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d27u64);
+  out += 12;
+  q8s16 = vsubq_s16(q11s16, q8s16);
+  q9s16 = vsubq_s16(q10s16, q9s16);
+
+  d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16));
+  d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16));
+  d6u64 = vreinterpret_u64_s16(vget_low_s16(q3s16));
+  d7u64 = vreinterpret_u64_s16(vget_high_s16(q3s16));
+  d8u64 = vreinterpret_u64_s16(vget_low_s16(q4s16));
+  d9u64 = vreinterpret_u64_s16(vget_high_s16(q4s16));
+  d10u64 = vreinterpret_u64_s16(vget_low_s16(q5s16));
+  d11u64 = vreinterpret_u64_s16(vget_high_s16(q5s16));
+  d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16));
+  d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16));
+  d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
+  d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
+  d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
+  d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
+  d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
+  d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
+
+  vst1_u64((uint64_t *)out, d16u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d17u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d18u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d19u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d4u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d5u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d6u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d7u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d8u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d9u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d10u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d11u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d28u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d29u64);
+  out += 12;
+  vst1_u64((uint64_t *)out, d30u64);
+  out += 4;
+  vst1_u64((uint64_t *)out, d31u64);
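+  /* Note on the increments: each vst1_u64 writes four int16 lanes (8 bytes),
+   * so every out += 4 / out += 12 pair stores one 8-value half-row and then
+   * skips to the start of the next row of the 16-wide intermediate buffer
+   * (4 + 12 == 16, the row stride of row_idct_output in the callers). */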
+  return;
 }
diff --git a/vpx_dsp/arm/idct16x16_neon.c b/vpx_dsp/arm/idct16x16_neon.c
index 352979aa16f7613c4a4d6c176095921be08a61a5..ecc263df28445be1e3d94b5d85126dd6c82062a5 100644
--- a/vpx_dsp/arm/idct16x16_neon.c
+++ b/vpx_dsp/arm/idct16x16_neon.c
@@ -10,24 +10,16 @@
 
 #include "vpx_dsp/vpx_dsp_common.h"
 
-void vpx_idct16x16_256_add_neon_pass1(const int16_t *input,
-                                      int16_t *output,
+void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output,
                                       int output_stride);
-void vpx_idct16x16_256_add_neon_pass2(const int16_t *src,
-                                      int16_t *output,
-                                      int16_t *pass1Output,
-                                      int16_t skip_adding,
-                                      uint8_t *dest,
-                                      int dest_stride);
-void vpx_idct16x16_10_add_neon_pass1(const int16_t *input,
-                                     int16_t *output,
+void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output,
+                                      int16_t *pass1Output, int16_t skip_adding,
+                                      uint8_t *dest, int dest_stride);
+void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output,
                                      int output_stride);
-void vpx_idct16x16_10_add_neon_pass2(const int16_t *src,
-                                     int16_t *output,
-                                     int16_t *pass1Output,
-                                     int16_t skip_adding,
-                                     uint8_t *dest,
-                                     int dest_stride);
+void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output,
+                                     int16_t *pass1Output, int16_t skip_adding,
+                                     uint8_t *dest, int dest_stride);
 
 #if HAVE_NEON_ASM
 /* For ARM NEON, d8-d15 are callee-saved registers, and need to be saved. */
@@ -35,13 +27,13 @@ extern void vpx_push_neon(int64_t *store);
 extern void vpx_pop_neon(int64_t *store);
 #endif  // HAVE_NEON_ASM
 
-void vpx_idct16x16_256_add_neon(const int16_t *input,
-                                uint8_t *dest, int dest_stride) {
+void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
+                                int dest_stride) {
 #if HAVE_NEON_ASM
   int64_t store_reg[8];
 #endif
-  int16_t pass1_output[16*16] = {0};
-  int16_t row_idct_output[16*16] = {0};
+  int16_t pass1_output[16 * 16] = { 0 };
+  int16_t row_idct_output[16 * 16] = { 0 };
 
 #if HAVE_NEON_ASM
   // save d8-d15 register values.
@@ -56,27 +48,19 @@ void vpx_idct16x16_256_add_neon(const int16_t *input,
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with the result of pass 1 (pass1_output) to calculate the final result
   // in stage 7, which will be saved into row_idct_output.
-  vpx_idct16x16_256_add_neon_pass2(input+1,
-                                     row_idct_output,
-                                     pass1_output,
-                                     0,
-                                     dest,
-                                     dest_stride);
+  vpx_idct16x16_256_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
+                                   dest, dest_stride);
 
   /* Parallel idct on the lower 8 rows */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
   // the stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(input+8*16, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with the result of pass 1 (pass1_output) to calculate the final result
   // in stage 7, which will be saved into row_idct_output.
-  vpx_idct16x16_256_add_neon_pass2(input+8*16+1,
-                                     row_idct_output+8,
-                                     pass1_output,
-                                     0,
-                                     dest,
-                                     dest_stride);
+  vpx_idct16x16_256_add_neon_pass2(input + 8 * 16 + 1, row_idct_output + 8,
+                                   pass1_output, 0, dest, dest_stride);
 
   /* Parallel idct on the left 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves the
@@ -86,27 +70,20 @@ void vpx_idct16x16_256_add_neon(const int16_t *input,
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with the result of pass 1 (pass1_output) to calculate the final result
   // in stage 7.
   // Then add the result to the destination data.
-  vpx_idct16x16_256_add_neon_pass2(row_idct_output+1,
-                                     row_idct_output,
-                                     pass1_output,
-                                     1,
-                                     dest,
-                                     dest_stride);
+  vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
+                                   pass1_output, 1, dest, dest_stride);
 
   /* Parallel idct on the right 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
   // the stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output+8*16, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with the result of pass 1 (pass1_output) to calculate the final result
   // in stage 7.
   // Then add the result to the destination data.
-  vpx_idct16x16_256_add_neon_pass2(row_idct_output+8*16+1,
-                                     row_idct_output+8,
-                                     pass1_output,
-                                     1,
-                                     dest+8,
-                                     dest_stride);
+  vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
+                                   row_idct_output + 8, pass1_output, 1,
+                                   dest + 8, dest_stride);
 
 #if HAVE_NEON_ASM
   // restore d8-d15 register values.
@@ -116,13 +93,13 @@ void vpx_idct16x16_256_add_neon(const int16_t *input,
   return;
 }
 
-void vpx_idct16x16_10_add_neon(const int16_t *input,
-                               uint8_t *dest, int dest_stride) {
+void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
+                               int dest_stride) {
 #if HAVE_NEON_ASM
   int64_t store_reg[8];
 #endif
-  int16_t pass1_output[16*16] = {0};
-  int16_t row_idct_output[16*16] = {0};
+  int16_t pass1_output[16 * 16] = { 0 };
+  int16_t row_idct_output[16 * 16] = { 0 };
 
 #if HAVE_NEON_ASM
   // save d8-d15 register values.
@@ -137,12 +114,8 @@ void vpx_idct16x16_10_add_neon(const int16_t *input,
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with the result of pass 1 (pass1_output) to calculate the final result
   // in stage 7, which will be saved into row_idct_output.
-  vpx_idct16x16_10_add_neon_pass2(input+1,
-                                        row_idct_output,
-                                        pass1_output,
-                                        0,
-                                        dest,
-                                        dest_stride);
+  vpx_idct16x16_10_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0,
+                                  dest, dest_stride);
 
   /* Skip Parallel idct on the lower 8 rows as they are all 0s */
 
@@ -154,27 +127,20 @@ void vpx_idct16x16_10_add_neon(const int16_t *input,
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with the result of pass 1 (pass1_output) to calculate the final result
   // in stage 7.
   // Then add the result to the destination data.
-  vpx_idct16x16_256_add_neon_pass2(row_idct_output+1,
-                                     row_idct_output,
-                                     pass1_output,
-                                     1,
-                                     dest,
-                                     dest_stride);
+  vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output,
+                                   pass1_output, 1, dest, dest_stride);
 
   /* Parallel idct on the right 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
   // the stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output+8*16, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with the result of pass 1 (pass1_output) to calculate the final result
   // in stage 7.
   // Then add the result to the destination data.
-  vpx_idct16x16_256_add_neon_pass2(row_idct_output+8*16+1,
-                                     row_idct_output+8,
-                                     pass1_output,
-                                     1,
-                                     dest+8,
-                                     dest_stride);
+  vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1,
+                                   row_idct_output + 8, pass1_output, 1,
+                                   dest + 8, dest_stride);
 
 #if HAVE_NEON_ASM
   // restore d8-d15 register values.
diff --git a/vpx_dsp/arm/idct32x32_1_add_neon.c b/vpx_dsp/arm/idct32x32_1_add_neon.c
index c25c0c4a5c208b4e758793e71fa6bdd40633e078..dab7d098e8abd82f30c1b4b6d43d69f7e3826f98 100644
--- a/vpx_dsp/arm/idct32x32_1_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_1_add_neon.c
@@ -15,151 +15,126 @@
 #include "vpx_dsp/inv_txfm.h"
 #include "vpx_ports/mem.h"
 
-static INLINE void LD_16x8(
-        uint8_t *d,
-        int d_stride,
-        uint8x16_t *q8u8,
-        uint8x16_t *q9u8,
-        uint8x16_t *q10u8,
-        uint8x16_t *q11u8,
-        uint8x16_t *q12u8,
-        uint8x16_t *q13u8,
-        uint8x16_t *q14u8,
-        uint8x16_t *q15u8) {
-    *q8u8 = vld1q_u8(d);
-    d += d_stride;
-    *q9u8 = vld1q_u8(d);
-    d += d_stride;
-    *q10u8 = vld1q_u8(d);
-    d += d_stride;
-    *q11u8 = vld1q_u8(d);
-    d += d_stride;
-    *q12u8 = vld1q_u8(d);
-    d += d_stride;
-    *q13u8 = vld1q_u8(d);
-    d += d_stride;
-    *q14u8 = vld1q_u8(d);
-    d += d_stride;
-    *q15u8 = vld1q_u8(d);
-    return;
+static INLINE void LD_16x8(uint8_t *d, int d_stride, uint8x16_t *q8u8,
+                           uint8x16_t *q9u8, uint8x16_t *q10u8,
+                           uint8x16_t *q11u8, uint8x16_t *q12u8,
+                           uint8x16_t *q13u8, uint8x16_t *q14u8,
+                           uint8x16_t *q15u8) {
+  *q8u8 = vld1q_u8(d);
+  d += d_stride;
+  *q9u8 = vld1q_u8(d);
+  d += d_stride;
+  *q10u8 = vld1q_u8(d);
+  d += d_stride;
+  *q11u8 = vld1q_u8(d);
+  d += d_stride;
+  *q12u8 = vld1q_u8(d);
+  d += d_stride;
+  *q13u8 = vld1q_u8(d);
+  d += d_stride;
+  *q14u8 = vld1q_u8(d);
+  d += d_stride;
+  *q15u8 = vld1q_u8(d);
+  return;
 }
 
-static INLINE void ADD_DIFF_16x8(
-        uint8x16_t qdiffu8,
-        uint8x16_t *q8u8,
-        uint8x16_t *q9u8,
-        uint8x16_t *q10u8,
-        uint8x16_t *q11u8,
-        uint8x16_t *q12u8,
-        uint8x16_t *q13u8,
-        uint8x16_t *q14u8,
-        uint8x16_t *q15u8) {
-    *q8u8 = vqaddq_u8(*q8u8, qdiffu8);
-    *q9u8 = vqaddq_u8(*q9u8, qdiffu8);
-    *q10u8 = vqaddq_u8(*q10u8, qdiffu8);
-    *q11u8 = vqaddq_u8(*q11u8, qdiffu8);
-    *q12u8 = vqaddq_u8(*q12u8, qdiffu8);
-    *q13u8 = vqaddq_u8(*q13u8, qdiffu8);
-    *q14u8 = vqaddq_u8(*q14u8, qdiffu8);
-    *q15u8 = vqaddq_u8(*q15u8, qdiffu8);
-    return;
+static INLINE void ADD_DIFF_16x8(uint8x16_t qdiffu8, uint8x16_t *q8u8,
+                                 uint8x16_t *q9u8, uint8x16_t *q10u8,
+                                 uint8x16_t *q11u8, uint8x16_t *q12u8,
+                                 uint8x16_t *q13u8, uint8x16_t *q14u8,
+                                 uint8x16_t *q15u8) {
+  *q8u8 = vqaddq_u8(*q8u8, qdiffu8);
+  *q9u8 = vqaddq_u8(*q9u8, qdiffu8);
+  *q10u8 = vqaddq_u8(*q10u8, qdiffu8);
+  *q11u8 = vqaddq_u8(*q11u8, qdiffu8);
+  *q12u8 = vqaddq_u8(*q12u8, qdiffu8);
+  *q13u8 = vqaddq_u8(*q13u8, qdiffu8);
+  *q14u8 = vqaddq_u8(*q14u8, qdiffu8);
+  *q15u8 = vqaddq_u8(*q15u8, qdiffu8);
+  return;
 }
 
-static INLINE void SUB_DIFF_16x8(
-        uint8x16_t qdiffu8,
-        uint8x16_t *q8u8,
-        uint8x16_t *q9u8,
-        uint8x16_t *q10u8,
-        uint8x16_t *q11u8,
-        uint8x16_t *q12u8,
-        uint8x16_t *q13u8,
-        uint8x16_t *q14u8,
-        uint8x16_t *q15u8) {
-    *q8u8 = vqsubq_u8(*q8u8, qdiffu8);
-    *q9u8 = vqsubq_u8(*q9u8, qdiffu8);
-    *q10u8 = vqsubq_u8(*q10u8, qdiffu8);
-    *q11u8 = vqsubq_u8(*q11u8, qdiffu8);
-    *q12u8 = vqsubq_u8(*q12u8, qdiffu8);
-    *q13u8 = vqsubq_u8(*q13u8, qdiffu8);
-    *q14u8 = vqsubq_u8(*q14u8, qdiffu8);
-    *q15u8 = vqsubq_u8(*q15u8, qdiffu8);
-    return;
+static INLINE void SUB_DIFF_16x8(uint8x16_t qdiffu8, uint8x16_t *q8u8,
+                                 uint8x16_t *q9u8, uint8x16_t *q10u8,
+                                 uint8x16_t *q11u8, uint8x16_t *q12u8,
+                                 uint8x16_t *q13u8, uint8x16_t *q14u8,
+                                 uint8x16_t *q15u8) {
+  *q8u8 = vqsubq_u8(*q8u8, qdiffu8);
+  *q9u8 = vqsubq_u8(*q9u8, qdiffu8);
+  *q10u8 = vqsubq_u8(*q10u8, qdiffu8);
+  *q11u8 = vqsubq_u8(*q11u8, qdiffu8);
+  *q12u8 = vqsubq_u8(*q12u8, qdiffu8);
+  *q13u8 = vqsubq_u8(*q13u8, qdiffu8);
+  *q14u8 = vqsubq_u8(*q14u8, qdiffu8);
+  *q15u8 = vqsubq_u8(*q15u8, qdiffu8);
+  return;
 }
 
-static INLINE void ST_16x8(
-        uint8_t *d,
-        int d_stride,
-        uint8x16_t *q8u8,
-        uint8x16_t *q9u8,
-        uint8x16_t *q10u8,
-        uint8x16_t *q11u8,
-        uint8x16_t *q12u8,
-        uint8x16_t *q13u8,
-        uint8x16_t *q14u8,
-        uint8x16_t *q15u8) {
-    vst1q_u8(d, *q8u8);
-    d += d_stride;
-    vst1q_u8(d, *q9u8);
-    d += d_stride;
-    vst1q_u8(d, *q10u8);
-    d += d_stride;
-    vst1q_u8(d, *q11u8);
-    d += d_stride;
-    vst1q_u8(d, *q12u8);
-    d += d_stride;
-    vst1q_u8(d, *q13u8);
-    d += d_stride;
-    vst1q_u8(d, *q14u8);
-    d += d_stride;
-    vst1q_u8(d, *q15u8);
-    return;
+static INLINE void ST_16x8(uint8_t *d, int d_stride, uint8x16_t *q8u8,
+                           uint8x16_t *q9u8, uint8x16_t *q10u8,
+                           uint8x16_t *q11u8, uint8x16_t *q12u8,
+                           uint8x16_t *q13u8, uint8x16_t *q14u8,
+                           uint8x16_t *q15u8) {
+  vst1q_u8(d, *q8u8);
+  d += d_stride;
+  vst1q_u8(d, *q9u8);
+  d += d_stride;
+  vst1q_u8(d, *q10u8);
+  d += d_stride;
+  vst1q_u8(d, *q11u8);
+  d += d_stride;
+  vst1q_u8(d, *q12u8);
+  d += d_stride;
+  vst1q_u8(d, *q13u8);
+  d += d_stride;
+  vst1q_u8(d, *q14u8);
+  d += d_stride;
+  vst1q_u8(d, *q15u8);
+  return;
 }
 
-void vpx_idct32x32_1_add_neon(
-        int16_t *input,
-        uint8_t *dest,
-        int dest_stride) {
-    uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
-    int i, j, dest_stride8;
-    uint8_t *d;
-    int16_t a1, cospi_16_64 = 11585;
-    int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+void vpx_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+  uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
+  int i, j, dest_stride8;
+  uint8_t *d;
+  int16_t a1, cospi_16_64 = 11585;
+  int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
 
-    out = dct_const_round_shift(out * cospi_16_64);
-    a1 = ROUND_POWER_OF_TWO(out, 6);
+  out = dct_const_round_shift(out * cospi_16_64);
+  a1 = ROUND_POWER_OF_TWO(out, 6);
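+  /* In scalar terms (assuming dct_const_round_shift(x) is
+   * ROUND_POWER_OF_TWO(x, 14), as in vpx_dsp/inv_txfm.h): the DC coefficient
+   * is scaled twice by cos(pi/4) in Q14 (11585 / 2^14 ~= 0.7071), i.e.
+   * halved, then rounded-shifted right by 6 more bits, so a1 ~= input[0] /
+   * 128. That single value is splatted and saturating-added to (or
+   * subtracted from) every pixel of the 32x32 block below. */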
 
-    dest_stride8 = dest_stride * 8;
-    if (a1 >= 0) {  // diff_positive_32_32
-        a1 = a1 < 0 ? 0 : a1 > 255 ? 255 : a1;
-        q0u8 = vdupq_n_u8(a1);
-        for (i = 0; i < 2; i++, dest += 16) {  // diff_positive_32_32_loop
-            d = dest;
-            for (j = 0; j < 4; j++) {
-                LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
-                                        &q12u8, &q13u8, &q14u8, &q15u8);
-                ADD_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8,
-                                    &q12u8, &q13u8, &q14u8, &q15u8);
-                ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
-                                        &q12u8, &q13u8, &q14u8, &q15u8);
-                d += dest_stride8;
-            }
-        }
-    } else {  // diff_negative_32_32
-        a1 = -a1;
-        a1 = a1 < 0 ? 0 : a1 > 255 ? 255 : a1;
-        q0u8 = vdupq_n_u8(a1);
-        for (i = 0; i < 2; i++, dest += 16) {  // diff_negative_32_32_loop
-            d = dest;
-            for (j = 0; j < 4; j++) {
-                LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
-                                        &q12u8, &q13u8, &q14u8, &q15u8);
-                SUB_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8,
-                                    &q12u8, &q13u8, &q14u8, &q15u8);
-                ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
-                                        &q12u8, &q13u8, &q14u8, &q15u8);
-                d += dest_stride8;
-            }
-        }
+  dest_stride8 = dest_stride * 8;
+  if (a1 >= 0) {  // diff_positive_32_32
+    a1 = a1 < 0 ? 0 : a1 > 255 ? 255 : a1;
+    q0u8 = vdupq_n_u8(a1);
+    for (i = 0; i < 2; i++, dest += 16) {  // diff_positive_32_32_loop
+      d = dest;
+      for (j = 0; j < 4; j++) {
+        LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8,
+                &q14u8, &q15u8);
+        ADD_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8,
+                      &q14u8, &q15u8);
+        ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8,
+                &q14u8, &q15u8);
+        d += dest_stride8;
+      }
     }
-    return;
+  } else {  // diff_negative_32_32
+    a1 = -a1;
+    a1 = a1 < 0 ? 0 : a1 > 255 ? 255 : a1;
+    q0u8 = vdupq_n_u8(a1);
+    for (i = 0; i < 2; i++, dest += 16) {  // diff_negative_32_32_loop
+      d = dest;
+      for (j = 0; j < 4; j++) {
+        LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8,
+                &q14u8, &q15u8);
+        SUB_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8,
+                      &q14u8, &q15u8);
+        ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8,
+                &q14u8, &q15u8);
+        d += dest_stride8;
+      }
+    }
+  }
+  return;
 }
diff --git a/vpx_dsp/arm/idct32x32_add_neon.c b/vpx_dsp/arm/idct32x32_add_neon.c
index 025437eb963bc044a116e46f05be4d648a36064c..88b3d0109afdadf5f725b955ae0883cc673988f8 100644
--- a/vpx_dsp/arm/idct32x32_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_add_neon.c
@@ -14,706 +14,672 @@
 #include "vpx_dsp/txfm_common.h"
 
 #define LOAD_FROM_TRANSPOSED(prev, first, second) \
-    q14s16 = vld1q_s16(trans_buf + first * 8); \
-    q13s16 = vld1q_s16(trans_buf + second * 8);
+  q14s16 = vld1q_s16(trans_buf + first * 8);      \
+  q13s16 = vld1q_s16(trans_buf + second * 8);
 
 #define LOAD_FROM_OUTPUT(prev, first, second, qA, qB) \
-    qA = vld1q_s16(out + first * 32); \
-    qB = vld1q_s16(out + second * 32);
+  qA = vld1q_s16(out + first * 32);                   \
+  qB = vld1q_s16(out + second * 32);
 
 #define STORE_IN_OUTPUT(prev, first, second, qA, qB) \
-    vst1q_s16(out + first * 32, qA); \
-    vst1q_s16(out + second * 32, qB);
-
-#define  STORE_COMBINE_CENTER_RESULTS(r10, r9) \
-       __STORE_COMBINE_CENTER_RESULTS(r10, r9, stride, \
-                                      q6s16, q7s16, q8s16, q9s16);
-static INLINE void __STORE_COMBINE_CENTER_RESULTS(
-        uint8_t *p1,
-        uint8_t *p2,
-        int stride,
-        int16x8_t q6s16,
-        int16x8_t q7s16,
-        int16x8_t q8s16,
-        int16x8_t q9s16) {
-    int16x4_t d8s16, d9s16, d10s16, d11s16;
-
-    d8s16 = vld1_s16((int16_t *)p1);
-    p1 += stride;
-    d11s16 = vld1_s16((int16_t *)p2);
-    p2 -= stride;
-    d9s16 = vld1_s16((int16_t *)p1);
-    d10s16 = vld1_s16((int16_t *)p2);
-
-    q7s16 = vrshrq_n_s16(q7s16, 6);
-    q8s16 = vrshrq_n_s16(q8s16, 6);
-    q9s16 = vrshrq_n_s16(q9s16, 6);
-    q6s16 = vrshrq_n_s16(q6s16, 6);
-
-    q7s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q7s16),
-                                           vreinterpret_u8_s16(d9s16)));
-    q8s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q8s16),
-                                           vreinterpret_u8_s16(d10s16)));
-    q9s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q9s16),
-                                           vreinterpret_u8_s16(d11s16)));
-    q6s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q6s16),
-                                           vreinterpret_u8_s16(d8s16)));
-
-    d9s16  = vreinterpret_s16_u8(vqmovun_s16(q7s16));
-    d10s16 = vreinterpret_s16_u8(vqmovun_s16(q8s16));
-    d11s16 = vreinterpret_s16_u8(vqmovun_s16(q9s16));
-    d8s16  = vreinterpret_s16_u8(vqmovun_s16(q6s16));
-
-    vst1_s16((int16_t *)p1, d9s16);
-    p1 -= stride;
-    vst1_s16((int16_t *)p2, d10s16);
-    p2 += stride;
-    vst1_s16((int16_t *)p1, d8s16);
-    vst1_s16((int16_t *)p2, d11s16);
-    return;
+  vst1q_s16(out + first * 32, qA);                   \
+  vst1q_s16(out + second * 32, qB);
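+/* The "prev" argument of the three LOAD/STORE macros above is unused in
+ * this C version; "first" and "second" index 8-element rows of trans_buf
+ * and 32-element rows of the "out" buffer respectively. "prev" looks like
+ * a carry-over from the assembly origin of this code, where it tracked the
+ * previously accessed offset. */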
+
+#define STORE_COMBINE_CENTER_RESULTS(r10, r9) \
+  __STORE_COMBINE_CENTER_RESULTS(r10, r9, stride, q6s16, q7s16, q8s16, q9s16);
+static INLINE void __STORE_COMBINE_CENTER_RESULTS(uint8_t *p1, uint8_t *p2,
+                                                  int stride, int16x8_t q6s16,
+                                                  int16x8_t q7s16,
+                                                  int16x8_t q8s16,
+                                                  int16x8_t q9s16) {
+  int16x4_t d8s16, d9s16, d10s16, d11s16;
+
+  d8s16 = vld1_s16((int16_t *)p1);
+  p1 += stride;
+  d11s16 = vld1_s16((int16_t *)p2);
+  p2 -= stride;
+  d9s16 = vld1_s16((int16_t *)p1);
+  d10s16 = vld1_s16((int16_t *)p2);
+
+  q7s16 = vrshrq_n_s16(q7s16, 6);
+  q8s16 = vrshrq_n_s16(q8s16, 6);
+  q9s16 = vrshrq_n_s16(q9s16, 6);
+  q6s16 = vrshrq_n_s16(q6s16, 6);
+
+  q7s16 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q7s16), vreinterpret_u8_s16(d9s16)));
+  q8s16 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_s16(d10s16)));
+  q9s16 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_s16(d11s16)));
+  q6s16 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q6s16), vreinterpret_u8_s16(d8s16)));
+
+  d9s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16));
+  d10s16 = vreinterpret_s16_u8(vqmovun_s16(q8s16));
+  d11s16 = vreinterpret_s16_u8(vqmovun_s16(q9s16));
+  d8s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16));
+
+  vst1_s16((int16_t *)p1, d9s16);
+  p1 -= stride;
+  vst1_s16((int16_t *)p2, d10s16);
+  p2 += stride;
+  vst1_s16((int16_t *)p1, d8s16);
+  vst1_s16((int16_t *)p2, d11s16);
+  return;
 }
 
-#define  STORE_COMBINE_EXTREME_RESULTS(r7, r6); \
-       __STORE_COMBINE_EXTREME_RESULTS(r7, r6, stride, \
-                                      q4s16, q5s16, q6s16, q7s16);
-static INLINE void __STORE_COMBINE_EXTREME_RESULTS(
-        uint8_t *p1,
-        uint8_t *p2,
-        int stride,
-        int16x8_t q4s16,
-        int16x8_t q5s16,
-        int16x8_t q6s16,
-        int16x8_t q7s16) {
-    int16x4_t d4s16, d5s16, d6s16, d7s16;
-
-    d4s16 = vld1_s16((int16_t *)p1);
-    p1 += stride;
-    d7s16 = vld1_s16((int16_t *)p2);
-    p2 -= stride;
-    d5s16 = vld1_s16((int16_t *)p1);
-    d6s16 = vld1_s16((int16_t *)p2);
-
-    q5s16 = vrshrq_n_s16(q5s16, 6);
-    q6s16 = vrshrq_n_s16(q6s16, 6);
-    q7s16 = vrshrq_n_s16(q7s16, 6);
-    q4s16 = vrshrq_n_s16(q4s16, 6);
-
-    q5s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q5s16),
-                                           vreinterpret_u8_s16(d5s16)));
-    q6s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q6s16),
-                                           vreinterpret_u8_s16(d6s16)));
-    q7s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q7s16),
-                                           vreinterpret_u8_s16(d7s16)));
-    q4s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q4s16),
-                                           vreinterpret_u8_s16(d4s16)));
-
-    d5s16 = vreinterpret_s16_u8(vqmovun_s16(q5s16));
-    d6s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16));
-    d7s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16));
-    d4s16 = vreinterpret_s16_u8(vqmovun_s16(q4s16));
-
-    vst1_s16((int16_t *)p1, d5s16);
-    p1 -= stride;
-    vst1_s16((int16_t *)p2, d6s16);
-    p2 += stride;
-    vst1_s16((int16_t *)p2, d7s16);
-    vst1_s16((int16_t *)p1, d4s16);
-    return;
+#define STORE_COMBINE_EXTREME_RESULTS(r7, r6) \
+  __STORE_COMBINE_EXTREME_RESULTS(r7, r6, stride, q4s16, q5s16, q6s16, q7s16);
+static INLINE void __STORE_COMBINE_EXTREME_RESULTS(uint8_t *p1, uint8_t *p2,
+                                                   int stride, int16x8_t q4s16,
+                                                   int16x8_t q5s16,
+                                                   int16x8_t q6s16,
+                                                   int16x8_t q7s16) {
+  int16x4_t d4s16, d5s16, d6s16, d7s16;
+
+  d4s16 = vld1_s16((int16_t *)p1);
+  p1 += stride;
+  d7s16 = vld1_s16((int16_t *)p2);
+  p2 -= stride;
+  d5s16 = vld1_s16((int16_t *)p1);
+  d6s16 = vld1_s16((int16_t *)p2);
+
+  q5s16 = vrshrq_n_s16(q5s16, 6);
+  q6s16 = vrshrq_n_s16(q6s16, 6);
+  q7s16 = vrshrq_n_s16(q7s16, 6);
+  q4s16 = vrshrq_n_s16(q4s16, 6);
+
+  q5s16 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q5s16), vreinterpret_u8_s16(d5s16)));
+  q6s16 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q6s16), vreinterpret_u8_s16(d6s16)));
+  q7s16 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q7s16), vreinterpret_u8_s16(d7s16)));
+  q4s16 = vreinterpretq_s16_u16(
+      vaddw_u8(vreinterpretq_u16_s16(q4s16), vreinterpret_u8_s16(d4s16)));
+
+  d5s16 = vreinterpret_s16_u8(vqmovun_s16(q5s16));
+  d6s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16));
+  d7s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16));
+  d4s16 = vreinterpret_s16_u8(vqmovun_s16(q4s16));
+
+  vst1_s16((int16_t *)p1, d5s16);
+  p1 -= stride;
+  vst1_s16((int16_t *)p2, d6s16);
+  p2 += stride;
+  vst1_s16((int16_t *)p2, d7s16);
+  vst1_s16((int16_t *)p1, d4s16);
+  return;
 }
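+/* Per 8-pixel run, both STORE_COMBINE_* helpers above reduce to the scalar
+ * sketch below (clip_u8 is a hypothetical clamp to [0, 255]):
+ *
+ *   for (k = 0; k < 8; ++k)
+ *     p[k] = clip_u8(p[k] + ROUND_POWER_OF_TWO(q[k], 6));
+ *
+ * The vld1_s16/vst1_s16 casts are plain 8-byte loads/stores of pixel rows;
+ * vaddw_u8 widens the pixels before the add and vqmovun_s16 saturates the
+ * sums back to uint8. */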
 
 #define DO_BUTTERFLY_STD(const_1, const_2, qA, qB) \
-        DO_BUTTERFLY(q14s16, q13s16, const_1, const_2, qA, qB);
-static INLINE void DO_BUTTERFLY(
-        int16x8_t q14s16,
-        int16x8_t q13s16,
-        int16_t first_const,
-        int16_t second_const,
-        int16x8_t *qAs16,
-        int16x8_t *qBs16) {
-    int16x4_t d30s16, d31s16;
-    int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q15s32;
-    int16x4_t dCs16, dDs16, dAs16, dBs16;
-
-    dCs16 = vget_low_s16(q14s16);
-    dDs16 = vget_high_s16(q14s16);
-    dAs16 = vget_low_s16(q13s16);
-    dBs16 = vget_high_s16(q13s16);
-
-    d30s16 = vdup_n_s16(first_const);
-    d31s16 = vdup_n_s16(second_const);
-
-    q8s32 = vmull_s16(dCs16, d30s16);
-    q10s32 = vmull_s16(dAs16, d31s16);
-    q9s32 = vmull_s16(dDs16, d30s16);
-    q11s32 = vmull_s16(dBs16, d31s16);
-    q12s32 = vmull_s16(dCs16, d31s16);
-
-    q8s32 = vsubq_s32(q8s32, q10s32);
-    q9s32 = vsubq_s32(q9s32, q11s32);
-
-    q10s32 = vmull_s16(dDs16, d31s16);
-    q11s32 = vmull_s16(dAs16, d30s16);
-    q15s32 = vmull_s16(dBs16, d30s16);
-
-    q11s32 = vaddq_s32(q12s32, q11s32);
-    q10s32 = vaddq_s32(q10s32, q15s32);
-
-    *qAs16 = vcombine_s16(vqrshrn_n_s32(q8s32, 14),
-                          vqrshrn_n_s32(q9s32, 14));
-    *qBs16 = vcombine_s16(vqrshrn_n_s32(q11s32, 14),
-                          vqrshrn_n_s32(q10s32, 14));
-    return;
+  DO_BUTTERFLY(q14s16, q13s16, const_1, const_2, qA, qB);
+static INLINE void DO_BUTTERFLY(int16x8_t q14s16, int16x8_t q13s16,
+                                int16_t first_const, int16_t second_const,
+                                int16x8_t *qAs16, int16x8_t *qBs16) {
+  int16x4_t d30s16, d31s16;
+  int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q15s32;
+  int16x4_t dCs16, dDs16, dAs16, dBs16;
+
+  dCs16 = vget_low_s16(q14s16);
+  dDs16 = vget_high_s16(q14s16);
+  dAs16 = vget_low_s16(q13s16);
+  dBs16 = vget_high_s16(q13s16);
+
+  d30s16 = vdup_n_s16(first_const);
+  d31s16 = vdup_n_s16(second_const);
+
+  q8s32 = vmull_s16(dCs16, d30s16);
+  q10s32 = vmull_s16(dAs16, d31s16);
+  q9s32 = vmull_s16(dDs16, d30s16);
+  q11s32 = vmull_s16(dBs16, d31s16);
+  q12s32 = vmull_s16(dCs16, d31s16);
+
+  q8s32 = vsubq_s32(q8s32, q10s32);
+  q9s32 = vsubq_s32(q9s32, q11s32);
+
+  q10s32 = vmull_s16(dDs16, d31s16);
+  q11s32 = vmull_s16(dAs16, d30s16);
+  q15s32 = vmull_s16(dBs16, d30s16);
+
+  q11s32 = vaddq_s32(q12s32, q11s32);
+  q10s32 = vaddq_s32(q10s32, q15s32);
+
+  *qAs16 = vcombine_s16(vqrshrn_n_s32(q8s32, 14), vqrshrn_n_s32(q9s32, 14));
+  *qBs16 = vcombine_s16(vqrshrn_n_s32(q11s32, 14), vqrshrn_n_s32(q10s32, 14));
+  return;
 }
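+/* DO_BUTTERFLY is the fixed-point plane rotation used throughout this
+ * transform, with cospi_N_64 == round(2^14 * cos(N * pi / 64)) from
+ * txfm_common.h. Per lane, in scalar form:
+ *
+ *   *qAs16 = ROUND_POWER_OF_TWO(q14 * first_const - q13 * second_const, 14);
+ *   *qBs16 = ROUND_POWER_OF_TWO(q13 * first_const + q14 * second_const, 14);
+ *
+ * with vqrshrn_n_s32 also saturating the 32- to 16-bit narrowing. */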
 
-static INLINE void idct32_transpose_pair(
-        int16_t *input,
-        int16_t *t_buf) {
-    int16_t *in;
-    int i;
-    const int stride = 32;
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
-    int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
-
-    for (i = 0; i < 4; i++, input += 8) {
-        in = input;
-        q8s16 = vld1q_s16(in);
-        in += stride;
-        q9s16 = vld1q_s16(in);
-        in += stride;
-        q10s16 = vld1q_s16(in);
-        in += stride;
-        q11s16 = vld1q_s16(in);
-        in += stride;
-        q12s16 = vld1q_s16(in);
-        in += stride;
-        q13s16 = vld1q_s16(in);
-        in += stride;
-        q14s16 = vld1q_s16(in);
-        in += stride;
-        q15s16 = vld1q_s16(in);
-
-        d16s16 = vget_low_s16(q8s16);
-        d17s16 = vget_high_s16(q8s16);
-        d18s16 = vget_low_s16(q9s16);
-        d19s16 = vget_high_s16(q9s16);
-        d20s16 = vget_low_s16(q10s16);
-        d21s16 = vget_high_s16(q10s16);
-        d22s16 = vget_low_s16(q11s16);
-        d23s16 = vget_high_s16(q11s16);
-        d24s16 = vget_low_s16(q12s16);
-        d25s16 = vget_high_s16(q12s16);
-        d26s16 = vget_low_s16(q13s16);
-        d27s16 = vget_high_s16(q13s16);
-        d28s16 = vget_low_s16(q14s16);
-        d29s16 = vget_high_s16(q14s16);
-        d30s16 = vget_low_s16(q15s16);
-        d31s16 = vget_high_s16(q15s16);
-
-        q8s16  = vcombine_s16(d16s16, d24s16);  // vswp d17, d24
-        q9s16  = vcombine_s16(d18s16, d26s16);  // vswp d19, d26
-        q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
-        q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
-        q12s16 = vcombine_s16(d17s16, d25s16);
-        q13s16 = vcombine_s16(d19s16, d27s16);
-        q14s16 = vcombine_s16(d21s16, d29s16);
-        q15s16 = vcombine_s16(d23s16, d31s16);
-
-        q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16),
-                            vreinterpretq_s32_s16(q10s16));
-        q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q9s16),
-                            vreinterpretq_s32_s16(q11s16));
-        q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q12s16),
-                            vreinterpretq_s32_s16(q14s16));
-        q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q13s16),
-                            vreinterpretq_s32_s16(q15s16));
-
-        q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
-                            vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
-        q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
-                            vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
-        q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
-                            vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
-        q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
-                            vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
-
-        vst1q_s16(t_buf, q0x2s16.val[0]);
-        t_buf += 8;
-        vst1q_s16(t_buf, q0x2s16.val[1]);
-        t_buf += 8;
-        vst1q_s16(t_buf, q1x2s16.val[0]);
-        t_buf += 8;
-        vst1q_s16(t_buf, q1x2s16.val[1]);
-        t_buf += 8;
-        vst1q_s16(t_buf, q2x2s16.val[0]);
-        t_buf += 8;
-        vst1q_s16(t_buf, q2x2s16.val[1]);
-        t_buf += 8;
-        vst1q_s16(t_buf, q3x2s16.val[0]);
-        t_buf += 8;
-        vst1q_s16(t_buf, q3x2s16.val[1]);
-        t_buf += 8;
-    }
-    return;
+static INLINE void idct32_transpose_pair(int16_t *input, int16_t *t_buf) {
+  int16_t *in;
+  int i;
+  const int stride = 32;
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
+  int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
+
+  for (i = 0; i < 4; i++, input += 8) {
+    in = input;
+    q8s16 = vld1q_s16(in);
+    in += stride;
+    q9s16 = vld1q_s16(in);
+    in += stride;
+    q10s16 = vld1q_s16(in);
+    in += stride;
+    q11s16 = vld1q_s16(in);
+    in += stride;
+    q12s16 = vld1q_s16(in);
+    in += stride;
+    q13s16 = vld1q_s16(in);
+    in += stride;
+    q14s16 = vld1q_s16(in);
+    in += stride;
+    q15s16 = vld1q_s16(in);
+
+    d16s16 = vget_low_s16(q8s16);
+    d17s16 = vget_high_s16(q8s16);
+    d18s16 = vget_low_s16(q9s16);
+    d19s16 = vget_high_s16(q9s16);
+    d20s16 = vget_low_s16(q10s16);
+    d21s16 = vget_high_s16(q10s16);
+    d22s16 = vget_low_s16(q11s16);
+    d23s16 = vget_high_s16(q11s16);
+    d24s16 = vget_low_s16(q12s16);
+    d25s16 = vget_high_s16(q12s16);
+    d26s16 = vget_low_s16(q13s16);
+    d27s16 = vget_high_s16(q13s16);
+    d28s16 = vget_low_s16(q14s16);
+    d29s16 = vget_high_s16(q14s16);
+    d30s16 = vget_low_s16(q15s16);
+    d31s16 = vget_high_s16(q15s16);
+
+    q8s16 = vcombine_s16(d16s16, d24s16);   // vswp d17, d24
+    q9s16 = vcombine_s16(d18s16, d26s16);   // vswp d19, d26
+    q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
+    q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
+    q12s16 = vcombine_s16(d17s16, d25s16);
+    q13s16 = vcombine_s16(d19s16, d27s16);
+    q14s16 = vcombine_s16(d21s16, d29s16);
+    q15s16 = vcombine_s16(d23s16, d31s16);
+
+    q0x2s32 =
+        vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q10s16));
+    q1x2s32 =
+        vtrnq_s32(vreinterpretq_s32_s16(q9s16), vreinterpretq_s32_s16(q11s16));
+    q2x2s32 =
+        vtrnq_s32(vreinterpretq_s32_s16(q12s16), vreinterpretq_s32_s16(q14s16));
+    q3x2s32 =
+        vtrnq_s32(vreinterpretq_s32_s16(q13s16), vreinterpretq_s32_s16(q15s16));
+
+    q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
+                        vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
+    q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
+                        vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
+    q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
+                        vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
+    q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
+                        vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
+
+    vst1q_s16(t_buf, q0x2s16.val[0]);
+    t_buf += 8;
+    vst1q_s16(t_buf, q0x2s16.val[1]);
+    t_buf += 8;
+    vst1q_s16(t_buf, q1x2s16.val[0]);
+    t_buf += 8;
+    vst1q_s16(t_buf, q1x2s16.val[1]);
+    t_buf += 8;
+    vst1q_s16(t_buf, q2x2s16.val[0]);
+    t_buf += 8;
+    vst1q_s16(t_buf, q2x2s16.val[1]);
+    t_buf += 8;
+    vst1q_s16(t_buf, q3x2s16.val[0]);
+    t_buf += 8;
+    vst1q_s16(t_buf, q3x2s16.val[1]);
+    t_buf += 8;
+  }
+  return;
 }
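+/* Each loop iteration above is the standard three-step NEON 8x8 transpose:
+ * swap 64-bit halves (the vcombine pairs, standing in for the asm vswp),
+ * then vtrnq_s32 on 32-bit elements, then vtrnq_s16 on 16-bit elements.
+ * Across the four iterations this transposes an 8x32 strip of the input
+ * (stride 32) into the 32x8 t_buf, one 8x8 tile at a time. */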
 
-static INLINE void idct32_bands_end_1st_pass(
-        int16_t *out,
-        int16x8_t q2s16,
-        int16x8_t q3s16,
-        int16x8_t q6s16,
-        int16x8_t q7s16,
-        int16x8_t q8s16,
-        int16x8_t q9s16,
-        int16x8_t q10s16,
-        int16x8_t q11s16,
-        int16x8_t q12s16,
-        int16x8_t q13s16,
-        int16x8_t q14s16,
-        int16x8_t q15s16) {
-    int16x8_t q0s16, q1s16, q4s16, q5s16;
-
-    STORE_IN_OUTPUT(17, 16, 17, q6s16, q7s16);
-    STORE_IN_OUTPUT(17, 14, 15, q8s16, q9s16);
-
-    LOAD_FROM_OUTPUT(15, 30, 31, q0s16, q1s16);
-    q4s16 = vaddq_s16(q2s16, q1s16);
-    q5s16 = vaddq_s16(q3s16, q0s16);
-    q6s16 = vsubq_s16(q3s16, q0s16);
-    q7s16 = vsubq_s16(q2s16, q1s16);
-    STORE_IN_OUTPUT(31, 30, 31, q6s16, q7s16);
-    STORE_IN_OUTPUT(31, 0, 1, q4s16, q5s16);
-
-    LOAD_FROM_OUTPUT(1, 12, 13, q0s16, q1s16);
-    q2s16 = vaddq_s16(q10s16, q1s16);
-    q3s16 = vaddq_s16(q11s16, q0s16);
-    q4s16 = vsubq_s16(q11s16, q0s16);
-    q5s16 = vsubq_s16(q10s16, q1s16);
-
-    LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16);
-    q8s16 = vaddq_s16(q4s16, q1s16);
-    q9s16 = vaddq_s16(q5s16, q0s16);
-    q6s16 = vsubq_s16(q5s16, q0s16);
-    q7s16 = vsubq_s16(q4s16, q1s16);
-    STORE_IN_OUTPUT(19, 18, 19, q6s16, q7s16);
-    STORE_IN_OUTPUT(19, 12, 13, q8s16, q9s16);
-
-    LOAD_FROM_OUTPUT(13, 28, 29, q0s16, q1s16);
-    q4s16 = vaddq_s16(q2s16, q1s16);
-    q5s16 = vaddq_s16(q3s16, q0s16);
-    q6s16 = vsubq_s16(q3s16, q0s16);
-    q7s16 = vsubq_s16(q2s16, q1s16);
-    STORE_IN_OUTPUT(29, 28, 29, q6s16, q7s16);
-    STORE_IN_OUTPUT(29, 2, 3, q4s16, q5s16);
-
-    LOAD_FROM_OUTPUT(3, 10, 11, q0s16, q1s16);
-    q2s16 = vaddq_s16(q12s16, q1s16);
-    q3s16 = vaddq_s16(q13s16, q0s16);
-    q4s16 = vsubq_s16(q13s16, q0s16);
-    q5s16 = vsubq_s16(q12s16, q1s16);
-
-    LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16);
-    q8s16 = vaddq_s16(q4s16, q1s16);
-    q9s16 = vaddq_s16(q5s16, q0s16);
-    q6s16 = vsubq_s16(q5s16, q0s16);
-    q7s16 = vsubq_s16(q4s16, q1s16);
-    STORE_IN_OUTPUT(21, 20, 21, q6s16, q7s16);
-    STORE_IN_OUTPUT(21, 10, 11, q8s16, q9s16);
-
-    LOAD_FROM_OUTPUT(11, 26, 27, q0s16, q1s16);
-    q4s16 = vaddq_s16(q2s16, q1s16);
-    q5s16 = vaddq_s16(q3s16, q0s16);
-    q6s16 = vsubq_s16(q3s16, q0s16);
-    q7s16 = vsubq_s16(q2s16, q1s16);
-    STORE_IN_OUTPUT(27, 26, 27, q6s16, q7s16);
-    STORE_IN_OUTPUT(27, 4, 5, q4s16, q5s16);
-
-    LOAD_FROM_OUTPUT(5, 8, 9, q0s16, q1s16);
-    q2s16 = vaddq_s16(q14s16, q1s16);
-    q3s16 = vaddq_s16(q15s16, q0s16);
-    q4s16 = vsubq_s16(q15s16, q0s16);
-    q5s16 = vsubq_s16(q14s16, q1s16);
-
-    LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16);
-    q8s16 = vaddq_s16(q4s16, q1s16);
-    q9s16 = vaddq_s16(q5s16, q0s16);
-    q6s16 = vsubq_s16(q5s16, q0s16);
-    q7s16 = vsubq_s16(q4s16, q1s16);
-    STORE_IN_OUTPUT(23, 22, 23, q6s16, q7s16);
-    STORE_IN_OUTPUT(23, 8, 9, q8s16, q9s16);
-
-    LOAD_FROM_OUTPUT(9, 24, 25, q0s16, q1s16);
-    q4s16 = vaddq_s16(q2s16, q1s16);
-    q5s16 = vaddq_s16(q3s16, q0s16);
-    q6s16 = vsubq_s16(q3s16, q0s16);
-    q7s16 = vsubq_s16(q2s16, q1s16);
-    STORE_IN_OUTPUT(25, 24, 25, q6s16, q7s16);
-    STORE_IN_OUTPUT(25, 6, 7, q4s16, q5s16);
-    return;
+static INLINE void idct32_bands_end_1st_pass(int16_t *out, int16x8_t q2s16,
+                                             int16x8_t q3s16, int16x8_t q6s16,
+                                             int16x8_t q7s16, int16x8_t q8s16,
+                                             int16x8_t q9s16, int16x8_t q10s16,
+                                             int16x8_t q11s16, int16x8_t q12s16,
+                                             int16x8_t q13s16, int16x8_t q14s16,
+                                             int16x8_t q15s16) {
+  int16x8_t q0s16, q1s16, q4s16, q5s16;
+
+  STORE_IN_OUTPUT(17, 16, 17, q6s16, q7s16);
+  STORE_IN_OUTPUT(17, 14, 15, q8s16, q9s16);
+
+  LOAD_FROM_OUTPUT(15, 30, 31, q0s16, q1s16);
+  q4s16 = vaddq_s16(q2s16, q1s16);
+  q5s16 = vaddq_s16(q3s16, q0s16);
+  q6s16 = vsubq_s16(q3s16, q0s16);
+  q7s16 = vsubq_s16(q2s16, q1s16);
+  STORE_IN_OUTPUT(31, 30, 31, q6s16, q7s16);
+  STORE_IN_OUTPUT(31, 0, 1, q4s16, q5s16);
+
+  LOAD_FROM_OUTPUT(1, 12, 13, q0s16, q1s16);
+  q2s16 = vaddq_s16(q10s16, q1s16);
+  q3s16 = vaddq_s16(q11s16, q0s16);
+  q4s16 = vsubq_s16(q11s16, q0s16);
+  q5s16 = vsubq_s16(q10s16, q1s16);
+
+  LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16);
+  q8s16 = vaddq_s16(q4s16, q1s16);
+  q9s16 = vaddq_s16(q5s16, q0s16);
+  q6s16 = vsubq_s16(q5s16, q0s16);
+  q7s16 = vsubq_s16(q4s16, q1s16);
+  STORE_IN_OUTPUT(19, 18, 19, q6s16, q7s16);
+  STORE_IN_OUTPUT(19, 12, 13, q8s16, q9s16);
+
+  LOAD_FROM_OUTPUT(13, 28, 29, q0s16, q1s16);
+  q4s16 = vaddq_s16(q2s16, q1s16);
+  q5s16 = vaddq_s16(q3s16, q0s16);
+  q6s16 = vsubq_s16(q3s16, q0s16);
+  q7s16 = vsubq_s16(q2s16, q1s16);
+  STORE_IN_OUTPUT(29, 28, 29, q6s16, q7s16);
+  STORE_IN_OUTPUT(29, 2, 3, q4s16, q5s16);
+
+  LOAD_FROM_OUTPUT(3, 10, 11, q0s16, q1s16);
+  q2s16 = vaddq_s16(q12s16, q1s16);
+  q3s16 = vaddq_s16(q13s16, q0s16);
+  q4s16 = vsubq_s16(q13s16, q0s16);
+  q5s16 = vsubq_s16(q12s16, q1s16);
+
+  LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16);
+  q8s16 = vaddq_s16(q4s16, q1s16);
+  q9s16 = vaddq_s16(q5s16, q0s16);
+  q6s16 = vsubq_s16(q5s16, q0s16);
+  q7s16 = vsubq_s16(q4s16, q1s16);
+  STORE_IN_OUTPUT(21, 20, 21, q6s16, q7s16);
+  STORE_IN_OUTPUT(21, 10, 11, q8s16, q9s16);
+
+  LOAD_FROM_OUTPUT(11, 26, 27, q0s16, q1s16);
+  q4s16 = vaddq_s16(q2s16, q1s16);
+  q5s16 = vaddq_s16(q3s16, q0s16);
+  q6s16 = vsubq_s16(q3s16, q0s16);
+  q7s16 = vsubq_s16(q2s16, q1s16);
+  STORE_IN_OUTPUT(27, 26, 27, q6s16, q7s16);
+  STORE_IN_OUTPUT(27, 4, 5, q4s16, q5s16);
+
+  LOAD_FROM_OUTPUT(5, 8, 9, q0s16, q1s16);
+  q2s16 = vaddq_s16(q14s16, q1s16);
+  q3s16 = vaddq_s16(q15s16, q0s16);
+  q4s16 = vsubq_s16(q15s16, q0s16);
+  q5s16 = vsubq_s16(q14s16, q1s16);
+
+  LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16);
+  q8s16 = vaddq_s16(q4s16, q1s16);
+  q9s16 = vaddq_s16(q5s16, q0s16);
+  q6s16 = vsubq_s16(q5s16, q0s16);
+  q7s16 = vsubq_s16(q4s16, q1s16);
+  STORE_IN_OUTPUT(23, 22, 23, q6s16, q7s16);
+  STORE_IN_OUTPUT(23, 8, 9, q8s16, q9s16);
+
+  LOAD_FROM_OUTPUT(9, 24, 25, q0s16, q1s16);
+  q4s16 = vaddq_s16(q2s16, q1s16);
+  q5s16 = vaddq_s16(q3s16, q0s16);
+  q6s16 = vsubq_s16(q3s16, q0s16);
+  q7s16 = vsubq_s16(q2s16, q1s16);
+  STORE_IN_OUTPUT(25, 24, 25, q6s16, q7s16);
+  STORE_IN_OUTPUT(25, 6, 7, q4s16, q5s16);
+  return;
 }
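+/* Each LOAD/add/sub/STORE group above is one step of the final IDCT stage:
+ * with a one of the values carried in registers and b a row just loaded
+ * from "out", it writes the symmetric pair
+ *   out[k]      = a + b
+ *   out[31 - k] = a - b
+ * where row k of "out" lives at out + k * 32 (see STORE_IN_OUTPUT). */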
 
 static INLINE void idct32_bands_end_2nd_pass(
-        int16_t *out,
-        uint8_t *dest,
-        int stride,
-        int16x8_t q2s16,
-        int16x8_t q3s16,
-        int16x8_t q6s16,
-        int16x8_t q7s16,
-        int16x8_t q8s16,
-        int16x8_t q9s16,
-        int16x8_t q10s16,
-        int16x8_t q11s16,
-        int16x8_t q12s16,
-        int16x8_t q13s16,
-        int16x8_t q14s16,
-        int16x8_t q15s16) {
-    uint8_t *r6  = dest + 31 * stride;
-    uint8_t *r7  = dest/* +  0 * stride*/;
-    uint8_t *r9  = dest + 15 * stride;
-    uint8_t *r10 = dest + 16 * stride;
-    int str2 = stride << 1;
-    int16x8_t q0s16, q1s16, q4s16, q5s16;
-
-    STORE_COMBINE_CENTER_RESULTS(r10, r9);
-    r10 += str2; r9 -= str2;
-
-    LOAD_FROM_OUTPUT(17, 30, 31, q0s16, q1s16)
-    q4s16 = vaddq_s16(q2s16, q1s16);
-    q5s16 = vaddq_s16(q3s16, q0s16);
-    q6s16 = vsubq_s16(q3s16, q0s16);
-    q7s16 = vsubq_s16(q2s16, q1s16);
-    STORE_COMBINE_EXTREME_RESULTS(r7, r6);
-    r7 += str2; r6 -= str2;
-
-    LOAD_FROM_OUTPUT(31, 12, 13, q0s16, q1s16)
-    q2s16 = vaddq_s16(q10s16, q1s16);
-    q3s16 = vaddq_s16(q11s16, q0s16);
-    q4s16 = vsubq_s16(q11s16, q0s16);
-    q5s16 = vsubq_s16(q10s16, q1s16);
-
-    LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16)
-    q8s16 = vaddq_s16(q4s16, q1s16);
-    q9s16 = vaddq_s16(q5s16, q0s16);
-    q6s16 = vsubq_s16(q5s16, q0s16);
-    q7s16 = vsubq_s16(q4s16, q1s16);
-    STORE_COMBINE_CENTER_RESULTS(r10, r9);
-    r10 += str2; r9 -= str2;
-
-    LOAD_FROM_OUTPUT(19, 28, 29, q0s16, q1s16)
-    q4s16 = vaddq_s16(q2s16, q1s16);
-    q5s16 = vaddq_s16(q3s16, q0s16);
-    q6s16 = vsubq_s16(q3s16, q0s16);
-    q7s16 = vsubq_s16(q2s16, q1s16);
-    STORE_COMBINE_EXTREME_RESULTS(r7, r6);
-    r7 += str2; r6 -= str2;
-
-    LOAD_FROM_OUTPUT(29, 10, 11, q0s16, q1s16)
-    q2s16 = vaddq_s16(q12s16, q1s16);
-    q3s16 = vaddq_s16(q13s16, q0s16);
-    q4s16 = vsubq_s16(q13s16, q0s16);
-    q5s16 = vsubq_s16(q12s16, q1s16);
-
-    LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16)
-    q8s16 = vaddq_s16(q4s16, q1s16);
-    q9s16 = vaddq_s16(q5s16, q0s16);
-    q6s16 = vsubq_s16(q5s16, q0s16);
-    q7s16 = vsubq_s16(q4s16, q1s16);
-    STORE_COMBINE_CENTER_RESULTS(r10, r9);
-    r10 += str2; r9 -= str2;
-
-    LOAD_FROM_OUTPUT(21, 26, 27, q0s16, q1s16)
-    q4s16 = vaddq_s16(q2s16, q1s16);
-    q5s16 = vaddq_s16(q3s16, q0s16);
-    q6s16 = vsubq_s16(q3s16, q0s16);
-    q7s16 = vsubq_s16(q2s16, q1s16);
-    STORE_COMBINE_EXTREME_RESULTS(r7, r6);
-    r7 += str2; r6 -= str2;
-
-    LOAD_FROM_OUTPUT(27, 8, 9, q0s16, q1s16)
-    q2s16 = vaddq_s16(q14s16, q1s16);
-    q3s16 = vaddq_s16(q15s16, q0s16);
-    q4s16 = vsubq_s16(q15s16, q0s16);
-    q5s16 = vsubq_s16(q14s16, q1s16);
-
-    LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16)
-    q8s16 = vaddq_s16(q4s16, q1s16);
-    q9s16 = vaddq_s16(q5s16, q0s16);
-    q6s16 = vsubq_s16(q5s16, q0s16);
-    q7s16 = vsubq_s16(q4s16, q1s16);
-    STORE_COMBINE_CENTER_RESULTS(r10, r9);
-
-    LOAD_FROM_OUTPUT(23, 24, 25, q0s16, q1s16)
-    q4s16 = vaddq_s16(q2s16, q1s16);
-    q5s16 = vaddq_s16(q3s16, q0s16);
-    q6s16 = vsubq_s16(q3s16, q0s16);
-    q7s16 = vsubq_s16(q2s16, q1s16);
-    STORE_COMBINE_EXTREME_RESULTS(r7, r6);
-    return;
+    int16_t *out, uint8_t *dest, int stride, int16x8_t q2s16, int16x8_t q3s16,
+    int16x8_t q6s16, int16x8_t q7s16, int16x8_t q8s16, int16x8_t q9s16,
+    int16x8_t q10s16, int16x8_t q11s16, int16x8_t q12s16, int16x8_t q13s16,
+    int16x8_t q14s16, int16x8_t q15s16) {
+  uint8_t *r6 = dest + 31 * stride;
+  uint8_t *r7 = dest /* + 0 * stride */;
+  uint8_t *r9 = dest + 15 * stride;
+  uint8_t *r10 = dest + 16 * stride;
+  int str2 = stride << 1;
+  int16x8_t q0s16, q1s16, q4s16, q5s16;
+
+  STORE_COMBINE_CENTER_RESULTS(r10, r9);
+  r10 += str2;
+  r9 -= str2;
+
+  LOAD_FROM_OUTPUT(17, 30, 31, q0s16, q1s16)
+  q4s16 = vaddq_s16(q2s16, q1s16);
+  q5s16 = vaddq_s16(q3s16, q0s16);
+  q6s16 = vsubq_s16(q3s16, q0s16);
+  q7s16 = vsubq_s16(q2s16, q1s16);
+  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
+  r7 += str2;
+  r6 -= str2;
+
+  LOAD_FROM_OUTPUT(31, 12, 13, q0s16, q1s16)
+  q2s16 = vaddq_s16(q10s16, q1s16);
+  q3s16 = vaddq_s16(q11s16, q0s16);
+  q4s16 = vsubq_s16(q11s16, q0s16);
+  q5s16 = vsubq_s16(q10s16, q1s16);
+
+  LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16)
+  q8s16 = vaddq_s16(q4s16, q1s16);
+  q9s16 = vaddq_s16(q5s16, q0s16);
+  q6s16 = vsubq_s16(q5s16, q0s16);
+  q7s16 = vsubq_s16(q4s16, q1s16);
+  STORE_COMBINE_CENTER_RESULTS(r10, r9);
+  r10 += str2;
+  r9 -= str2;
+
+  LOAD_FROM_OUTPUT(19, 28, 29, q0s16, q1s16)
+  q4s16 = vaddq_s16(q2s16, q1s16);
+  q5s16 = vaddq_s16(q3s16, q0s16);
+  q6s16 = vsubq_s16(q3s16, q0s16);
+  q7s16 = vsubq_s16(q2s16, q1s16);
+  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
+  r7 += str2;
+  r6 -= str2;
+
+  LOAD_FROM_OUTPUT(29, 10, 11, q0s16, q1s16)
+  q2s16 = vaddq_s16(q12s16, q1s16);
+  q3s16 = vaddq_s16(q13s16, q0s16);
+  q4s16 = vsubq_s16(q13s16, q0s16);
+  q5s16 = vsubq_s16(q12s16, q1s16);
+
+  LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16)
+  q8s16 = vaddq_s16(q4s16, q1s16);
+  q9s16 = vaddq_s16(q5s16, q0s16);
+  q6s16 = vsubq_s16(q5s16, q0s16);
+  q7s16 = vsubq_s16(q4s16, q1s16);
+  STORE_COMBINE_CENTER_RESULTS(r10, r9);
+  r10 += str2;
+  r9 -= str2;
+
+  LOAD_FROM_OUTPUT(21, 26, 27, q0s16, q1s16)
+  q4s16 = vaddq_s16(q2s16, q1s16);
+  q5s16 = vaddq_s16(q3s16, q0s16);
+  q6s16 = vsubq_s16(q3s16, q0s16);
+  q7s16 = vsubq_s16(q2s16, q1s16);
+  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
+  r7 += str2;
+  r6 -= str2;
+
+  LOAD_FROM_OUTPUT(27, 8, 9, q0s16, q1s16)
+  q2s16 = vaddq_s16(q14s16, q1s16);
+  q3s16 = vaddq_s16(q15s16, q0s16);
+  q4s16 = vsubq_s16(q15s16, q0s16);
+  q5s16 = vsubq_s16(q14s16, q1s16);
+
+  LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16)
+  q8s16 = vaddq_s16(q4s16, q1s16);
+  q9s16 = vaddq_s16(q5s16, q0s16);
+  q6s16 = vsubq_s16(q5s16, q0s16);
+  q7s16 = vsubq_s16(q4s16, q1s16);
+  STORE_COMBINE_CENTER_RESULTS(r10, r9);
+
+  LOAD_FROM_OUTPUT(23, 24, 25, q0s16, q1s16)
+  q4s16 = vaddq_s16(q2s16, q1s16);
+  q5s16 = vaddq_s16(q3s16, q0s16);
+  q6s16 = vsubq_s16(q3s16, q0s16);
+  q7s16 = vsubq_s16(q2s16, q1s16);
+  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
+  return;
 }
 
-void vpx_idct32x32_1024_add_neon(
-        int16_t *input,
-        uint8_t *dest,
-        int stride) {
-    int i, idct32_pass_loop;
-    int16_t trans_buf[32 * 8];
-    int16_t pass1[32 * 32];
-    int16_t pass2[32 * 32];
-    int16_t *out;
-    int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-
-    for (idct32_pass_loop = 0, out = pass1;
-         idct32_pass_loop < 2;
-         idct32_pass_loop++,
-         input = pass1,  // the input of pass2 is the result of pass1
-         out = pass2) {
-        for (i = 0;
-             i < 4; i++,
-             input += 32 * 8, out += 8) {  // idct32_bands_loop
-            idct32_transpose_pair(input, trans_buf);
-
-            // -----------------------------------------
-            // BLOCK A: 16-19,28-31
-            // -----------------------------------------
-            // generate 16,17,30,31
-            // part of stage 1
-            LOAD_FROM_TRANSPOSED(0, 1, 31)
-            DO_BUTTERFLY_STD(cospi_31_64, cospi_1_64, &q0s16, &q2s16)
-            LOAD_FROM_TRANSPOSED(31, 17, 15)
-            DO_BUTTERFLY_STD(cospi_15_64, cospi_17_64, &q1s16, &q3s16)
-            // part of stage 2
-            q4s16 = vaddq_s16(q0s16, q1s16);
-            q13s16 = vsubq_s16(q0s16, q1s16);
-            q6s16 = vaddq_s16(q2s16, q3s16);
-            q14s16 = vsubq_s16(q2s16, q3s16);
-            // part of stage 3
-            DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q5s16, &q7s16)
-
-            // generate 18,19,28,29
-            // part of stage 1
-            LOAD_FROM_TRANSPOSED(15, 9, 23)
-            DO_BUTTERFLY_STD(cospi_23_64, cospi_9_64, &q0s16, &q2s16)
-            LOAD_FROM_TRANSPOSED(23, 25, 7)
-            DO_BUTTERFLY_STD(cospi_7_64, cospi_25_64, &q1s16, &q3s16)
-            // part of stage 2
-            q13s16 = vsubq_s16(q3s16, q2s16);
-            q3s16 = vaddq_s16(q3s16, q2s16);
-            q14s16 = vsubq_s16(q1s16, q0s16);
-            q2s16 = vaddq_s16(q1s16, q0s16);
-            // part of stage 3
-            DO_BUTTERFLY_STD(-cospi_4_64, -cospi_28_64, &q1s16, &q0s16)
-            // part of stage 4
-            q8s16 = vaddq_s16(q4s16, q2s16);
-            q9s16 = vaddq_s16(q5s16, q0s16);
-            q10s16 = vaddq_s16(q7s16, q1s16);
-            q15s16 = vaddq_s16(q6s16, q3s16);
-            q13s16 = vsubq_s16(q5s16, q0s16);
-            q14s16 = vsubq_s16(q7s16, q1s16);
-            STORE_IN_OUTPUT(0, 16, 31, q8s16, q15s16)
-            STORE_IN_OUTPUT(31, 17, 30, q9s16, q10s16)
-            // part of stage 5
-            DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q0s16, &q1s16)
-            STORE_IN_OUTPUT(30, 29, 18, q1s16, q0s16)
-            // part of stage 4
-            q13s16 = vsubq_s16(q4s16, q2s16);
-            q14s16 = vsubq_s16(q6s16, q3s16);
-            // part of stage 5
-            DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q4s16, &q6s16)
-            STORE_IN_OUTPUT(18, 19, 28, q4s16, q6s16)
-
-            // -----------------------------------------
-            // BLOCK B: 20-23,24-27
-            // -----------------------------------------
-            // generate 20,21,26,27
-            // part of stage 1
-            LOAD_FROM_TRANSPOSED(7, 5, 27)
-            DO_BUTTERFLY_STD(cospi_27_64, cospi_5_64, &q0s16, &q2s16)
-            LOAD_FROM_TRANSPOSED(27, 21, 11)
-            DO_BUTTERFLY_STD(cospi_11_64, cospi_21_64, &q1s16, &q3s16)
-            // part of stage 2
-            q13s16 = vsubq_s16(q0s16, q1s16);
-            q0s16 = vaddq_s16(q0s16, q1s16);
-            q14s16 = vsubq_s16(q2s16, q3s16);
-            q2s16 = vaddq_s16(q2s16, q3s16);
-            // part of stage 3
-            DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)
-
-            // generate 22,23,24,25
-            // part of stage 1
-            LOAD_FROM_TRANSPOSED(11, 13, 19)
-            DO_BUTTERFLY_STD(cospi_19_64, cospi_13_64, &q5s16, &q7s16)
-            LOAD_FROM_TRANSPOSED(19, 29, 3)
-            DO_BUTTERFLY_STD(cospi_3_64, cospi_29_64, &q4s16, &q6s16)
-            // part of stage 2
-            q14s16 = vsubq_s16(q4s16, q5s16);
-            q5s16  = vaddq_s16(q4s16, q5s16);
-            q13s16 = vsubq_s16(q6s16, q7s16);
-            q6s16  = vaddq_s16(q6s16, q7s16);
-            // part of stage 3
-            DO_BUTTERFLY_STD(-cospi_20_64, -cospi_12_64, &q4s16, &q7s16)
-            // part of stage 4
-            q10s16 = vaddq_s16(q7s16, q1s16);
-            q11s16 = vaddq_s16(q5s16, q0s16);
-            q12s16 = vaddq_s16(q6s16, q2s16);
-            q15s16 = vaddq_s16(q4s16, q3s16);
-            // part of stage 6
-            LOAD_FROM_OUTPUT(28, 16, 17, q14s16, q13s16)
-            q8s16 = vaddq_s16(q14s16, q11s16);
-            q9s16 = vaddq_s16(q13s16, q10s16);
-            q13s16 = vsubq_s16(q13s16, q10s16);
-            q11s16 = vsubq_s16(q14s16, q11s16);
-            STORE_IN_OUTPUT(17, 17, 16, q9s16, q8s16)
-            LOAD_FROM_OUTPUT(16, 30, 31, q14s16, q9s16)
-            q8s16  = vsubq_s16(q9s16, q12s16);
-            q10s16 = vaddq_s16(q14s16, q15s16);
-            q14s16 = vsubq_s16(q14s16, q15s16);
-            q12s16 = vaddq_s16(q9s16, q12s16);
-            STORE_IN_OUTPUT(31, 30, 31, q10s16, q12s16)
-            // part of stage 7
-            DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
-            STORE_IN_OUTPUT(31, 25, 22, q14s16, q13s16)
-            q13s16 = q11s16;
-            q14s16 = q8s16;
-            DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
-            STORE_IN_OUTPUT(22, 24, 23, q14s16, q13s16)
-            // part of stage 4
-            q14s16 = vsubq_s16(q5s16, q0s16);
-            q13s16 = vsubq_s16(q6s16, q2s16);
-            DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q5s16, &q6s16);
-            q14s16 = vsubq_s16(q7s16, q1s16);
-            q13s16 = vsubq_s16(q4s16, q3s16);
-            DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q0s16, &q1s16);
-            // part of stage 6
-            LOAD_FROM_OUTPUT(23, 18, 19, q14s16, q13s16)
-            q8s16 = vaddq_s16(q14s16, q1s16);
-            q9s16 = vaddq_s16(q13s16, q6s16);
-            q13s16 = vsubq_s16(q13s16, q6s16);
-            q1s16 = vsubq_s16(q14s16, q1s16);
-            STORE_IN_OUTPUT(19, 18, 19, q8s16, q9s16)
-            LOAD_FROM_OUTPUT(19, 28, 29, q8s16, q9s16)
-            q14s16 = vsubq_s16(q8s16, q5s16);
-            q10s16 = vaddq_s16(q8s16, q5s16);
-            q11s16 = vaddq_s16(q9s16, q0s16);
-            q0s16 = vsubq_s16(q9s16, q0s16);
-            STORE_IN_OUTPUT(29, 28, 29, q10s16, q11s16)
-            // part of stage 7
-            DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
-            STORE_IN_OUTPUT(29, 20, 27, q13s16, q14s16)
-            DO_BUTTERFLY(q0s16, q1s16, cospi_16_64, cospi_16_64,
-                                                         &q1s16, &q0s16);
-            STORE_IN_OUTPUT(27, 21, 26, q1s16, q0s16)
-
-            // -----------------------------------------
-            // BLOCK C: 8-10,11-15
-            // -----------------------------------------
-            // generate 8,9,14,15
-            // part of stage 2
-            LOAD_FROM_TRANSPOSED(3, 2, 30)
-            DO_BUTTERFLY_STD(cospi_30_64, cospi_2_64, &q0s16, &q2s16)
-            LOAD_FROM_TRANSPOSED(30, 18, 14)
-            DO_BUTTERFLY_STD(cospi_14_64, cospi_18_64, &q1s16, &q3s16)
-            // part of stage 3
-            q13s16 = vsubq_s16(q0s16, q1s16);
-            q0s16 = vaddq_s16(q0s16, q1s16);
-            q14s16 = vsubq_s16(q2s16, q3s16);
-            q2s16 = vaddq_s16(q2s16, q3s16);
-            // part of stage 4
-            DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q1s16, &q3s16)
-
-            // generate 10,11,12,13
-            // part of stage 2
-            LOAD_FROM_TRANSPOSED(14, 10, 22)
-            DO_BUTTERFLY_STD(cospi_22_64, cospi_10_64, &q5s16, &q7s16)
-            LOAD_FROM_TRANSPOSED(22, 26, 6)
-            DO_BUTTERFLY_STD(cospi_6_64, cospi_26_64, &q4s16, &q6s16)
-            // part of stage 3
-            q14s16 = vsubq_s16(q4s16, q5s16);
-            q5s16 = vaddq_s16(q4s16, q5s16);
-            q13s16 = vsubq_s16(q6s16, q7s16);
-            q6s16 = vaddq_s16(q6s16, q7s16);
-            // part of stage 4
-            DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q4s16, &q7s16)
-            // part of stage 5
-            q8s16 = vaddq_s16(q0s16, q5s16);
-            q9s16 = vaddq_s16(q1s16, q7s16);
-            q13s16 = vsubq_s16(q1s16, q7s16);
-            q14s16 = vsubq_s16(q3s16, q4s16);
-            q10s16 = vaddq_s16(q3s16, q4s16);
-            q15s16 = vaddq_s16(q2s16, q6s16);
-            STORE_IN_OUTPUT(26, 8, 15, q8s16, q15s16)
-            STORE_IN_OUTPUT(15, 9, 14, q9s16, q10s16)
-            // part of stage 6
-            DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
-            STORE_IN_OUTPUT(14, 13, 10, q3s16, q1s16)
-            q13s16 = vsubq_s16(q0s16, q5s16);
-            q14s16 = vsubq_s16(q2s16, q6s16);
-            DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
-            STORE_IN_OUTPUT(10, 11, 12, q1s16, q3s16)
-
-            // -----------------------------------------
-            // BLOCK D: 0-3,4-7
-            // -----------------------------------------
-            // generate 4,5,6,7
-            // part of stage 3
-            LOAD_FROM_TRANSPOSED(6, 4, 28)
-            DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q0s16, &q2s16)
-            LOAD_FROM_TRANSPOSED(28, 20, 12)
-            DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)
-            // part of stage 4
-            q13s16 = vsubq_s16(q0s16, q1s16);
-            q0s16 = vaddq_s16(q0s16, q1s16);
-            q14s16 = vsubq_s16(q2s16, q3s16);
-            q2s16 = vaddq_s16(q2s16, q3s16);
-            // part of stage 5
-            DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
-
-            // generate 0,1,2,3
-            // part of stage 4
-            LOAD_FROM_TRANSPOSED(12, 0, 16)
-            DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q5s16, &q7s16)
-            LOAD_FROM_TRANSPOSED(16, 8, 24)
-            DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q14s16, &q6s16)
-            // part of stage 5
-            q4s16 = vaddq_s16(q7s16, q6s16);
-            q7s16 = vsubq_s16(q7s16, q6s16);
-            q6s16 = vsubq_s16(q5s16, q14s16);
-            q5s16 = vaddq_s16(q5s16, q14s16);
-            // part of stage 6
-            q8s16 = vaddq_s16(q4s16, q2s16);
-            q9s16 = vaddq_s16(q5s16, q3s16);
-            q10s16 = vaddq_s16(q6s16, q1s16);
-            q11s16 = vaddq_s16(q7s16, q0s16);
-            q12s16 = vsubq_s16(q7s16, q0s16);
-            q13s16 = vsubq_s16(q6s16, q1s16);
-            q14s16 = vsubq_s16(q5s16, q3s16);
-            q15s16 = vsubq_s16(q4s16, q2s16);
-            // part of stage 7
-            LOAD_FROM_OUTPUT(12, 14, 15, q0s16, q1s16)
-            q2s16 = vaddq_s16(q8s16, q1s16);
-            q3s16 = vaddq_s16(q9s16, q0s16);
-            q4s16 = vsubq_s16(q9s16, q0s16);
-            q5s16 = vsubq_s16(q8s16, q1s16);
-            LOAD_FROM_OUTPUT(15, 16, 17, q0s16, q1s16)
-            q8s16 = vaddq_s16(q4s16, q1s16);
-            q9s16 = vaddq_s16(q5s16, q0s16);
-            q6s16 = vsubq_s16(q5s16, q0s16);
-            q7s16 = vsubq_s16(q4s16, q1s16);
-
-            if (idct32_pass_loop == 0) {
-                idct32_bands_end_1st_pass(out,
-                         q2s16, q3s16, q6s16, q7s16, q8s16, q9s16,
-                         q10s16, q11s16, q12s16, q13s16, q14s16, q15s16);
-            } else {
-                idct32_bands_end_2nd_pass(out, dest, stride,
-                         q2s16, q3s16, q6s16, q7s16, q8s16, q9s16,
-                         q10s16, q11s16, q12s16, q13s16, q14s16, q15s16);
-                dest += 8;
-            }
-        }
+void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) {
+  int i, idct32_pass_loop;
+  int16_t trans_buf[32 * 8];
+  int16_t pass1[32 * 32];
+  int16_t pass2[32 * 32];
+  int16_t *out;
+  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+
+  for (idct32_pass_loop = 0, out = pass1; idct32_pass_loop < 2;
+       idct32_pass_loop++,
+      input = pass1,  // the input of pass2 is the result of pass1
+       out = pass2) {
+    for (i = 0; i < 4; i++, input += 32 * 8, out += 8) {  // idct32_bands_loop
+      idct32_transpose_pair(input, trans_buf);
+
+      // -----------------------------------------
+      // BLOCK A: 16-19,28-31
+      // -----------------------------------------
+      // generate 16,17,30,31
+      // part of stage 1
+      LOAD_FROM_TRANSPOSED(0, 1, 31)
+      DO_BUTTERFLY_STD(cospi_31_64, cospi_1_64, &q0s16, &q2s16)
+      LOAD_FROM_TRANSPOSED(31, 17, 15)
+      DO_BUTTERFLY_STD(cospi_15_64, cospi_17_64, &q1s16, &q3s16)
+      // part of stage 2
+      q4s16 = vaddq_s16(q0s16, q1s16);
+      q13s16 = vsubq_s16(q0s16, q1s16);
+      q6s16 = vaddq_s16(q2s16, q3s16);
+      q14s16 = vsubq_s16(q2s16, q3s16);
+      // part of stage 3
+      DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q5s16, &q7s16)
+
+      // generate 18,19,28,29
+      // part of stage 1
+      LOAD_FROM_TRANSPOSED(15, 9, 23)
+      DO_BUTTERFLY_STD(cospi_23_64, cospi_9_64, &q0s16, &q2s16)
+      LOAD_FROM_TRANSPOSED(23, 25, 7)
+      DO_BUTTERFLY_STD(cospi_7_64, cospi_25_64, &q1s16, &q3s16)
+      // part of stage 2
+      q13s16 = vsubq_s16(q3s16, q2s16);
+      q3s16 = vaddq_s16(q3s16, q2s16);
+      q14s16 = vsubq_s16(q1s16, q0s16);
+      q2s16 = vaddq_s16(q1s16, q0s16);
+      // part of stage 3
+      DO_BUTTERFLY_STD(-cospi_4_64, -cospi_28_64, &q1s16, &q0s16)
+      // part of stage 4
+      q8s16 = vaddq_s16(q4s16, q2s16);
+      q9s16 = vaddq_s16(q5s16, q0s16);
+      q10s16 = vaddq_s16(q7s16, q1s16);
+      q15s16 = vaddq_s16(q6s16, q3s16);
+      q13s16 = vsubq_s16(q5s16, q0s16);
+      q14s16 = vsubq_s16(q7s16, q1s16);
+      STORE_IN_OUTPUT(0, 16, 31, q8s16, q15s16)
+      STORE_IN_OUTPUT(31, 17, 30, q9s16, q10s16)
+      // part of stage 5
+      DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q0s16, &q1s16)
+      STORE_IN_OUTPUT(30, 29, 18, q1s16, q0s16)
+      // part of stage 4
+      q13s16 = vsubq_s16(q4s16, q2s16);
+      q14s16 = vsubq_s16(q6s16, q3s16);
+      // part of stage 5
+      DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q4s16, &q6s16)
+      STORE_IN_OUTPUT(18, 19, 28, q4s16, q6s16)
+
+      // -----------------------------------------
+      // BLOCK B: 20-23,24-27
+      // -----------------------------------------
+      // generate 20,21,26,27
+      // part of stage 1
+      LOAD_FROM_TRANSPOSED(7, 5, 27)
+      DO_BUTTERFLY_STD(cospi_27_64, cospi_5_64, &q0s16, &q2s16)
+      LOAD_FROM_TRANSPOSED(27, 21, 11)
+      DO_BUTTERFLY_STD(cospi_11_64, cospi_21_64, &q1s16, &q3s16)
+      // part of stage 2
+      q13s16 = vsubq_s16(q0s16, q1s16);
+      q0s16 = vaddq_s16(q0s16, q1s16);
+      q14s16 = vsubq_s16(q2s16, q3s16);
+      q2s16 = vaddq_s16(q2s16, q3s16);
+      // part of stage 3
+      DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)
+
+      // generate 22,23,24,25
+      // part of stage 1
+      LOAD_FROM_TRANSPOSED(11, 13, 19)
+      DO_BUTTERFLY_STD(cospi_19_64, cospi_13_64, &q5s16, &q7s16)
+      LOAD_FROM_TRANSPOSED(19, 29, 3)
+      DO_BUTTERFLY_STD(cospi_3_64, cospi_29_64, &q4s16, &q6s16)
+      // part of stage 2
+      q14s16 = vsubq_s16(q4s16, q5s16);
+      q5s16 = vaddq_s16(q4s16, q5s16);
+      q13s16 = vsubq_s16(q6s16, q7s16);
+      q6s16 = vaddq_s16(q6s16, q7s16);
+      // part of stage 3
+      DO_BUTTERFLY_STD(-cospi_20_64, -cospi_12_64, &q4s16, &q7s16)
+      // part of stage 4
+      q10s16 = vaddq_s16(q7s16, q1s16);
+      q11s16 = vaddq_s16(q5s16, q0s16);
+      q12s16 = vaddq_s16(q6s16, q2s16);
+      q15s16 = vaddq_s16(q4s16, q3s16);
+      // part of stage 6
+      LOAD_FROM_OUTPUT(28, 16, 17, q14s16, q13s16)
+      q8s16 = vaddq_s16(q14s16, q11s16);
+      q9s16 = vaddq_s16(q13s16, q10s16);
+      q13s16 = vsubq_s16(q13s16, q10s16);
+      q11s16 = vsubq_s16(q14s16, q11s16);
+      STORE_IN_OUTPUT(17, 17, 16, q9s16, q8s16)
+      LOAD_FROM_OUTPUT(16, 30, 31, q14s16, q9s16)
+      q8s16 = vsubq_s16(q9s16, q12s16);
+      q10s16 = vaddq_s16(q14s16, q15s16);
+      q14s16 = vsubq_s16(q14s16, q15s16);
+      q12s16 = vaddq_s16(q9s16, q12s16);
+      STORE_IN_OUTPUT(31, 30, 31, q10s16, q12s16)
+      // part of stage 7
+      DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
+      STORE_IN_OUTPUT(31, 25, 22, q14s16, q13s16)
+      q13s16 = q11s16;
+      q14s16 = q8s16;
+      DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
+      STORE_IN_OUTPUT(22, 24, 23, q14s16, q13s16)
+      // part of stage 4
+      q14s16 = vsubq_s16(q5s16, q0s16);
+      q13s16 = vsubq_s16(q6s16, q2s16);
+      DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q5s16, &q6s16);
+      q14s16 = vsubq_s16(q7s16, q1s16);
+      q13s16 = vsubq_s16(q4s16, q3s16);
+      DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q0s16, &q1s16);
+      // part of stage 6
+      LOAD_FROM_OUTPUT(23, 18, 19, q14s16, q13s16)
+      q8s16 = vaddq_s16(q14s16, q1s16);
+      q9s16 = vaddq_s16(q13s16, q6s16);
+      q13s16 = vsubq_s16(q13s16, q6s16);
+      q1s16 = vsubq_s16(q14s16, q1s16);
+      STORE_IN_OUTPUT(19, 18, 19, q8s16, q9s16)
+      LOAD_FROM_OUTPUT(19, 28, 29, q8s16, q9s16)
+      q14s16 = vsubq_s16(q8s16, q5s16);
+      q10s16 = vaddq_s16(q8s16, q5s16);
+      q11s16 = vaddq_s16(q9s16, q0s16);
+      q0s16 = vsubq_s16(q9s16, q0s16);
+      STORE_IN_OUTPUT(29, 28, 29, q10s16, q11s16)
+      // part of stage 7
+      DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
+      STORE_IN_OUTPUT(29, 20, 27, q13s16, q14s16)
+      DO_BUTTERFLY(q0s16, q1s16, cospi_16_64, cospi_16_64, &q1s16, &q0s16);
+      STORE_IN_OUTPUT(27, 21, 26, q1s16, q0s16)
+
+      // -----------------------------------------
+      // BLOCK C: 8-11,12-15
+      // -----------------------------------------
+      // generate 8,9,14,15
+      // part of stage 2
+      LOAD_FROM_TRANSPOSED(3, 2, 30)
+      DO_BUTTERFLY_STD(cospi_30_64, cospi_2_64, &q0s16, &q2s16)
+      LOAD_FROM_TRANSPOSED(30, 18, 14)
+      DO_BUTTERFLY_STD(cospi_14_64, cospi_18_64, &q1s16, &q3s16)
+      // part of stage 3
+      q13s16 = vsubq_s16(q0s16, q1s16);
+      q0s16 = vaddq_s16(q0s16, q1s16);
+      q14s16 = vsubq_s16(q2s16, q3s16);
+      q2s16 = vaddq_s16(q2s16, q3s16);
+      // part of stage 4
+      DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q1s16, &q3s16)
+
+      // generate 10,11,12,13
+      // part of stage 2
+      LOAD_FROM_TRANSPOSED(14, 10, 22)
+      DO_BUTTERFLY_STD(cospi_22_64, cospi_10_64, &q5s16, &q7s16)
+      LOAD_FROM_TRANSPOSED(22, 26, 6)
+      DO_BUTTERFLY_STD(cospi_6_64, cospi_26_64, &q4s16, &q6s16)
+      // part of stage 3
+      q14s16 = vsubq_s16(q4s16, q5s16);
+      q5s16 = vaddq_s16(q4s16, q5s16);
+      q13s16 = vsubq_s16(q6s16, q7s16);
+      q6s16 = vaddq_s16(q6s16, q7s16);
+      // part of stage 4
+      DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q4s16, &q7s16)
+      // part of stage 5
+      q8s16 = vaddq_s16(q0s16, q5s16);
+      q9s16 = vaddq_s16(q1s16, q7s16);
+      q13s16 = vsubq_s16(q1s16, q7s16);
+      q14s16 = vsubq_s16(q3s16, q4s16);
+      q10s16 = vaddq_s16(q3s16, q4s16);
+      q15s16 = vaddq_s16(q2s16, q6s16);
+      STORE_IN_OUTPUT(26, 8, 15, q8s16, q15s16)
+      STORE_IN_OUTPUT(15, 9, 14, q9s16, q10s16)
+      // part of stage 6
+      DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
+      STORE_IN_OUTPUT(14, 13, 10, q3s16, q1s16)
+      q13s16 = vsubq_s16(q0s16, q5s16);
+      q14s16 = vsubq_s16(q2s16, q6s16);
+      DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
+      STORE_IN_OUTPUT(10, 11, 12, q1s16, q3s16)
+
+      // -----------------------------------------
+      // BLOCK D: 0-3,4-7
+      // -----------------------------------------
+      // generate 4,5,6,7
+      // part of stage 3
+      LOAD_FROM_TRANSPOSED(6, 4, 28)
+      DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q0s16, &q2s16)
+      LOAD_FROM_TRANSPOSED(28, 20, 12)
+      DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)
+      // part of stage 4
+      q13s16 = vsubq_s16(q0s16, q1s16);
+      q0s16 = vaddq_s16(q0s16, q1s16);
+      q14s16 = vsubq_s16(q2s16, q3s16);
+      q2s16 = vaddq_s16(q2s16, q3s16);
+      // part of stage 5
+      DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
+
+      // generate 0,1,2,3
+      // part of stage 4
+      LOAD_FROM_TRANSPOSED(12, 0, 16)
+      DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q5s16, &q7s16)
+      LOAD_FROM_TRANSPOSED(16, 8, 24)
+      DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q14s16, &q6s16)
+      // part of stage 5
+      q4s16 = vaddq_s16(q7s16, q6s16);
+      q7s16 = vsubq_s16(q7s16, q6s16);
+      q6s16 = vsubq_s16(q5s16, q14s16);
+      q5s16 = vaddq_s16(q5s16, q14s16);
+      // part of stage 6
+      q8s16 = vaddq_s16(q4s16, q2s16);
+      q9s16 = vaddq_s16(q5s16, q3s16);
+      q10s16 = vaddq_s16(q6s16, q1s16);
+      q11s16 = vaddq_s16(q7s16, q0s16);
+      q12s16 = vsubq_s16(q7s16, q0s16);
+      q13s16 = vsubq_s16(q6s16, q1s16);
+      q14s16 = vsubq_s16(q5s16, q3s16);
+      q15s16 = vsubq_s16(q4s16, q2s16);
+      // part of stage 7
+      LOAD_FROM_OUTPUT(12, 14, 15, q0s16, q1s16)
+      q2s16 = vaddq_s16(q8s16, q1s16);
+      q3s16 = vaddq_s16(q9s16, q0s16);
+      q4s16 = vsubq_s16(q9s16, q0s16);
+      q5s16 = vsubq_s16(q8s16, q1s16);
+      LOAD_FROM_OUTPUT(15, 16, 17, q0s16, q1s16)
+      q8s16 = vaddq_s16(q4s16, q1s16);
+      q9s16 = vaddq_s16(q5s16, q0s16);
+      q6s16 = vsubq_s16(q5s16, q0s16);
+      q7s16 = vsubq_s16(q4s16, q1s16);
+
+      if (idct32_pass_loop == 0) {
+        idct32_bands_end_1st_pass(out, q2s16, q3s16, q6s16, q7s16, q8s16, q9s16,
+                                  q10s16, q11s16, q12s16, q13s16, q14s16,
+                                  q15s16);
+      } else {
+        idct32_bands_end_2nd_pass(out, dest, stride, q2s16, q3s16, q6s16, q7s16,
+                                  q8s16, q9s16, q10s16, q11s16, q12s16, q13s16,
+                                  q14s16, q15s16);
+        dest += 8;
+      }
     }
-    return;
+  }
+  return;
 }
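
Every DO_BUTTERFLY_STD above is the same fixed-point rotation; only the
cospi_N_64 coefficients (round(cos(N * pi / 64) * 2^14), e.g. the
cospi_16_64 = 11585 and cospi_8_64 = 15137 declared elsewhere in this patch)
change from stage to stage. A scalar sketch of what each lane computes,
assuming the macro follows the reference rotation in vpx_dsp/inv_txfm.c
(the NEON code maps the multiplies to vmull_s16, the merges to
vmlal_s16/vmlsl_s16, and the round-shift to vqrshrn_n_s32(.., 14), which
additionally saturates; helper names here are illustrative):

  #include <stdint.h>

  #define DCT_CONST_BITS 14
  #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

  static int32_t dct_const_round_shift(int32_t input) {
    return ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
  }

  /* Illustrative scalar model, not the NEON implementation: rotate the
     lane pair (a, b) by the angle encoded in (c0, c1). */
  static void butterfly(int16_t a, int16_t b, int16_t c0, int16_t c1,
                        int16_t *out0, int16_t *out1) {
    *out0 = (int16_t)dct_const_round_shift(a * c0 - b * c1); /* vmull+vmlsl */
    *out1 = (int16_t)dct_const_round_shift(a * c1 + b * c0); /* vmull+vmlal */
  }
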
diff --git a/vpx_dsp/arm/idct4x4_1_add_neon.c b/vpx_dsp/arm/idct4x4_1_add_neon.c
index ea618700c95457f77b1a361754446ce98eb5eba4..9f999e979d7fd1721db04c2475a36902463879f5 100644
--- a/vpx_dsp/arm/idct4x4_1_add_neon.c
+++ b/vpx_dsp/arm/idct4x4_1_add_neon.c
@@ -13,38 +13,34 @@
 #include "vpx_dsp/inv_txfm.h"
 #include "vpx_ports/mem.h"
 
-void vpx_idct4x4_1_add_neon(
-        int16_t *input,
-        uint8_t *dest,
-        int dest_stride) {
-    uint8x8_t d6u8;
-    uint32x2_t d2u32 = vdup_n_u32(0);
-    uint16x8_t q8u16;
-    int16x8_t q0s16;
-    uint8_t *d1, *d2;
-    int16_t i, a1, cospi_16_64 = 11585;
-    int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
-    out = dct_const_round_shift(out * cospi_16_64);
-    a1 = ROUND_POWER_OF_TWO(out, 4);
+void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+  uint8x8_t d6u8;
+  uint32x2_t d2u32 = vdup_n_u32(0);
+  uint16x8_t q8u16;
+  int16x8_t q0s16;
+  uint8_t *d1, *d2;
+  int16_t i, a1, cospi_16_64 = 11585;
+  int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+  out = dct_const_round_shift(out * cospi_16_64);
+  a1 = ROUND_POWER_OF_TWO(out, 4);
 
-    q0s16 = vdupq_n_s16(a1);
+  q0s16 = vdupq_n_s16(a1);
 
-    // dc_only_idct_add
-    d1 = d2 = dest;
-    for (i = 0; i < 2; i++) {
-        d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 0);
-        d1 += dest_stride;
-        d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 1);
-        d1 += dest_stride;
+  // dc_only_idct_add
+  d1 = d2 = dest;
+  for (i = 0; i < 2; i++) {
+    d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 0);
+    d1 += dest_stride;
+    d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 1);
+    d1 += dest_stride;
 
-        q8u16 = vaddw_u8(vreinterpretq_u16_s16(q0s16),
-                         vreinterpret_u8_u32(d2u32));
-        d6u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q0s16), vreinterpret_u8_u32(d2u32));
+    d6u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
 
-        vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 0);
-        d2 += dest_stride;
-        vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 1);
-        d2 += dest_stride;
-    }
-    return;
+    vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 0);
+    d2 += dest_stride;
+    vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 1);
+    d2 += dest_stride;
+  }
+  return;
 }
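
The DC-only 4x4 path collapses the whole transform into one bias: input[0]
is passed through dct_const_round_shift twice with cospi_16_64 (the row and
column DC gains), and ROUND_POWER_OF_TWO(out, 4) applies the transform's
final rounding shift, exactly as in the lines above. A scalar model of the
function (helper names illustrative), assuming the dc_only_idct_add loop
simply adds a1 to all 16 pixels with unsigned saturation, which is what the
vaddw_u8/vqmovun_s16 pair does lane-wise:

  #include <stdint.h>

  #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

  static int32_t dct_const_round_shift(int32_t input) {
    return ROUND_POWER_OF_TWO(input, 14);
  }

  static uint8_t clamp_u8(int v) {
    return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
  }

  /* Illustrative scalar model: compute the DC bias a1 exactly as above,
     then add it to every destination pixel with saturation. */
  static void idct4x4_1_add_model(const int16_t *input, uint8_t *dest,
                                  int dest_stride) {
    const int16_t cospi_16_64 = 11585;
    int16_t out = (int16_t)dct_const_round_shift(input[0] * cospi_16_64);
    int16_t a1;
    int r, c;
    out = (int16_t)dct_const_round_shift(out * cospi_16_64);
    a1 = (int16_t)ROUND_POWER_OF_TWO(out, 4);
    for (r = 0; r < 4; ++r)
      for (c = 0; c < 4; ++c)
        dest[r * dest_stride + c] = clamp_u8(dest[r * dest_stride + c] + a1);
  }
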
diff --git a/vpx_dsp/arm/idct4x4_add_neon.c b/vpx_dsp/arm/idct4x4_add_neon.c
index 3c975c99b771c7fbe063fecba3dde0d469e116ef..382626928bfdcd15dffad54bff915edd6b845f4f 100644
--- a/vpx_dsp/arm/idct4x4_add_neon.c
+++ b/vpx_dsp/arm/idct4x4_add_neon.c
@@ -10,142 +10,137 @@
 
 #include <arm_neon.h>
 
-void vpx_idct4x4_16_add_neon(
-        int16_t *input,
-        uint8_t *dest,
-        int dest_stride) {
-    uint8x8_t d26u8, d27u8;
-    uint32x2_t d26u32, d27u32;
-    uint16x8_t q8u16, q9u16;
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16;
-    int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16;
-    int16x8_t q8s16, q9s16, q13s16, q14s16;
-    int32x4_t q1s32, q13s32, q14s32, q15s32;
-    int16x4x2_t d0x2s16, d1x2s16;
-    int32x4x2_t q0x2s32;
-    uint8_t *d;
-    int16_t cospi_8_64 = 15137;
-    int16_t cospi_16_64 = 11585;
-    int16_t cospi_24_64 = 6270;
-
-    d26u32 = d27u32 = vdup_n_u32(0);
-
-    q8s16 = vld1q_s16(input);
-    q9s16 = vld1q_s16(input + 8);
-
-    d16s16 = vget_low_s16(q8s16);
-    d17s16 = vget_high_s16(q8s16);
-    d18s16 = vget_low_s16(q9s16);
-    d19s16 = vget_high_s16(q9s16);
-
-    d0x2s16 = vtrn_s16(d16s16, d17s16);
-    d1x2s16 = vtrn_s16(d18s16, d19s16);
-    q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
-    q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);
-
-    d20s16 = vdup_n_s16(cospi_8_64);
-    d21s16 = vdup_n_s16(cospi_16_64);
-
-    q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16),
-                        vreinterpretq_s32_s16(q9s16));
-    d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
-    d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
-    d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
-    d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
-
-    d22s16 = vdup_n_s16(cospi_24_64);
-
-    // stage 1
-    d23s16 = vadd_s16(d16s16, d18s16);
-    d24s16 = vsub_s16(d16s16, d18s16);
-
-    q15s32 = vmull_s16(d17s16, d22s16);
-    q1s32  = vmull_s16(d17s16, d20s16);
-    q13s32 = vmull_s16(d23s16, d21s16);
-    q14s32 = vmull_s16(d24s16, d21s16);
-
-    q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
-    q1s32  = vmlal_s16(q1s32,  d19s16, d22s16);
-
-    d26s16 = vqrshrn_n_s32(q13s32, 14);
-    d27s16 = vqrshrn_n_s32(q14s32, 14);
-    d29s16 = vqrshrn_n_s32(q15s32, 14);
-    d28s16 = vqrshrn_n_s32(q1s32,  14);
-    q13s16 = vcombine_s16(d26s16, d27s16);
-    q14s16 = vcombine_s16(d28s16, d29s16);
-
-    // stage 2
-    q8s16 = vaddq_s16(q13s16, q14s16);
-    q9s16 = vsubq_s16(q13s16, q14s16);
-
-    d16s16 = vget_low_s16(q8s16);
-    d17s16 = vget_high_s16(q8s16);
-    d18s16 = vget_high_s16(q9s16);  // vswp d18 d19
-    d19s16 = vget_low_s16(q9s16);
-
-    d0x2s16 = vtrn_s16(d16s16, d17s16);
-    d1x2s16 = vtrn_s16(d18s16, d19s16);
-    q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
-    q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);
-
-    q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16),
-                        vreinterpretq_s32_s16(q9s16));
-    d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
-    d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
-    d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
-    d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
-
-    // do the transform on columns
-    // stage 1
-    d23s16 = vadd_s16(d16s16, d18s16);
-    d24s16 = vsub_s16(d16s16, d18s16);
-
-    q15s32 = vmull_s16(d17s16, d22s16);
-    q1s32  = vmull_s16(d17s16, d20s16);
-    q13s32 = vmull_s16(d23s16, d21s16);
-    q14s32 = vmull_s16(d24s16, d21s16);
-
-    q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
-    q1s32  = vmlal_s16(q1s32,  d19s16, d22s16);
-
-    d26s16 = vqrshrn_n_s32(q13s32, 14);
-    d27s16 = vqrshrn_n_s32(q14s32, 14);
-    d29s16 = vqrshrn_n_s32(q15s32, 14);
-    d28s16 = vqrshrn_n_s32(q1s32,  14);
-    q13s16 = vcombine_s16(d26s16, d27s16);
-    q14s16 = vcombine_s16(d28s16, d29s16);
-
-    // stage 2
-    q8s16 = vaddq_s16(q13s16, q14s16);
-    q9s16 = vsubq_s16(q13s16, q14s16);
-
-    q8s16 = vrshrq_n_s16(q8s16, 4);
-    q9s16 = vrshrq_n_s16(q9s16, 4);
-
-    d = dest;
-    d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0);
-    d += dest_stride;
-    d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1);
-    d += dest_stride;
-    d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1);
-    d += dest_stride;
-    d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0);
-
-    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16),
-                     vreinterpret_u8_u32(d26u32));
-    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16),
-                     vreinterpret_u8_u32(d27u32));
-
-    d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-    d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-
-    d = dest;
-    vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0);
-    d += dest_stride;
-    vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1);
-    d += dest_stride;
-    vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1);
-    d += dest_stride;
-    vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0);
-    return;
+void vpx_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+  uint8x8_t d26u8, d27u8;
+  uint32x2_t d26u32, d27u32;
+  uint16x8_t q8u16, q9u16;
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16;
+  int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16;
+  int16x8_t q8s16, q9s16, q13s16, q14s16;
+  int32x4_t q1s32, q13s32, q14s32, q15s32;
+  int16x4x2_t d0x2s16, d1x2s16;
+  int32x4x2_t q0x2s32;
+  uint8_t *d;
+  int16_t cospi_8_64 = 15137;
+  int16_t cospi_16_64 = 11585;
+  int16_t cospi_24_64 = 6270;
+
+  d26u32 = d27u32 = vdup_n_u32(0);
+
+  q8s16 = vld1q_s16(input);
+  q9s16 = vld1q_s16(input + 8);
+
+  d16s16 = vget_low_s16(q8s16);
+  d17s16 = vget_high_s16(q8s16);
+  d18s16 = vget_low_s16(q9s16);
+  d19s16 = vget_high_s16(q9s16);
+
+  d0x2s16 = vtrn_s16(d16s16, d17s16);
+  d1x2s16 = vtrn_s16(d18s16, d19s16);
+  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
+  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);
+
+  d20s16 = vdup_n_s16(cospi_8_64);
+  d21s16 = vdup_n_s16(cospi_16_64);
+
+  q0x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
+  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
+  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
+  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
+  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
+
+  d22s16 = vdup_n_s16(cospi_24_64);
+
+  // stage 1
+  d23s16 = vadd_s16(d16s16, d18s16);
+  d24s16 = vsub_s16(d16s16, d18s16);
+
+  q15s32 = vmull_s16(d17s16, d22s16);
+  q1s32 = vmull_s16(d17s16, d20s16);
+  q13s32 = vmull_s16(d23s16, d21s16);
+  q14s32 = vmull_s16(d24s16, d21s16);
+
+  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
+  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);
+
+  d26s16 = vqrshrn_n_s32(q13s32, 14);
+  d27s16 = vqrshrn_n_s32(q14s32, 14);
+  d29s16 = vqrshrn_n_s32(q15s32, 14);
+  d28s16 = vqrshrn_n_s32(q1s32, 14);
+  q13s16 = vcombine_s16(d26s16, d27s16);
+  q14s16 = vcombine_s16(d28s16, d29s16);
+
+  // stage 2
+  q8s16 = vaddq_s16(q13s16, q14s16);
+  q9s16 = vsubq_s16(q13s16, q14s16);
+
+  d16s16 = vget_low_s16(q8s16);
+  d17s16 = vget_high_s16(q8s16);
+  d18s16 = vget_high_s16(q9s16);  // vswp d18 d19
+  d19s16 = vget_low_s16(q9s16);
+
+  d0x2s16 = vtrn_s16(d16s16, d17s16);
+  d1x2s16 = vtrn_s16(d18s16, d19s16);
+  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
+  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);
+
+  q0x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
+  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
+  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
+  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
+  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
+
+  // do the transform on columns
+  // stage 1
+  d23s16 = vadd_s16(d16s16, d18s16);
+  d24s16 = vsub_s16(d16s16, d18s16);
+
+  q15s32 = vmull_s16(d17s16, d22s16);
+  q1s32 = vmull_s16(d17s16, d20s16);
+  q13s32 = vmull_s16(d23s16, d21s16);
+  q14s32 = vmull_s16(d24s16, d21s16);
+
+  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
+  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);
+
+  d26s16 = vqrshrn_n_s32(q13s32, 14);
+  d27s16 = vqrshrn_n_s32(q14s32, 14);
+  d29s16 = vqrshrn_n_s32(q15s32, 14);
+  d28s16 = vqrshrn_n_s32(q1s32, 14);
+  q13s16 = vcombine_s16(d26s16, d27s16);
+  q14s16 = vcombine_s16(d28s16, d29s16);
+
+  // stage 2
+  q8s16 = vaddq_s16(q13s16, q14s16);
+  q9s16 = vsubq_s16(q13s16, q14s16);
+
+  q8s16 = vrshrq_n_s16(q8s16, 4);
+  q9s16 = vrshrq_n_s16(q9s16, 4);
+
+  d = dest;
+  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0);
+  d += dest_stride;
+  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1);
+  d += dest_stride;
+  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1);
+  d += dest_stride;
+  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0);
+
+  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
+  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));
+
+  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+
+  d = dest;
+  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0);
+  d += dest_stride;
+  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1);
+  d += dest_stride;
+  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1);
+  d += dest_stride;
+  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0);
+  return;
 }
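
Per lane, the two stages above are the reference 4-point IDCT; the
vtrn_s16/vtrnq_s32 pairs transpose the 4x4 block so the same code handles
rows and then columns, and vrshrq_n_s16(.., 4) is the final rounding. A
scalar sketch of one 4-point pass, following the idct4_c structure in
vpx_dsp/inv_txfm.c (the NEON vqrshrn_n_s32 round-shift also saturates,
which this model omits; helper names illustrative):

  #include <stdint.h>

  #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

  static int32_t dct_const_round_shift(int32_t x) {
    return ROUND_POWER_OF_TWO(x, 14);
  }

  /* Illustrative scalar model of one row/column pass. */
  static void idct4_model(const int16_t in[4], int16_t out[4]) {
    const int16_t cospi_8_64 = 15137, cospi_16_64 = 11585, cospi_24_64 = 6270;
    /* stage 1 */
    const int16_t s0 =
        (int16_t)dct_const_round_shift((in[0] + in[2]) * cospi_16_64);
    const int16_t s1 =
        (int16_t)dct_const_round_shift((in[0] - in[2]) * cospi_16_64);
    const int16_t s2 =
        (int16_t)dct_const_round_shift(in[1] * cospi_24_64 - in[3] * cospi_8_64);
    const int16_t s3 =
        (int16_t)dct_const_round_shift(in[1] * cospi_8_64 + in[3] * cospi_24_64);
    /* stage 2: the vaddq_s16/vsubq_s16 pair; the out[2]/out[3] ordering is
       what the "vswp d18 d19" comment above takes care of. */
    out[0] = s0 + s3;
    out[1] = s1 + s2;
    out[2] = s1 - s2;
    out[3] = s0 - s3;
  }
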
diff --git a/vpx_dsp/arm/idct8x8_1_add_neon.c b/vpx_dsp/arm/idct8x8_1_add_neon.c
index c1b801fad54390af8544f994de6bc9a2f98cae94..e3db0b876bc2cb5cf3fe2db7ee70ee99a555e639 100644
--- a/vpx_dsp/arm/idct8x8_1_add_neon.c
+++ b/vpx_dsp/arm/idct8x8_1_add_neon.c
@@ -13,52 +13,49 @@
 #include "vpx_dsp/inv_txfm.h"
 #include "vpx_ports/mem.h"
 
-void vpx_idct8x8_1_add_neon(
-        int16_t *input,
-        uint8_t *dest,
-        int dest_stride) {
-    uint8x8_t d2u8, d3u8, d30u8, d31u8;
-    uint64x1_t d2u64, d3u64, d4u64, d5u64;
-    uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
-    int16x8_t q0s16;
-    uint8_t *d1, *d2;
-    int16_t i, a1, cospi_16_64 = 11585;
-    int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
-    out = dct_const_round_shift(out * cospi_16_64);
-    a1 = ROUND_POWER_OF_TWO(out, 5);
-
-    q0s16 = vdupq_n_s16(a1);
-    q0u16 = vreinterpretq_u16_s16(q0s16);
-
-    d1 = d2 = dest;
-    for (i = 0; i < 2; i++) {
-        d2u64 = vld1_u64((const uint64_t *)d1);
-        d1 += dest_stride;
-        d3u64 = vld1_u64((const uint64_t *)d1);
-        d1 += dest_stride;
-        d4u64 = vld1_u64((const uint64_t *)d1);
-        d1 += dest_stride;
-        d5u64 = vld1_u64((const uint64_t *)d1);
-        d1 += dest_stride;
-
-        q9u16  = vaddw_u8(q0u16, vreinterpret_u8_u64(d2u64));
-        q10u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d3u64));
-        q11u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d4u64));
-        q12u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d5u64));
-
-        d2u8  = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-        d3u8  = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
-        d30u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
-        d31u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
-
-        vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
-        d2 += dest_stride;
-        vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
-        d2 += dest_stride;
-        vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d30u8));
-        d2 += dest_stride;
-        vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d31u8));
-        d2 += dest_stride;
-    }
-    return;
+void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+  uint8x8_t d2u8, d3u8, d30u8, d31u8;
+  uint64x1_t d2u64, d3u64, d4u64, d5u64;
+  uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
+  int16x8_t q0s16;
+  uint8_t *d1, *d2;
+  int16_t i, a1, cospi_16_64 = 11585;
+  int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+  out = dct_const_round_shift(out * cospi_16_64);
+  a1 = ROUND_POWER_OF_TWO(out, 5);
+
+  q0s16 = vdupq_n_s16(a1);
+  q0u16 = vreinterpretq_u16_s16(q0s16);
+
+  d1 = d2 = dest;
+  for (i = 0; i < 2; i++) {
+    d2u64 = vld1_u64((const uint64_t *)d1);
+    d1 += dest_stride;
+    d3u64 = vld1_u64((const uint64_t *)d1);
+    d1 += dest_stride;
+    d4u64 = vld1_u64((const uint64_t *)d1);
+    d1 += dest_stride;
+    d5u64 = vld1_u64((const uint64_t *)d1);
+    d1 += dest_stride;
+
+    q9u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d2u64));
+    q10u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d3u64));
+    q11u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d4u64));
+    q12u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d5u64));
+
+    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+    d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
+    d30u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
+    d31u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16));
+
+    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
+    d2 += dest_stride;
+    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
+    d2 += dest_stride;
+    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d30u8));
+    d2 += dest_stride;
+    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d31u8));
+    d2 += dest_stride;
+  }
+  return;
 }
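
Apart from the wider rows (vld1_u64/vst1_u64 instead of 4-byte lanes), the
only difference from the 4x4 DC path is the final shift:
ROUND_POWER_OF_TWO(out, 5) for the 8x8 transform versus 4 for the 4x4. The
vaddw_u8/vqmovun_s16 pair is the usual reconstruction idiom: the residual
splat is reinterpreted as u16, added to the widened pixels (the wrapped bit
pattern equals the signed sum here, since |a1| is small), and narrowed back
with unsigned saturation so negative DC biases clamp to 0. A per-pixel
scalar model (helper name illustrative):

  #include <stdint.h>

  /* Illustrative scalar model of the vaddw_u8 + vqmovun_s16 idiom. */
  static uint8_t reconstruct(uint8_t pixel, int16_t residual) {
    int sum = pixel + residual; /* vaddw_u8 on the reinterpreted lanes */
    if (sum < 0) return 0;      /* vqmovun_s16 saturates to [0, 255] */
    if (sum > 255) return 255;
    return (uint8_t)sum;
  }
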
diff --git a/vpx_dsp/arm/idct8x8_add_neon.c b/vpx_dsp/arm/idct8x8_add_neon.c
index 4b2c2a6f83c0ce731605827984aa658dd662ada4..f1c271110dea9fedc801d36e2e416676c5d81c53 100644
--- a/vpx_dsp/arm/idct8x8_add_neon.c
+++ b/vpx_dsp/arm/idct8x8_add_neon.c
@@ -13,528 +13,496 @@
 #include "./vpx_config.h"
 #include "vpx_dsp/txfm_common.h"
 
-static INLINE void TRANSPOSE8X8(
-        int16x8_t *q8s16,
-        int16x8_t *q9s16,
-        int16x8_t *q10s16,
-        int16x8_t *q11s16,
-        int16x8_t *q12s16,
-        int16x8_t *q13s16,
-        int16x8_t *q14s16,
-        int16x8_t *q15s16) {
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
-    int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
-
-    d16s16 = vget_low_s16(*q8s16);
-    d17s16 = vget_high_s16(*q8s16);
-    d18s16 = vget_low_s16(*q9s16);
-    d19s16 = vget_high_s16(*q9s16);
-    d20s16 = vget_low_s16(*q10s16);
-    d21s16 = vget_high_s16(*q10s16);
-    d22s16 = vget_low_s16(*q11s16);
-    d23s16 = vget_high_s16(*q11s16);
-    d24s16 = vget_low_s16(*q12s16);
-    d25s16 = vget_high_s16(*q12s16);
-    d26s16 = vget_low_s16(*q13s16);
-    d27s16 = vget_high_s16(*q13s16);
-    d28s16 = vget_low_s16(*q14s16);
-    d29s16 = vget_high_s16(*q14s16);
-    d30s16 = vget_low_s16(*q15s16);
-    d31s16 = vget_high_s16(*q15s16);
-
-    *q8s16  = vcombine_s16(d16s16, d24s16);  // vswp d17, d24
-    *q9s16  = vcombine_s16(d18s16, d26s16);  // vswp d19, d26
-    *q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
-    *q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
-    *q12s16 = vcombine_s16(d17s16, d25s16);
-    *q13s16 = vcombine_s16(d19s16, d27s16);
-    *q14s16 = vcombine_s16(d21s16, d29s16);
-    *q15s16 = vcombine_s16(d23s16, d31s16);
-
-    q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q8s16),
-                        vreinterpretq_s32_s16(*q10s16));
-    q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q9s16),
-                        vreinterpretq_s32_s16(*q11s16));
-    q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q12s16),
-                        vreinterpretq_s32_s16(*q14s16));
-    q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q13s16),
-                        vreinterpretq_s32_s16(*q15s16));
-
-    q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
-                        vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
-    q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
-                        vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
-    q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
-                        vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
-    q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
-                        vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
-
-    *q8s16  = q0x2s16.val[0];
-    *q9s16  = q0x2s16.val[1];
-    *q10s16 = q1x2s16.val[0];
-    *q11s16 = q1x2s16.val[1];
-    *q12s16 = q2x2s16.val[0];
-    *q13s16 = q2x2s16.val[1];
-    *q14s16 = q3x2s16.val[0];
-    *q15s16 = q3x2s16.val[1];
-    return;
+static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16,
+                                int16x8_t *q10s16, int16x8_t *q11s16,
+                                int16x8_t *q12s16, int16x8_t *q13s16,
+                                int16x8_t *q14s16, int16x8_t *q15s16) {
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
+  int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
+
+  d16s16 = vget_low_s16(*q8s16);
+  d17s16 = vget_high_s16(*q8s16);
+  d18s16 = vget_low_s16(*q9s16);
+  d19s16 = vget_high_s16(*q9s16);
+  d20s16 = vget_low_s16(*q10s16);
+  d21s16 = vget_high_s16(*q10s16);
+  d22s16 = vget_low_s16(*q11s16);
+  d23s16 = vget_high_s16(*q11s16);
+  d24s16 = vget_low_s16(*q12s16);
+  d25s16 = vget_high_s16(*q12s16);
+  d26s16 = vget_low_s16(*q13s16);
+  d27s16 = vget_high_s16(*q13s16);
+  d28s16 = vget_low_s16(*q14s16);
+  d29s16 = vget_high_s16(*q14s16);
+  d30s16 = vget_low_s16(*q15s16);
+  d31s16 = vget_high_s16(*q15s16);
+
+  *q8s16 = vcombine_s16(d16s16, d24s16);   // vswp d17, d24
+  *q9s16 = vcombine_s16(d18s16, d26s16);   // vswp d19, d26
+  *q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
+  *q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
+  *q12s16 = vcombine_s16(d17s16, d25s16);
+  *q13s16 = vcombine_s16(d19s16, d27s16);
+  *q14s16 = vcombine_s16(d21s16, d29s16);
+  *q15s16 = vcombine_s16(d23s16, d31s16);
+
+  q0x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q8s16), vreinterpretq_s32_s16(*q10s16));
+  q1x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q9s16), vreinterpretq_s32_s16(*q11s16));
+  q2x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q12s16), vreinterpretq_s32_s16(*q14s16));
+  q3x2s32 =
+      vtrnq_s32(vreinterpretq_s32_s16(*q13s16), vreinterpretq_s32_s16(*q15s16));
+
+  q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
+                      vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
+  q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
+                      vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
+  q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
+                      vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
+  q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
+                      vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
+
+  *q8s16 = q0x2s16.val[0];
+  *q9s16 = q0x2s16.val[1];
+  *q10s16 = q1x2s16.val[0];
+  *q11s16 = q1x2s16.val[1];
+  *q12s16 = q2x2s16.val[0];
+  *q13s16 = q2x2s16.val[1];
+  *q14s16 = q3x2s16.val[0];
+  *q15s16 = q3x2s16.val[1];
+  return;
 }
 
-static INLINE void IDCT8x8_1D(
-        int16x8_t *q8s16,
-        int16x8_t *q9s16,
-        int16x8_t *q10s16,
-        int16x8_t *q11s16,
-        int16x8_t *q12s16,
-        int16x8_t *q13s16,
-        int16x8_t *q14s16,
-        int16x8_t *q15s16) {
-    int16x4_t d0s16, d1s16, d2s16, d3s16;
-    int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-    int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-    int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-    int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32;
-    int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;
-
-    d0s16 = vdup_n_s16(cospi_28_64);
-    d1s16 = vdup_n_s16(cospi_4_64);
-    d2s16 = vdup_n_s16(cospi_12_64);
-    d3s16 = vdup_n_s16(cospi_20_64);
-
-    d16s16 = vget_low_s16(*q8s16);
-    d17s16 = vget_high_s16(*q8s16);
-    d18s16 = vget_low_s16(*q9s16);
-    d19s16 = vget_high_s16(*q9s16);
-    d20s16 = vget_low_s16(*q10s16);
-    d21s16 = vget_high_s16(*q10s16);
-    d22s16 = vget_low_s16(*q11s16);
-    d23s16 = vget_high_s16(*q11s16);
-    d24s16 = vget_low_s16(*q12s16);
-    d25s16 = vget_high_s16(*q12s16);
-    d26s16 = vget_low_s16(*q13s16);
-    d27s16 = vget_high_s16(*q13s16);
-    d28s16 = vget_low_s16(*q14s16);
-    d29s16 = vget_high_s16(*q14s16);
-    d30s16 = vget_low_s16(*q15s16);
-    d31s16 = vget_high_s16(*q15s16);
-
-    q2s32 = vmull_s16(d18s16, d0s16);
-    q3s32 = vmull_s16(d19s16, d0s16);
-    q5s32 = vmull_s16(d26s16, d2s16);
-    q6s32 = vmull_s16(d27s16, d2s16);
-
-    q2s32 = vmlsl_s16(q2s32, d30s16, d1s16);
-    q3s32 = vmlsl_s16(q3s32, d31s16, d1s16);
-    q5s32 = vmlsl_s16(q5s32, d22s16, d3s16);
-    q6s32 = vmlsl_s16(q6s32, d23s16, d3s16);
-
-    d8s16 = vqrshrn_n_s32(q2s32, 14);
-    d9s16 = vqrshrn_n_s32(q3s32, 14);
-    d10s16 = vqrshrn_n_s32(q5s32, 14);
-    d11s16 = vqrshrn_n_s32(q6s32, 14);
-    q4s16 = vcombine_s16(d8s16, d9s16);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-
-    q2s32 = vmull_s16(d18s16, d1s16);
-    q3s32 = vmull_s16(d19s16, d1s16);
-    q9s32 = vmull_s16(d26s16, d3s16);
-    q13s32 = vmull_s16(d27s16, d3s16);
-
-    q2s32 = vmlal_s16(q2s32, d30s16, d0s16);
-    q3s32 = vmlal_s16(q3s32, d31s16, d0s16);
-    q9s32 = vmlal_s16(q9s32, d22s16, d2s16);
-    q13s32 = vmlal_s16(q13s32, d23s16, d2s16);
-
-    d14s16 = vqrshrn_n_s32(q2s32, 14);
-    d15s16 = vqrshrn_n_s32(q3s32, 14);
-    d12s16 = vqrshrn_n_s32(q9s32, 14);
-    d13s16 = vqrshrn_n_s32(q13s32, 14);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-    q7s16 = vcombine_s16(d14s16, d15s16);
-
-    d0s16 = vdup_n_s16(cospi_16_64);
-
-    q2s32 = vmull_s16(d16s16, d0s16);
-    q3s32 = vmull_s16(d17s16, d0s16);
-    q13s32 = vmull_s16(d16s16, d0s16);
-    q15s32 = vmull_s16(d17s16, d0s16);
-
-    q2s32 = vmlal_s16(q2s32, d24s16, d0s16);
-    q3s32 = vmlal_s16(q3s32, d25s16, d0s16);
-    q13s32 = vmlsl_s16(q13s32, d24s16, d0s16);
-    q15s32 = vmlsl_s16(q15s32, d25s16, d0s16);
-
-    d0s16 = vdup_n_s16(cospi_24_64);
-    d1s16 = vdup_n_s16(cospi_8_64);
-
-    d18s16 = vqrshrn_n_s32(q2s32, 14);
-    d19s16 = vqrshrn_n_s32(q3s32, 14);
-    d22s16 = vqrshrn_n_s32(q13s32, 14);
-    d23s16 = vqrshrn_n_s32(q15s32, 14);
-    *q9s16 = vcombine_s16(d18s16, d19s16);
-    *q11s16 = vcombine_s16(d22s16, d23s16);
-
-    q2s32 = vmull_s16(d20s16, d0s16);
-    q3s32 = vmull_s16(d21s16, d0s16);
-    q8s32 = vmull_s16(d20s16, d1s16);
-    q12s32 = vmull_s16(d21s16, d1s16);
-
-    q2s32 = vmlsl_s16(q2s32, d28s16, d1s16);
-    q3s32 = vmlsl_s16(q3s32, d29s16, d1s16);
-    q8s32 = vmlal_s16(q8s32, d28s16, d0s16);
-    q12s32 = vmlal_s16(q12s32, d29s16, d0s16);
-
-    d26s16 = vqrshrn_n_s32(q2s32, 14);
-    d27s16 = vqrshrn_n_s32(q3s32, 14);
-    d30s16 = vqrshrn_n_s32(q8s32, 14);
-    d31s16 = vqrshrn_n_s32(q12s32, 14);
-    *q13s16 = vcombine_s16(d26s16, d27s16);
-    *q15s16 = vcombine_s16(d30s16, d31s16);
-
-    q0s16 = vaddq_s16(*q9s16, *q15s16);
-    q1s16 = vaddq_s16(*q11s16, *q13s16);
-    q2s16 = vsubq_s16(*q11s16, *q13s16);
-    q3s16 = vsubq_s16(*q9s16, *q15s16);
-
-    *q13s16 = vsubq_s16(q4s16, q5s16);
-    q4s16 = vaddq_s16(q4s16, q5s16);
-    *q14s16 = vsubq_s16(q7s16, q6s16);
-    q7s16 = vaddq_s16(q7s16, q6s16);
-    d26s16 = vget_low_s16(*q13s16);
-    d27s16 = vget_high_s16(*q13s16);
-    d28s16 = vget_low_s16(*q14s16);
-    d29s16 = vget_high_s16(*q14s16);
-
-    d16s16 = vdup_n_s16(cospi_16_64);
-
-    q9s32 = vmull_s16(d28s16, d16s16);
-    q10s32 = vmull_s16(d29s16, d16s16);
-    q11s32 = vmull_s16(d28s16, d16s16);
-    q12s32 = vmull_s16(d29s16, d16s16);
-
-    q9s32 = vmlsl_s16(q9s32,  d26s16, d16s16);
-    q10s32 = vmlsl_s16(q10s32, d27s16, d16s16);
-    q11s32 = vmlal_s16(q11s32, d26s16, d16s16);
-    q12s32 = vmlal_s16(q12s32, d27s16, d16s16);
-
-    d10s16 = vqrshrn_n_s32(q9s32, 14);
-    d11s16 = vqrshrn_n_s32(q10s32, 14);
-    d12s16 = vqrshrn_n_s32(q11s32, 14);
-    d13s16 = vqrshrn_n_s32(q12s32, 14);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    *q8s16 = vaddq_s16(q0s16, q7s16);
-    *q9s16 = vaddq_s16(q1s16, q6s16);
-    *q10s16 = vaddq_s16(q2s16, q5s16);
-    *q11s16 = vaddq_s16(q3s16, q4s16);
-    *q12s16 = vsubq_s16(q3s16, q4s16);
-    *q13s16 = vsubq_s16(q2s16, q5s16);
-    *q14s16 = vsubq_s16(q1s16, q6s16);
-    *q15s16 = vsubq_s16(q0s16, q7s16);
-    return;
+static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
+                              int16x8_t *q10s16, int16x8_t *q11s16,
+                              int16x8_t *q12s16, int16x8_t *q13s16,
+                              int16x8_t *q14s16, int16x8_t *q15s16) {
+  int16x4_t d0s16, d1s16, d2s16, d3s16;
+  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
+  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
+  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
+  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
+  int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32;
+  int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;
+
+  d0s16 = vdup_n_s16(cospi_28_64);
+  d1s16 = vdup_n_s16(cospi_4_64);
+  d2s16 = vdup_n_s16(cospi_12_64);
+  d3s16 = vdup_n_s16(cospi_20_64);
+
+  d16s16 = vget_low_s16(*q8s16);
+  d17s16 = vget_high_s16(*q8s16);
+  d18s16 = vget_low_s16(*q9s16);
+  d19s16 = vget_high_s16(*q9s16);
+  d20s16 = vget_low_s16(*q10s16);
+  d21s16 = vget_high_s16(*q10s16);
+  d22s16 = vget_low_s16(*q11s16);
+  d23s16 = vget_high_s16(*q11s16);
+  d24s16 = vget_low_s16(*q12s16);
+  d25s16 = vget_high_s16(*q12s16);
+  d26s16 = vget_low_s16(*q13s16);
+  d27s16 = vget_high_s16(*q13s16);
+  d28s16 = vget_low_s16(*q14s16);
+  d29s16 = vget_high_s16(*q14s16);
+  d30s16 = vget_low_s16(*q15s16);
+  d31s16 = vget_high_s16(*q15s16);
+
+  q2s32 = vmull_s16(d18s16, d0s16);
+  q3s32 = vmull_s16(d19s16, d0s16);
+  q5s32 = vmull_s16(d26s16, d2s16);
+  q6s32 = vmull_s16(d27s16, d2s16);
+
+  q2s32 = vmlsl_s16(q2s32, d30s16, d1s16);
+  q3s32 = vmlsl_s16(q3s32, d31s16, d1s16);
+  q5s32 = vmlsl_s16(q5s32, d22s16, d3s16);
+  q6s32 = vmlsl_s16(q6s32, d23s16, d3s16);
+
+  d8s16 = vqrshrn_n_s32(q2s32, 14);
+  d9s16 = vqrshrn_n_s32(q3s32, 14);
+  d10s16 = vqrshrn_n_s32(q5s32, 14);
+  d11s16 = vqrshrn_n_s32(q6s32, 14);
+  q4s16 = vcombine_s16(d8s16, d9s16);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+
+  q2s32 = vmull_s16(d18s16, d1s16);
+  q3s32 = vmull_s16(d19s16, d1s16);
+  q9s32 = vmull_s16(d26s16, d3s16);
+  q13s32 = vmull_s16(d27s16, d3s16);
+
+  q2s32 = vmlal_s16(q2s32, d30s16, d0s16);
+  q3s32 = vmlal_s16(q3s32, d31s16, d0s16);
+  q9s32 = vmlal_s16(q9s32, d22s16, d2s16);
+  q13s32 = vmlal_s16(q13s32, d23s16, d2s16);
+
+  d14s16 = vqrshrn_n_s32(q2s32, 14);
+  d15s16 = vqrshrn_n_s32(q3s32, 14);
+  d12s16 = vqrshrn_n_s32(q9s32, 14);
+  d13s16 = vqrshrn_n_s32(q13s32, 14);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+  q7s16 = vcombine_s16(d14s16, d15s16);
+
+  d0s16 = vdup_n_s16(cospi_16_64);
+
+  q2s32 = vmull_s16(d16s16, d0s16);
+  q3s32 = vmull_s16(d17s16, d0s16);
+  q13s32 = vmull_s16(d16s16, d0s16);
+  q15s32 = vmull_s16(d17s16, d0s16);
+
+  q2s32 = vmlal_s16(q2s32, d24s16, d0s16);
+  q3s32 = vmlal_s16(q3s32, d25s16, d0s16);
+  q13s32 = vmlsl_s16(q13s32, d24s16, d0s16);
+  q15s32 = vmlsl_s16(q15s32, d25s16, d0s16);
+
+  d0s16 = vdup_n_s16(cospi_24_64);
+  d1s16 = vdup_n_s16(cospi_8_64);
+
+  d18s16 = vqrshrn_n_s32(q2s32, 14);
+  d19s16 = vqrshrn_n_s32(q3s32, 14);
+  d22s16 = vqrshrn_n_s32(q13s32, 14);
+  d23s16 = vqrshrn_n_s32(q15s32, 14);
+  *q9s16 = vcombine_s16(d18s16, d19s16);
+  *q11s16 = vcombine_s16(d22s16, d23s16);
+
+  q2s32 = vmull_s16(d20s16, d0s16);
+  q3s32 = vmull_s16(d21s16, d0s16);
+  q8s32 = vmull_s16(d20s16, d1s16);
+  q12s32 = vmull_s16(d21s16, d1s16);
+
+  q2s32 = vmlsl_s16(q2s32, d28s16, d1s16);
+  q3s32 = vmlsl_s16(q3s32, d29s16, d1s16);
+  q8s32 = vmlal_s16(q8s32, d28s16, d0s16);
+  q12s32 = vmlal_s16(q12s32, d29s16, d0s16);
+
+  d26s16 = vqrshrn_n_s32(q2s32, 14);
+  d27s16 = vqrshrn_n_s32(q3s32, 14);
+  d30s16 = vqrshrn_n_s32(q8s32, 14);
+  d31s16 = vqrshrn_n_s32(q12s32, 14);
+  *q13s16 = vcombine_s16(d26s16, d27s16);
+  *q15s16 = vcombine_s16(d30s16, d31s16);
+
+  q0s16 = vaddq_s16(*q9s16, *q15s16);
+  q1s16 = vaddq_s16(*q11s16, *q13s16);
+  q2s16 = vsubq_s16(*q11s16, *q13s16);
+  q3s16 = vsubq_s16(*q9s16, *q15s16);
+
+  *q13s16 = vsubq_s16(q4s16, q5s16);
+  q4s16 = vaddq_s16(q4s16, q5s16);
+  *q14s16 = vsubq_s16(q7s16, q6s16);
+  q7s16 = vaddq_s16(q7s16, q6s16);
+  d26s16 = vget_low_s16(*q13s16);
+  d27s16 = vget_high_s16(*q13s16);
+  d28s16 = vget_low_s16(*q14s16);
+  d29s16 = vget_high_s16(*q14s16);
+
+  d16s16 = vdup_n_s16(cospi_16_64);
+
+  q9s32 = vmull_s16(d28s16, d16s16);
+  q10s32 = vmull_s16(d29s16, d16s16);
+  q11s32 = vmull_s16(d28s16, d16s16);
+  q12s32 = vmull_s16(d29s16, d16s16);
+
+  q9s32 = vmlsl_s16(q9s32, d26s16, d16s16);
+  q10s32 = vmlsl_s16(q10s32, d27s16, d16s16);
+  q11s32 = vmlal_s16(q11s32, d26s16, d16s16);
+  q12s32 = vmlal_s16(q12s32, d27s16, d16s16);
+
+  d10s16 = vqrshrn_n_s32(q9s32, 14);
+  d11s16 = vqrshrn_n_s32(q10s32, 14);
+  d12s16 = vqrshrn_n_s32(q11s32, 14);
+  d13s16 = vqrshrn_n_s32(q12s32, 14);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  *q8s16 = vaddq_s16(q0s16, q7s16);
+  *q9s16 = vaddq_s16(q1s16, q6s16);
+  *q10s16 = vaddq_s16(q2s16, q5s16);
+  *q11s16 = vaddq_s16(q3s16, q4s16);
+  *q12s16 = vsubq_s16(q3s16, q4s16);
+  *q13s16 = vsubq_s16(q2s16, q5s16);
+  *q14s16 = vsubq_s16(q1s16, q6s16);
+  *q15s16 = vsubq_s16(q0s16, q7s16);
+  return;
 }
 
-void vpx_idct8x8_64_add_neon(
-        int16_t *input,
-        uint8_t *dest,
-        int dest_stride) {
-    uint8_t *d1, *d2;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8;
-    uint64x1_t d0u64, d1u64, d2u64, d3u64;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    uint16x8_t q8u16, q9u16, q10u16, q11u16;
-
-    q8s16 = vld1q_s16(input);
-    q9s16 = vld1q_s16(input + 8);
-    q10s16 = vld1q_s16(input + 16);
-    q11s16 = vld1q_s16(input + 24);
-    q12s16 = vld1q_s16(input + 32);
-    q13s16 = vld1q_s16(input + 40);
-    q14s16 = vld1q_s16(input + 48);
-    q15s16 = vld1q_s16(input + 56);
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-               &q12s16, &q13s16, &q14s16, &q15s16);
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-               &q12s16, &q13s16, &q14s16, &q15s16);
-
-    q8s16 = vrshrq_n_s16(q8s16, 5);
-    q9s16 = vrshrq_n_s16(q9s16, 5);
-    q10s16 = vrshrq_n_s16(q10s16, 5);
-    q11s16 = vrshrq_n_s16(q11s16, 5);
-    q12s16 = vrshrq_n_s16(q12s16, 5);
-    q13s16 = vrshrq_n_s16(q13s16, 5);
-    q14s16 = vrshrq_n_s16(q14s16, 5);
-    q15s16 = vrshrq_n_s16(q15s16, 5);
-
-    d1 = d2 = dest;
-
-    d0u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d1u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d2u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d3u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-
-    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16),
-                     vreinterpret_u8_u64(d0u64));
-    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16),
-                     vreinterpret_u8_u64(d1u64));
-    q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16),
-                      vreinterpret_u8_u64(d2u64));
-    q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16),
-                      vreinterpret_u8_u64(d3u64));
-
-    d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
-    d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
-
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
-    d2 += dest_stride;
-
-    q8s16 = q12s16;
-    q9s16 = q13s16;
-    q10s16 = q14s16;
-    q11s16 = q15s16;
-
-    d0u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d1u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d2u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d3u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-
-    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16),
-                     vreinterpret_u8_u64(d0u64));
-    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16),
-                     vreinterpret_u8_u64(d1u64));
-    q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16),
-                      vreinterpret_u8_u64(d2u64));
-    q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16),
-                      vreinterpret_u8_u64(d3u64));
-
-    d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
-    d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
-
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
-    d2 += dest_stride;
-    return;
+void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+  uint8_t *d1, *d2;
+  uint8x8_t d0u8, d1u8, d2u8, d3u8;
+  uint64x1_t d0u64, d1u64, d2u64, d3u64;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  uint16x8_t q8u16, q9u16, q10u16, q11u16;
+
+  q8s16 = vld1q_s16(input);
+  q9s16 = vld1q_s16(input + 8);
+  q10s16 = vld1q_s16(input + 16);
+  q11s16 = vld1q_s16(input + 24);
+  q12s16 = vld1q_s16(input + 32);
+  q13s16 = vld1q_s16(input + 40);
+  q14s16 = vld1q_s16(input + 48);
+  q15s16 = vld1q_s16(input + 56);
+
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
+
+  IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+             &q15s16);
+
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
+
+  IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+             &q15s16);
+
+  q8s16 = vrshrq_n_s16(q8s16, 5);
+  q9s16 = vrshrq_n_s16(q9s16, 5);
+  q10s16 = vrshrq_n_s16(q10s16, 5);
+  q11s16 = vrshrq_n_s16(q11s16, 5);
+  q12s16 = vrshrq_n_s16(q12s16, 5);
+  q13s16 = vrshrq_n_s16(q13s16, 5);
+  q14s16 = vrshrq_n_s16(q14s16, 5);
+  q15s16 = vrshrq_n_s16(q15s16, 5);
+
+  d1 = d2 = dest;
+
+  d0u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d1u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d2u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d3u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+
+  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64));
+  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64));
+  q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64));
+  q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64));
+
+  d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+  d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+  d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
+  d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
+
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
+  d2 += dest_stride;
+
+  q8s16 = q12s16;
+  q9s16 = q13s16;
+  q10s16 = q14s16;
+  q11s16 = q15s16;
+
+  d0u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d1u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d2u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d3u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+
+  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64));
+  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64));
+  q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64));
+  q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64));
+
+  d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+  d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+  d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
+  d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
+
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
+  d2 += dest_stride;
+  return;
 }
 
-void vpx_idct8x8_12_add_neon(
-        int16_t *input,
-        uint8_t *dest,
-        int dest_stride) {
-    uint8_t *d1, *d2;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8;
-    int16x4_t d10s16, d11s16, d12s16, d13s16, d16s16;
-    int16x4_t d26s16, d27s16, d28s16, d29s16;
-    uint64x1_t d0u64, d1u64, d2u64, d3u64;
-    int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    uint16x8_t q8u16, q9u16, q10u16, q11u16;
-    int32x4_t q9s32, q10s32, q11s32, q12s32;
-
-    q8s16 = vld1q_s16(input);
-    q9s16 = vld1q_s16(input + 8);
-    q10s16 = vld1q_s16(input + 16);
-    q11s16 = vld1q_s16(input + 24);
-    q12s16 = vld1q_s16(input + 32);
-    q13s16 = vld1q_s16(input + 40);
-    q14s16 = vld1q_s16(input + 48);
-    q15s16 = vld1q_s16(input + 56);
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    // First transform rows
-    // stage 1
-    q0s16 = vdupq_n_s16(cospi_28_64 * 2);
-    q1s16 = vdupq_n_s16(cospi_4_64 * 2);
-
-    q4s16 = vqrdmulhq_s16(q9s16, q0s16);
-
-    q0s16 = vdupq_n_s16(-cospi_20_64 * 2);
-
-    q7s16 = vqrdmulhq_s16(q9s16, q1s16);
-
-    q1s16 = vdupq_n_s16(cospi_12_64 * 2);
-
-    q5s16 = vqrdmulhq_s16(q11s16, q0s16);
-
-    q0s16 = vdupq_n_s16(cospi_16_64 * 2);
-
-    q6s16 = vqrdmulhq_s16(q11s16, q1s16);
-
-    // stage 2 & stage 3 - even half
-    q1s16 = vdupq_n_s16(cospi_24_64 * 2);
-
-    q9s16 = vqrdmulhq_s16(q8s16, q0s16);
-
-    q0s16 = vdupq_n_s16(cospi_8_64 * 2);
-
-    q13s16 = vqrdmulhq_s16(q10s16, q1s16);
-
-    q15s16 = vqrdmulhq_s16(q10s16, q0s16);
-
-    // stage 3 -odd half
-    q0s16 = vaddq_s16(q9s16, q15s16);
-    q1s16 = vaddq_s16(q9s16, q13s16);
-    q2s16 = vsubq_s16(q9s16, q13s16);
-    q3s16 = vsubq_s16(q9s16, q15s16);
-
-    // stage 2 - odd half
-    q13s16 = vsubq_s16(q4s16, q5s16);
-    q4s16 = vaddq_s16(q4s16, q5s16);
-    q14s16 = vsubq_s16(q7s16, q6s16);
-    q7s16 = vaddq_s16(q7s16, q6s16);
-    d26s16 = vget_low_s16(q13s16);
-    d27s16 = vget_high_s16(q13s16);
-    d28s16 = vget_low_s16(q14s16);
-    d29s16 = vget_high_s16(q14s16);
-
-    d16s16 = vdup_n_s16(cospi_16_64);
-    q9s32 = vmull_s16(d28s16, d16s16);
-    q10s32 = vmull_s16(d29s16, d16s16);
-    q11s32 = vmull_s16(d28s16, d16s16);
-    q12s32 = vmull_s16(d29s16, d16s16);
-
-    q9s32 = vmlsl_s16(q9s32,  d26s16, d16s16);
-    q10s32 = vmlsl_s16(q10s32, d27s16, d16s16);
-    q11s32 = vmlal_s16(q11s32, d26s16, d16s16);
-    q12s32 = vmlal_s16(q12s32, d27s16, d16s16);
-
-    d10s16 = vqrshrn_n_s32(q9s32, 14);
-    d11s16 = vqrshrn_n_s32(q10s32, 14);
-    d12s16 = vqrshrn_n_s32(q11s32, 14);
-    d13s16 = vqrshrn_n_s32(q12s32, 14);
-    q5s16 = vcombine_s16(d10s16, d11s16);
-    q6s16 = vcombine_s16(d12s16, d13s16);
-
-    // stage 4
-    q8s16 = vaddq_s16(q0s16, q7s16);
-    q9s16 = vaddq_s16(q1s16, q6s16);
-    q10s16 = vaddq_s16(q2s16, q5s16);
-    q11s16 = vaddq_s16(q3s16, q4s16);
-    q12s16 = vsubq_s16(q3s16, q4s16);
-    q13s16 = vsubq_s16(q2s16, q5s16);
-    q14s16 = vsubq_s16(q1s16, q6s16);
-    q15s16 = vsubq_s16(q0s16, q7s16);
-
-    TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16,
-                 &q12s16, &q13s16, &q14s16, &q15s16);
-
-    IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16,
-               &q12s16, &q13s16, &q14s16, &q15s16);
-
-    q8s16 = vrshrq_n_s16(q8s16, 5);
-    q9s16 = vrshrq_n_s16(q9s16, 5);
-    q10s16 = vrshrq_n_s16(q10s16, 5);
-    q11s16 = vrshrq_n_s16(q11s16, 5);
-    q12s16 = vrshrq_n_s16(q12s16, 5);
-    q13s16 = vrshrq_n_s16(q13s16, 5);
-    q14s16 = vrshrq_n_s16(q14s16, 5);
-    q15s16 = vrshrq_n_s16(q15s16, 5);
-
-    d1 = d2 = dest;
-
-    d0u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d1u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d2u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d3u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-
-    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16),
-                     vreinterpret_u8_u64(d0u64));
-    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16),
-                     vreinterpret_u8_u64(d1u64));
-    q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16),
-                      vreinterpret_u8_u64(d2u64));
-    q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16),
-                      vreinterpret_u8_u64(d3u64));
-
-    d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
-    d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
-
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
-    d2 += dest_stride;
-
-    q8s16 = q12s16;
-    q9s16 = q13s16;
-    q10s16 = q14s16;
-    q11s16 = q15s16;
-
-    d0u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d1u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d2u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-    d3u64 = vld1_u64((uint64_t *)d1);
-    d1 += dest_stride;
-
-    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16),
-                     vreinterpret_u8_u64(d0u64));
-    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16),
-                     vreinterpret_u8_u64(d1u64));
-    q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16),
-                      vreinterpret_u8_u64(d2u64));
-    q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16),
-                      vreinterpret_u8_u64(d3u64));
-
-    d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
-    d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
-
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
-    d2 += dest_stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
-    d2 += dest_stride;
-    return;
+void vpx_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
+  uint8_t *d1, *d2;
+  uint8x8_t d0u8, d1u8, d2u8, d3u8;
+  int16x4_t d10s16, d11s16, d12s16, d13s16, d16s16;
+  int16x4_t d26s16, d27s16, d28s16, d29s16;
+  uint64x1_t d0u64, d1u64, d2u64, d3u64;
+  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
+  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+  uint16x8_t q8u16, q9u16, q10u16, q11u16;
+  int32x4_t q9s32, q10s32, q11s32, q12s32;
+
+  q8s16 = vld1q_s16(input);
+  q9s16 = vld1q_s16(input + 8);
+  q10s16 = vld1q_s16(input + 16);
+  q11s16 = vld1q_s16(input + 24);
+  q12s16 = vld1q_s16(input + 32);
+  q13s16 = vld1q_s16(input + 40);
+  q14s16 = vld1q_s16(input + 48);
+  q15s16 = vld1q_s16(input + 56);
+
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
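+  // The _12 variant assumes the non-zero coefficients are confined to the
+  // top-left 4x4 of the input, so the first pass reads only q8-q11 below.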
+
+  // First transform rows
+  // stage 1
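+  // vqrdmulhq_s16 computes round(a * b / 2^15), so the Q14 cospi constants
+  // are pre-doubled to produce the usual round(a * cospi / 2^14).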
+  q0s16 = vdupq_n_s16(cospi_28_64 * 2);
+  q1s16 = vdupq_n_s16(cospi_4_64 * 2);
+
+  q4s16 = vqrdmulhq_s16(q9s16, q0s16);
+
+  q0s16 = vdupq_n_s16(-cospi_20_64 * 2);
+
+  q7s16 = vqrdmulhq_s16(q9s16, q1s16);
+
+  q1s16 = vdupq_n_s16(cospi_12_64 * 2);
+
+  q5s16 = vqrdmulhq_s16(q11s16, q0s16);
+
+  q0s16 = vdupq_n_s16(cospi_16_64 * 2);
+
+  q6s16 = vqrdmulhq_s16(q11s16, q1s16);
+
+  // stage 2 & stage 3 - even half
+  q1s16 = vdupq_n_s16(cospi_24_64 * 2);
+
+  q9s16 = vqrdmulhq_s16(q8s16, q0s16);
+
+  q0s16 = vdupq_n_s16(cospi_8_64 * 2);
+
+  q13s16 = vqrdmulhq_s16(q10s16, q1s16);
+
+  q15s16 = vqrdmulhq_s16(q10s16, q0s16);
+
+  // stage 3 - odd half
+  q0s16 = vaddq_s16(q9s16, q15s16);
+  q1s16 = vaddq_s16(q9s16, q13s16);
+  q2s16 = vsubq_s16(q9s16, q13s16);
+  q3s16 = vsubq_s16(q9s16, q15s16);
+
+  // stage 2 - odd half
+  q13s16 = vsubq_s16(q4s16, q5s16);
+  q4s16 = vaddq_s16(q4s16, q5s16);
+  q14s16 = vsubq_s16(q7s16, q6s16);
+  q7s16 = vaddq_s16(q7s16, q6s16);
+  d26s16 = vget_low_s16(q13s16);
+  d27s16 = vget_high_s16(q13s16);
+  d28s16 = vget_low_s16(q14s16);
+  d29s16 = vget_high_s16(q14s16);
+
+  d16s16 = vdup_n_s16(cospi_16_64);
+  q9s32 = vmull_s16(d28s16, d16s16);
+  q10s32 = vmull_s16(d29s16, d16s16);
+  q11s32 = vmull_s16(d28s16, d16s16);
+  q12s32 = vmull_s16(d29s16, d16s16);
+
+  q9s32 = vmlsl_s16(q9s32, d26s16, d16s16);
+  q10s32 = vmlsl_s16(q10s32, d27s16, d16s16);
+  q11s32 = vmlal_s16(q11s32, d26s16, d16s16);
+  q12s32 = vmlal_s16(q12s32, d27s16, d16s16);
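+  // After the rounding narrows below: q5 = (q14 - q13) * cospi_16_64 >> 14
+  // and q6 = (q14 + q13) * cospi_16_64 >> 14.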
+
+  d10s16 = vqrshrn_n_s32(q9s32, 14);
+  d11s16 = vqrshrn_n_s32(q10s32, 14);
+  d12s16 = vqrshrn_n_s32(q11s32, 14);
+  d13s16 = vqrshrn_n_s32(q12s32, 14);
+  q5s16 = vcombine_s16(d10s16, d11s16);
+  q6s16 = vcombine_s16(d12s16, d13s16);
+
+  // stage 4
+  q8s16 = vaddq_s16(q0s16, q7s16);
+  q9s16 = vaddq_s16(q1s16, q6s16);
+  q10s16 = vaddq_s16(q2s16, q5s16);
+  q11s16 = vaddq_s16(q3s16, q4s16);
+  q12s16 = vsubq_s16(q3s16, q4s16);
+  q13s16 = vsubq_s16(q2s16, q5s16);
+  q14s16 = vsubq_s16(q1s16, q6s16);
+  q15s16 = vsubq_s16(q0s16, q7s16);
+
+  TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+               &q15s16);
+
+  IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
+             &q15s16);
+
+  q8s16 = vrshrq_n_s16(q8s16, 5);
+  q9s16 = vrshrq_n_s16(q9s16, 5);
+  q10s16 = vrshrq_n_s16(q10s16, 5);
+  q11s16 = vrshrq_n_s16(q11s16, 5);
+  q12s16 = vrshrq_n_s16(q12s16, 5);
+  q13s16 = vrshrq_n_s16(q13s16, 5);
+  q14s16 = vrshrq_n_s16(q14s16, 5);
+  q15s16 = vrshrq_n_s16(q15s16, 5);
+
+  d1 = d2 = dest;
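+  // d1 is the read pointer and d2 the write pointer into dest: each row is
+  // loaded, the residual added with saturation, then stored back.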
+
+  d0u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d1u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d2u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d3u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+
+  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64));
+  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64));
+  q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64));
+  q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64));
+
+  d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+  d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+  d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
+  d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
+
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
+  d2 += dest_stride;
+
+  q8s16 = q12s16;
+  q9s16 = q13s16;
+  q10s16 = q14s16;
+  q11s16 = q15s16;
+
+  d0u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d1u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d2u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+  d3u64 = vld1_u64((uint64_t *)d1);
+  d1 += dest_stride;
+
+  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64));
+  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64));
+  q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64));
+  q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64));
+
+  d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
+  d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
+  d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
+  d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
+
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
+  d2 += dest_stride;
+  vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
+  d2 += dest_stride;
+  return;
 }
diff --git a/vpx_dsp/arm/intrapred_neon.c b/vpx_dsp/arm/intrapred_neon.c
index 0a376104d2bbc1f0d57c3e6062f95f4f91b53361..32dd1ba14606f25efabf12195ff882f147a31556 100644
--- a/vpx_dsp/arm/intrapred_neon.c
+++ b/vpx_dsp/arm/intrapred_neon.c
@@ -18,9 +18,8 @@
 // DC 4x4
 
 // 'do_above' and 'do_left' facilitate branch removal when inlined.
-static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride,
-                          const uint8_t *above, const uint8_t *left,
-                          int do_above, int do_left) {
+static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride, const uint8_t *above,
+                          const uint8_t *left, int do_above, int do_left) {
   uint16x8_t sum_top;
   uint16x8_t sum_left;
   uint8x8_t dc0;
@@ -33,7 +32,7 @@ static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride,
   }
 
   if (do_left) {
-    const uint8x8_t L = vld1_u8(left);  // left border
+    const uint8x8_t L = vld1_u8(left);   // left border
     const uint16x4_t p0 = vpaddl_u8(L);  // cascading summation of the left
     const uint16x4_t p1 = vpadd_u16(p0, p0);
     sum_left = vcombine_u16(p1, p1);
@@ -54,7 +53,7 @@ static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride,
     const uint8x8_t dc = vdup_lane_u8(dc0, 0);
     int i;
     for (i = 0; i < 4; ++i) {
-      vst1_lane_u32((uint32_t*)(dst + i * stride), vreinterpret_u32_u8(dc), 0);
+      vst1_lane_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc), 0);
     }
   }
 }
@@ -87,9 +86,8 @@ void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
 // DC 8x8
 
 // 'do_above' and 'do_left' facilitate branch removal when inlined.
-static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride,
-                          const uint8_t *above, const uint8_t *left,
-                          int do_above, int do_left) {
+static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, const uint8_t *above,
+                          const uint8_t *left, int do_above, int do_left) {
   uint16x8_t sum_top;
   uint16x8_t sum_left;
   uint8x8_t dc0;
@@ -103,7 +101,7 @@ static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride,
   }
 
   if (do_left) {
-    const uint8x8_t L = vld1_u8(left);  // left border
+    const uint8x8_t L = vld1_u8(left);   // left border
     const uint16x4_t p0 = vpaddl_u8(L);  // cascading summation of the left
     const uint16x4_t p1 = vpadd_u16(p0, p0);
     const uint16x4_t p2 = vpadd_u16(p1, p1);
@@ -125,7 +123,7 @@ static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride,
     const uint8x8_t dc = vdup_lane_u8(dc0, 0);
     int i;
     for (i = 0; i < 8; ++i) {
-      vst1_u32((uint32_t*)(dst + i * stride), vreinterpret_u32_u8(dc));
+      vst1_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc));
     }
   }
 }
@@ -167,7 +165,7 @@ static INLINE void dc_16x16(uint8_t *dst, ptrdiff_t stride,
 
   if (do_above) {
     const uint8x16_t A = vld1q_u8(above);  // top row
-    const uint16x8_t p0 = vpaddlq_u8(A);  // cascading summation of the top
+    const uint16x8_t p0 = vpaddlq_u8(A);   // cascading summation of the top
     const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
     const uint16x4_t p2 = vpadd_u16(p1, p1);
     const uint16x4_t p3 = vpadd_u16(p2, p2);
@@ -425,8 +423,7 @@ void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
   (void)left;
 
   d0u8 = vld1_u8(above);
-  for (i = 0; i < 8; i++, dst += stride)
-    vst1_u8(dst, d0u8);
+  for (i = 0; i < 8; i++, dst += stride) vst1_u8(dst, d0u8);
 }
 
 void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
@@ -436,8 +433,7 @@ void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
   (void)left;
 
   q0u8 = vld1q_u8(above);
-  for (i = 0; i < 16; i++, dst += stride)
-    vst1q_u8(dst, q0u8);
+  for (i = 0; i < 16; i++, dst += stride) vst1q_u8(dst, q0u8);
 }
 
 void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
@@ -608,8 +604,8 @@ void vpx_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
   q3u16 = vsubl_u8(vreinterpret_u8_u32(d2u32), d0u8);
   for (i = 0; i < 4; i++, dst += stride) {
     q1u16 = vdupq_n_u16((uint16_t)left[i]);
-    q1s16 = vaddq_s16(vreinterpretq_s16_u16(q1u16),
-                      vreinterpretq_s16_u16(q3u16));
+    q1s16 =
+        vaddq_s16(vreinterpretq_s16_u16(q1u16), vreinterpretq_s16_u16(q3u16));
     d0u8 = vqmovun_s16(q1s16);
     vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
   }
@@ -631,26 +627,26 @@ void vpx_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
   d20u16 = vget_low_u16(q10u16);
   for (j = 0; j < 2; j++, d20u16 = vget_high_u16(q10u16)) {
     q0u16 = vdupq_lane_u16(d20u16, 0);
-    q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
-                      vreinterpretq_s16_u16(q0u16));
+    q0s16 =
+        vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16));
     d0u8 = vqmovun_s16(q0s16);
     vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
     dst += stride;
     q0u16 = vdupq_lane_u16(d20u16, 1);
-    q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
-                      vreinterpretq_s16_u16(q0u16));
+    q0s16 =
+        vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16));
     d0u8 = vqmovun_s16(q0s16);
     vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
     dst += stride;
     q0u16 = vdupq_lane_u16(d20u16, 2);
-    q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
-                      vreinterpretq_s16_u16(q0u16));
+    q0s16 =
+        vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16));
     d0u8 = vqmovun_s16(q0s16);
     vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
     dst += stride;
     q0u16 = vdupq_lane_u16(d20u16, 3);
-    q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
-                      vreinterpretq_s16_u16(q0u16));
+    q0s16 =
+        vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16));
     d0u8 = vqmovun_s16(q0s16);
     vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
     dst += stride;
@@ -677,14 +673,14 @@ void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
     for (j = 0; j < 2; j++, d20u16 = vget_high_u16(q10u16)) {
       q0u16 = vdupq_lane_u16(d20u16, 0);
       q8u16 = vdupq_lane_u16(d20u16, 1);
-      q1s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                        vreinterpretq_s16_u16(q2u16));
-      q0s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                        vreinterpretq_s16_u16(q3u16));
-      q11s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
-                         vreinterpretq_s16_u16(q2u16));
-      q8s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
-                        vreinterpretq_s16_u16(q3u16));
+      q1s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q2u16));
+      q0s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q3u16));
+      q11s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q2u16));
+      q8s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q3u16));
       d2u8 = vqmovun_s16(q1s16);
       d3u8 = vqmovun_s16(q0s16);
       d22u8 = vqmovun_s16(q11s16);
@@ -698,14 +694,14 @@ void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
 
       q0u16 = vdupq_lane_u16(d20u16, 2);
       q8u16 = vdupq_lane_u16(d20u16, 3);
-      q1s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                        vreinterpretq_s16_u16(q2u16));
-      q0s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                        vreinterpretq_s16_u16(q3u16));
-      q11s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
-                         vreinterpretq_s16_u16(q2u16));
-      q8s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
-                        vreinterpretq_s16_u16(q3u16));
+      q1s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q2u16));
+      q0s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q3u16));
+      q11s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q2u16));
+      q8s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q3u16));
       d2u8 = vqmovun_s16(q1s16);
       d3u8 = vqmovun_s16(q0s16);
       d22u8 = vqmovun_s16(q11s16);
@@ -742,10 +738,10 @@ void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
     d6u16 = vget_low_u16(q3u16);
     for (j = 0; j < 2; j++, d6u16 = vget_high_u16(q3u16)) {
       q0u16 = vdupq_lane_u16(d6u16, 0);
-      q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q8u16));
-      q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q9u16));
+      q12s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16));
+      q13s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16));
       q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                          vreinterpretq_s16_u16(q10u16));
       q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
@@ -761,10 +757,10 @@ void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
       dst += stride;
 
       q0u16 = vdupq_lane_u16(d6u16, 1);
-      q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q8u16));
-      q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q9u16));
+      q12s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16));
+      q13s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16));
       q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                          vreinterpretq_s16_u16(q10u16));
       q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
@@ -780,10 +776,10 @@ void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
       dst += stride;
 
       q0u16 = vdupq_lane_u16(d6u16, 2);
-      q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q8u16));
-      q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q9u16));
+      q12s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16));
+      q13s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16));
       q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                          vreinterpretq_s16_u16(q10u16));
       q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
@@ -799,10 +795,10 @@ void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
       dst += stride;
 
       q0u16 = vdupq_lane_u16(d6u16, 3);
-      q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q8u16));
-      q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q9u16));
+      q12s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16));
+      q13s16 =
+          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16));
       q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                          vreinterpretq_s16_u16(q10u16));
       q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
diff --git a/vpx_dsp/arm/loopfilter_16_neon.c b/vpx_dsp/arm/loopfilter_16_neon.c
index d24e6adc8a64a7bc72a91b5eecc19730c9288758..9607bb24056b4cc39212a5781ba812da10fbec3b 100644
--- a/vpx_dsp/arm/loopfilter_16_neon.c
+++ b/vpx_dsp/arm/loopfilter_16_neon.c
@@ -14,166 +14,160 @@
 #include "./vpx_config.h"
 #include "vpx/vpx_integer.h"
 
-static INLINE void loop_filter_neon_16(
-        uint8x16_t qblimit,  // blimit
-        uint8x16_t qlimit,   // limit
-        uint8x16_t qthresh,  // thresh
-        uint8x16_t q3,       // p3
-        uint8x16_t q4,       // p2
-        uint8x16_t q5,       // p1
-        uint8x16_t q6,       // p0
-        uint8x16_t q7,       // q0
-        uint8x16_t q8,       // q1
-        uint8x16_t q9,       // q2
-        uint8x16_t q10,      // q3
-        uint8x16_t *q5r,     // p1
-        uint8x16_t *q6r,     // p0
-        uint8x16_t *q7r,     // q0
-        uint8x16_t *q8r) {   // q1
-    uint8x16_t q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8;
-    int16x8_t q2s16, q11s16;
-    uint16x8_t q4u16;
-    int8x16_t q0s8, q1s8, q2s8, q11s8, q12s8, q13s8;
-    int8x8_t d2s8, d3s8;
-
-    q11u8 = vabdq_u8(q3, q4);
-    q12u8 = vabdq_u8(q4, q5);
-    q13u8 = vabdq_u8(q5, q6);
-    q14u8 = vabdq_u8(q8, q7);
-    q3 = vabdq_u8(q9, q8);
-    q4 = vabdq_u8(q10, q9);
-
-    q11u8 = vmaxq_u8(q11u8, q12u8);
-    q12u8 = vmaxq_u8(q13u8, q14u8);
-    q3 = vmaxq_u8(q3, q4);
-    q15u8 = vmaxq_u8(q11u8, q12u8);
-
-    q9 = vabdq_u8(q6, q7);
-
-    // vp8_hevmask
-    q13u8 = vcgtq_u8(q13u8, qthresh);
-    q14u8 = vcgtq_u8(q14u8, qthresh);
-    q15u8 = vmaxq_u8(q15u8, q3);
-
-    q2u8 = vabdq_u8(q5, q8);
-    q9 = vqaddq_u8(q9, q9);
-
-    q15u8 = vcgeq_u8(qlimit, q15u8);
-
-    // vp8_filter() function
-    // convert to signed
-    q10 = vdupq_n_u8(0x80);
-    q8 = veorq_u8(q8, q10);
-    q7 = veorq_u8(q7, q10);
-    q6 = veorq_u8(q6, q10);
-    q5 = veorq_u8(q5, q10);
-
-    q2u8 = vshrq_n_u8(q2u8, 1);
-    q9 = vqaddq_u8(q9, q2u8);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
-                     vget_low_s8(vreinterpretq_s8_u8(q6)));
-    q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
-                      vget_high_s8(vreinterpretq_s8_u8(q6)));
-
-    q9 = vcgeq_u8(qblimit, q9);
-
-    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
-                    vreinterpretq_s8_u8(q8));
-
-    q14u8 = vorrq_u8(q13u8, q14u8);
-
-    q4u16 = vdupq_n_u16(3);
-    q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16));
-    q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16));
-
-    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8);
-    q15u8 = vandq_u8(q15u8, q9);
-
-    q1s8 = vreinterpretq_s8_u8(q1u8);
-    q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
-    q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8));
-
-    q4 = vdupq_n_u8(3);
-    q9 = vdupq_n_u8(4);
-    // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
-    d2s8 = vqmovn_s16(q2s16);
-    d3s8 = vqmovn_s16(q11s16);
-    q1s8 = vcombine_s8(d2s8, d3s8);
-    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8);
-    q1s8 = vreinterpretq_s8_u8(q1u8);
-
-    q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q4));
-    q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q1s8 = vshrq_n_s8(q1s8, 3);
-
-    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8);
-    q0s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
-
-    q1s8 = vrshrq_n_s8(q1s8, 1);
-    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
-
-    q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
-    q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8);
-
-    *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q10);
-    *q7r = veorq_u8(vreinterpretq_u8_s8(q0s8),  q10);
-    *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q10);
-    *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q10);
-    return;
+static INLINE void loop_filter_neon_16(uint8x16_t qblimit,  // blimit
+                                       uint8x16_t qlimit,   // limit
+                                       uint8x16_t qthresh,  // thresh
+                                       uint8x16_t q3,       // p3
+                                       uint8x16_t q4,       // p2
+                                       uint8x16_t q5,       // p1
+                                       uint8x16_t q6,       // p0
+                                       uint8x16_t q7,       // q0
+                                       uint8x16_t q8,       // q1
+                                       uint8x16_t q9,       // q2
+                                       uint8x16_t q10,      // q3
+                                       uint8x16_t *q5r,     // p1
+                                       uint8x16_t *q6r,     // p0
+                                       uint8x16_t *q7r,     // q0
+                                       uint8x16_t *q8r) {   // q1
+  uint8x16_t q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8;
+  int16x8_t q2s16, q11s16;
+  uint16x8_t q4u16;
+  int8x16_t q0s8, q1s8, q2s8, q11s8, q12s8, q13s8;
+  int8x8_t d2s8, d3s8;
+
+  q11u8 = vabdq_u8(q3, q4);
+  q12u8 = vabdq_u8(q4, q5);
+  q13u8 = vabdq_u8(q5, q6);
+  q14u8 = vabdq_u8(q8, q7);
+  q3 = vabdq_u8(q9, q8);
+  q4 = vabdq_u8(q10, q9);
+
+  q11u8 = vmaxq_u8(q11u8, q12u8);
+  q12u8 = vmaxq_u8(q13u8, q14u8);
+  q3 = vmaxq_u8(q3, q4);
+  q15u8 = vmaxq_u8(q11u8, q12u8);
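+  // Running max of the neighboring-pixel differences; the filter mask needs
+  // every |p3-p2| ... |q3-q2| to be <= limit.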
+
+  q9 = vabdq_u8(q6, q7);
+
+  // vp8_hevmask
+  q13u8 = vcgtq_u8(q13u8, qthresh);
+  q14u8 = vcgtq_u8(q14u8, qthresh);
+  q15u8 = vmaxq_u8(q15u8, q3);
+
+  q2u8 = vabdq_u8(q5, q8);
+  q9 = vqaddq_u8(q9, q9);
+
+  q15u8 = vcgeq_u8(qlimit, q15u8);
+
+  // vp8_filter() function
+  // convert to signed
+  q10 = vdupq_n_u8(0x80);
+  q8 = veorq_u8(q8, q10);
+  q7 = veorq_u8(q7, q10);
+  q6 = veorq_u8(q6, q10);
+  q5 = veorq_u8(q5, q10);
+
+  q2u8 = vshrq_n_u8(q2u8, 1);
+  q9 = vqaddq_u8(q9, q2u8);
+
+  q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
+                   vget_low_s8(vreinterpretq_s8_u8(q6)));
+  q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
+                    vget_high_s8(vreinterpretq_s8_u8(q6)));
+
+  q9 = vcgeq_u8(qblimit, q9);
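+  // q9 is now the edge mask: 2 * |p0 - q0| + |p1 - q1| / 2 <= blimit.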
+
+  q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8));
+
+  q14u8 = vorrq_u8(q13u8, q14u8);
+
+  q4u16 = vdupq_n_u16(3);
+  q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16));
+  q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16));
+
+  q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8);
+  q15u8 = vandq_u8(q15u8, q9);
+
+  q1s8 = vreinterpretq_s8_u8(q1u8);
+  q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
+  q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8));
+
+  q4 = vdupq_n_u8(3);
+  q9 = vdupq_n_u8(4);
+  // vp8_filter = clamp(vp8_filter + 3 * (qs0 - ps0))
+  d2s8 = vqmovn_s16(q2s16);
+  d3s8 = vqmovn_s16(q11s16);
+  q1s8 = vcombine_s8(d2s8, d3s8);
+  q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8);
+  q1s8 = vreinterpretq_s8_u8(q1u8);
+
+  q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q4));
+  q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9));
+  q2s8 = vshrq_n_s8(q2s8, 3);
+  q1s8 = vshrq_n_s8(q1s8, 3);
+
+  q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8);
+  q0s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
+
+  q1s8 = vrshrq_n_s8(q1s8, 1);
+  q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
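+  // Outer taps p1/q1 take (filter + 1) >> 1, cleared where the
+  // high-edge-variance mask q14u8 is set.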
+
+  q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
+  q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8);
+
+  *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q10);
+  *q7r = veorq_u8(vreinterpretq_u8_s8(q0s8), q10);
+  *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q10);
+  *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q10);
+  return;
 }
 
-void vpx_lpf_horizontal_4_dual_neon(uint8_t *s, int p /* pitch */,
-                                    const uint8_t *blimit0,
-                                    const uint8_t *limit0,
-                                    const uint8_t *thresh0,
-                                    const uint8_t *blimit1,
-                                    const uint8_t *limit1,
-                                    const uint8_t *thresh1) {
-    uint8x8_t dblimit0, dlimit0, dthresh0, dblimit1, dlimit1, dthresh1;
-    uint8x16_t qblimit, qlimit, qthresh;
-    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8;
-
-    dblimit0 = vld1_u8(blimit0);
-    dlimit0 = vld1_u8(limit0);
-    dthresh0 = vld1_u8(thresh0);
-    dblimit1 = vld1_u8(blimit1);
-    dlimit1 = vld1_u8(limit1);
-    dthresh1 = vld1_u8(thresh1);
-    qblimit = vcombine_u8(dblimit0, dblimit1);
-    qlimit = vcombine_u8(dlimit0, dlimit1);
-    qthresh = vcombine_u8(dthresh0, dthresh1);
-
-    s -= (p << 2);
-
-    q3u8 = vld1q_u8(s);
-    s += p;
-    q4u8 = vld1q_u8(s);
-    s += p;
-    q5u8 = vld1q_u8(s);
-    s += p;
-    q6u8 = vld1q_u8(s);
-    s += p;
-    q7u8 = vld1q_u8(s);
-    s += p;
-    q8u8 = vld1q_u8(s);
-    s += p;
-    q9u8 = vld1q_u8(s);
-    s += p;
-    q10u8 = vld1q_u8(s);
-
-    loop_filter_neon_16(qblimit, qlimit, qthresh,
-                        q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8,
-                        &q5u8, &q6u8, &q7u8, &q8u8);
-
-    s -= (p * 5);
-    vst1q_u8(s, q5u8);
-    s += p;
-    vst1q_u8(s, q6u8);
-    s += p;
-    vst1q_u8(s, q7u8);
-    s += p;
-    vst1q_u8(s, q8u8);
-    return;
+void vpx_lpf_horizontal_4_dual_neon(
+    uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
+    const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
+    const uint8_t *limit1, const uint8_t *thresh1) {
+  uint8x8_t dblimit0, dlimit0, dthresh0, dblimit1, dlimit1, dthresh1;
+  uint8x16_t qblimit, qlimit, qthresh;
+  uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8;
+
+  dblimit0 = vld1_u8(blimit0);
+  dlimit0 = vld1_u8(limit0);
+  dthresh0 = vld1_u8(thresh0);
+  dblimit1 = vld1_u8(blimit1);
+  dlimit1 = vld1_u8(limit1);
+  dthresh1 = vld1_u8(thresh1);
+  qblimit = vcombine_u8(dblimit0, dblimit1);
+  qlimit = vcombine_u8(dlimit0, dlimit1);
+  qthresh = vcombine_u8(dthresh0, dthresh1);
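+  // The two 8-pixel filters run as a single 16-wide pass; the second block's
+  // limits occupy the high halves of the combined vectors.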
+
+  s -= (p << 2);
+
+  q3u8 = vld1q_u8(s);
+  s += p;
+  q4u8 = vld1q_u8(s);
+  s += p;
+  q5u8 = vld1q_u8(s);
+  s += p;
+  q6u8 = vld1q_u8(s);
+  s += p;
+  q7u8 = vld1q_u8(s);
+  s += p;
+  q8u8 = vld1q_u8(s);
+  s += p;
+  q9u8 = vld1q_u8(s);
+  s += p;
+  q10u8 = vld1q_u8(s);
+
+  loop_filter_neon_16(qblimit, qlimit, qthresh, q3u8, q4u8, q5u8, q6u8, q7u8,
+                      q8u8, q9u8, q10u8, &q5u8, &q6u8, &q7u8, &q8u8);
+
+  s -= (p * 5);
+  vst1q_u8(s, q5u8);
+  s += p;
+  vst1q_u8(s, q6u8);
+  s += p;
+  vst1q_u8(s, q7u8);
+  s += p;
+  vst1q_u8(s, q8u8);
+  return;
 }
diff --git a/vpx_dsp/arm/loopfilter_4_neon.c b/vpx_dsp/arm/loopfilter_4_neon.c
index 7ad411aea275b968256041d9d1f02877a9323823..b350e9ef305314cbb5f488c1928ee8e6f3d2f49e 100644
--- a/vpx_dsp/arm/loopfilter_4_neon.c
+++ b/vpx_dsp/arm/loopfilter_4_neon.c
@@ -12,263 +12,246 @@
 
 #include "./vpx_dsp_rtcd.h"
 
-static INLINE void loop_filter_neon(
-        uint8x8_t dblimit,    // flimit
-        uint8x8_t dlimit,     // limit
-        uint8x8_t dthresh,    // thresh
-        uint8x8_t d3u8,       // p3
-        uint8x8_t d4u8,       // p2
-        uint8x8_t d5u8,       // p1
-        uint8x8_t d6u8,       // p0
-        uint8x8_t d7u8,       // q0
-        uint8x8_t d16u8,      // q1
-        uint8x8_t d17u8,      // q2
-        uint8x8_t d18u8,      // q3
-        uint8x8_t *d4ru8,     // p1
-        uint8x8_t *d5ru8,     // p0
-        uint8x8_t *d6ru8,     // q0
-        uint8x8_t *d7ru8) {   // q1
-    uint8x8_t d19u8, d20u8, d21u8, d22u8, d23u8, d27u8, d28u8;
-    int16x8_t q12s16;
-    int8x8_t d19s8, d20s8, d21s8, d26s8, d27s8, d28s8;
+static INLINE void loop_filter_neon(uint8x8_t dblimit,   // flimit
+                                    uint8x8_t dlimit,    // limit
+                                    uint8x8_t dthresh,   // thresh
+                                    uint8x8_t d3u8,      // p3
+                                    uint8x8_t d4u8,      // p2
+                                    uint8x8_t d5u8,      // p1
+                                    uint8x8_t d6u8,      // p0
+                                    uint8x8_t d7u8,      // q0
+                                    uint8x8_t d16u8,     // q1
+                                    uint8x8_t d17u8,     // q2
+                                    uint8x8_t d18u8,     // q3
+                                    uint8x8_t *d4ru8,    // p1
+                                    uint8x8_t *d5ru8,    // p0
+                                    uint8x8_t *d6ru8,    // q0
+                                    uint8x8_t *d7ru8) {  // q1
+  uint8x8_t d19u8, d20u8, d21u8, d22u8, d23u8, d27u8, d28u8;
+  int16x8_t q12s16;
+  int8x8_t d19s8, d20s8, d21s8, d26s8, d27s8, d28s8;
 
-    d19u8 = vabd_u8(d3u8, d4u8);
-    d20u8 = vabd_u8(d4u8, d5u8);
-    d21u8 = vabd_u8(d5u8, d6u8);
-    d22u8 = vabd_u8(d16u8, d7u8);
-    d3u8  = vabd_u8(d17u8, d16u8);
-    d4u8  = vabd_u8(d18u8, d17u8);
+  d19u8 = vabd_u8(d3u8, d4u8);
+  d20u8 = vabd_u8(d4u8, d5u8);
+  d21u8 = vabd_u8(d5u8, d6u8);
+  d22u8 = vabd_u8(d16u8, d7u8);
+  d3u8 = vabd_u8(d17u8, d16u8);
+  d4u8 = vabd_u8(d18u8, d17u8);
 
-    d19u8 = vmax_u8(d19u8, d20u8);
-    d20u8 = vmax_u8(d21u8, d22u8);
-    d3u8  = vmax_u8(d3u8,  d4u8);
-    d23u8 = vmax_u8(d19u8, d20u8);
+  d19u8 = vmax_u8(d19u8, d20u8);
+  d20u8 = vmax_u8(d21u8, d22u8);
+  d3u8 = vmax_u8(d3u8, d4u8);
+  d23u8 = vmax_u8(d19u8, d20u8);
 
-    d17u8 = vabd_u8(d6u8, d7u8);
+  d17u8 = vabd_u8(d6u8, d7u8);
 
-    d21u8 = vcgt_u8(d21u8, dthresh);
-    d22u8 = vcgt_u8(d22u8, dthresh);
-    d23u8 = vmax_u8(d23u8, d3u8);
+  d21u8 = vcgt_u8(d21u8, dthresh);
+  d22u8 = vcgt_u8(d22u8, dthresh);
+  d23u8 = vmax_u8(d23u8, d3u8);
 
-    d28u8 = vabd_u8(d5u8, d16u8);
-    d17u8 = vqadd_u8(d17u8, d17u8);
+  d28u8 = vabd_u8(d5u8, d16u8);
+  d17u8 = vqadd_u8(d17u8, d17u8);
 
-    d23u8 = vcge_u8(dlimit, d23u8);
+  d23u8 = vcge_u8(dlimit, d23u8);
 
-    d18u8 = vdup_n_u8(0x80);
-    d5u8  = veor_u8(d5u8,  d18u8);
-    d6u8  = veor_u8(d6u8,  d18u8);
-    d7u8  = veor_u8(d7u8,  d18u8);
-    d16u8 = veor_u8(d16u8, d18u8);
+  d18u8 = vdup_n_u8(0x80);
+  d5u8 = veor_u8(d5u8, d18u8);
+  d6u8 = veor_u8(d6u8, d18u8);
+  d7u8 = veor_u8(d7u8, d18u8);
+  d16u8 = veor_u8(d16u8, d18u8);
 
-    d28u8 = vshr_n_u8(d28u8, 1);
-    d17u8 = vqadd_u8(d17u8, d28u8);
+  d28u8 = vshr_n_u8(d28u8, 1);
+  d17u8 = vqadd_u8(d17u8, d28u8);
 
-    d19u8 = vdup_n_u8(3);
+  d19u8 = vdup_n_u8(3);
 
-    d28s8 = vsub_s8(vreinterpret_s8_u8(d7u8),
-                    vreinterpret_s8_u8(d6u8));
+  d28s8 = vsub_s8(vreinterpret_s8_u8(d7u8), vreinterpret_s8_u8(d6u8));
 
-    d17u8 = vcge_u8(dblimit, d17u8);
+  d17u8 = vcge_u8(dblimit, d17u8);
 
-    d27s8 = vqsub_s8(vreinterpret_s8_u8(d5u8),
-                     vreinterpret_s8_u8(d16u8));
+  d27s8 = vqsub_s8(vreinterpret_s8_u8(d5u8), vreinterpret_s8_u8(d16u8));
 
-    d22u8 = vorr_u8(d21u8, d22u8);
+  d22u8 = vorr_u8(d21u8, d22u8);
 
-    q12s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d19u8));
+  q12s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d19u8));
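+  // q12s16 = 3 * (q0 - p0); the hev-masked (p1 - q1) term is added below.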
 
-    d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d22u8);
-    d23u8 = vand_u8(d23u8, d17u8);
+  d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d22u8);
+  d23u8 = vand_u8(d23u8, d17u8);
 
-    q12s16 = vaddw_s8(q12s16, vreinterpret_s8_u8(d27u8));
+  q12s16 = vaddw_s8(q12s16, vreinterpret_s8_u8(d27u8));
 
-    d17u8 = vdup_n_u8(4);
+  d17u8 = vdup_n_u8(4);
 
-    d27s8 = vqmovn_s16(q12s16);
-    d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d23u8);
-    d27s8 = vreinterpret_s8_u8(d27u8);
+  d27s8 = vqmovn_s16(q12s16);
+  d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d23u8);
+  d27s8 = vreinterpret_s8_u8(d27u8);
 
-    d28s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d19u8));
-    d27s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d17u8));
-    d28s8 = vshr_n_s8(d28s8, 3);
-    d27s8 = vshr_n_s8(d27s8, 3);
+  d28s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d19u8));
+  d27s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d17u8));
+  d28s8 = vshr_n_s8(d28s8, 3);
+  d27s8 = vshr_n_s8(d27s8, 3);
 
-    d19s8 = vqadd_s8(vreinterpret_s8_u8(d6u8), d28s8);
-    d26s8 = vqsub_s8(vreinterpret_s8_u8(d7u8), d27s8);
+  d19s8 = vqadd_s8(vreinterpret_s8_u8(d6u8), d28s8);
+  d26s8 = vqsub_s8(vreinterpret_s8_u8(d7u8), d27s8);
 
-    d27s8 = vrshr_n_s8(d27s8, 1);
-    d27s8 = vbic_s8(d27s8, vreinterpret_s8_u8(d22u8));
+  d27s8 = vrshr_n_s8(d27s8, 1);
+  d27s8 = vbic_s8(d27s8, vreinterpret_s8_u8(d22u8));
 
-    d21s8 = vqadd_s8(vreinterpret_s8_u8(d5u8), d27s8);
-    d20s8 = vqsub_s8(vreinterpret_s8_u8(d16u8), d27s8);
+  d21s8 = vqadd_s8(vreinterpret_s8_u8(d5u8), d27s8);
+  d20s8 = vqsub_s8(vreinterpret_s8_u8(d16u8), d27s8);
 
-    *d4ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d18u8);
-    *d5ru8 = veor_u8(vreinterpret_u8_s8(d19s8), d18u8);
-    *d6ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d18u8);
-    *d7ru8 = veor_u8(vreinterpret_u8_s8(d20s8), d18u8);
-    return;
+  *d4ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d18u8);
+  *d5ru8 = veor_u8(vreinterpret_u8_s8(d19s8), d18u8);
+  *d6ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d18u8);
+  *d7ru8 = veor_u8(vreinterpret_u8_s8(d20s8), d18u8);
+  return;
 }
 
-void vpx_lpf_horizontal_4_neon(
-        uint8_t *src,
-        int pitch,
-        const uint8_t *blimit,
-        const uint8_t *limit,
-        const uint8_t *thresh,
-        int count) {
-    int i;
-    uint8_t *s, *psrc;
-    uint8x8_t dblimit, dlimit, dthresh;
-    uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
-
-    if (count == 0)  // end_vpx_lf_h_edge
-        return;
-
-    dblimit = vld1_u8(blimit);
-    dlimit = vld1_u8(limit);
-    dthresh = vld1_u8(thresh);
-
-    psrc = src - (pitch << 2);
-    for (i = 0; i < count; i++) {
-        s = psrc + i * 8;
-
-        d3u8 = vld1_u8(s);
-        s += pitch;
-        d4u8 = vld1_u8(s);
-        s += pitch;
-        d5u8 = vld1_u8(s);
-        s += pitch;
-        d6u8 = vld1_u8(s);
-        s += pitch;
-        d7u8 = vld1_u8(s);
-        s += pitch;
-        d16u8 = vld1_u8(s);
-        s += pitch;
-        d17u8 = vld1_u8(s);
-        s += pitch;
-        d18u8 = vld1_u8(s);
-
-        loop_filter_neon(dblimit, dlimit, dthresh,
-                         d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
-                         &d4u8, &d5u8, &d6u8, &d7u8);
-
-        s -= (pitch * 5);
-        vst1_u8(s, d4u8);
-        s += pitch;
-        vst1_u8(s, d5u8);
-        s += pitch;
-        vst1_u8(s, d6u8);
-        s += pitch;
-        vst1_u8(s, d7u8);
-    }
+void vpx_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+                               const uint8_t *limit, const uint8_t *thresh,
+                               int count) {
+  int i;
+  uint8_t *s, *psrc;
+  uint8x8_t dblimit, dlimit, dthresh;
+  uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
+
+  if (count == 0)  // end_vpx_lf_h_edge
     return;
+
+  dblimit = vld1_u8(blimit);
+  dlimit = vld1_u8(limit);
+  dthresh = vld1_u8(thresh);
+
+  psrc = src - (pitch << 2);
+  for (i = 0; i < count; i++) {
+    s = psrc + i * 8;
+
+    d3u8 = vld1_u8(s);
+    s += pitch;
+    d4u8 = vld1_u8(s);
+    s += pitch;
+    d5u8 = vld1_u8(s);
+    s += pitch;
+    d6u8 = vld1_u8(s);
+    s += pitch;
+    d7u8 = vld1_u8(s);
+    s += pitch;
+    d16u8 = vld1_u8(s);
+    s += pitch;
+    d17u8 = vld1_u8(s);
+    s += pitch;
+    d18u8 = vld1_u8(s);
+
+    loop_filter_neon(dblimit, dlimit, dthresh, d3u8, d4u8, d5u8, d6u8, d7u8,
+                     d16u8, d17u8, d18u8, &d4u8, &d5u8, &d6u8, &d7u8);
+
+    s -= (pitch * 5);
+    vst1_u8(s, d4u8);
+    s += pitch;
+    vst1_u8(s, d5u8);
+    s += pitch;
+    vst1_u8(s, d6u8);
+    s += pitch;
+    vst1_u8(s, d7u8);
+  }
+  return;
 }
 
-void vpx_lpf_vertical_4_neon(
-        uint8_t *src,
-        int pitch,
-        const uint8_t *blimit,
-        const uint8_t *limit,
-        const uint8_t *thresh,
-        int count) {
-    int i, pitch8;
-    uint8_t *s;
-    uint8x8_t dblimit, dlimit, dthresh;
-    uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
-    uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
-    uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
-    uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
-    uint8x8x4_t d4Result;
-
-    if (count == 0)  // end_vpx_lf_h_edge
-        return;
-
-    dblimit = vld1_u8(blimit);
-    dlimit = vld1_u8(limit);
-    dthresh = vld1_u8(thresh);
-
-    pitch8 = pitch * 8;
-    for (i = 0; i < count; i++, src += pitch8) {
-        s = src - (i + 1) * 4;
-
-        d3u8 = vld1_u8(s);
-        s += pitch;
-        d4u8 = vld1_u8(s);
-        s += pitch;
-        d5u8 = vld1_u8(s);
-        s += pitch;
-        d6u8 = vld1_u8(s);
-        s += pitch;
-        d7u8 = vld1_u8(s);
-        s += pitch;
-        d16u8 = vld1_u8(s);
-        s += pitch;
-        d17u8 = vld1_u8(s);
-        s += pitch;
-        d18u8 = vld1_u8(s);
-
-        d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8),
-                      vreinterpret_u32_u8(d7u8));
-        d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8),
-                      vreinterpret_u32_u8(d16u8));
-        d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8),
-                      vreinterpret_u32_u8(d17u8));
-        d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8),
-                      vreinterpret_u32_u8(d18u8));
-
-        d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
-                          vreinterpret_u16_u32(d2tmp2.val[0]));
-        d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
-                          vreinterpret_u16_u32(d2tmp3.val[0]));
-        d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
-                          vreinterpret_u16_u32(d2tmp2.val[1]));
-        d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
-                          vreinterpret_u16_u32(d2tmp3.val[1]));
-
-        d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
-                         vreinterpret_u8_u16(d2tmp5.val[0]));
-        d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
-                         vreinterpret_u8_u16(d2tmp5.val[1]));
-        d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
-                          vreinterpret_u8_u16(d2tmp7.val[0]));
-        d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
-                          vreinterpret_u8_u16(d2tmp7.val[1]));
-
-        d3u8 = d2tmp8.val[0];
-        d4u8 = d2tmp8.val[1];
-        d5u8 = d2tmp9.val[0];
-        d6u8 = d2tmp9.val[1];
-        d7u8 = d2tmp10.val[0];
-        d16u8 = d2tmp10.val[1];
-        d17u8 = d2tmp11.val[0];
-        d18u8 = d2tmp11.val[1];
-
-        loop_filter_neon(dblimit, dlimit, dthresh,
-                         d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
-                         &d4u8, &d5u8, &d6u8, &d7u8);
-
-        d4Result.val[0] = d4u8;
-        d4Result.val[1] = d5u8;
-        d4Result.val[2] = d6u8;
-        d4Result.val[3] = d7u8;
-
-        src -= 2;
-        vst4_lane_u8(src, d4Result, 0);
-        src += pitch;
-        vst4_lane_u8(src, d4Result, 1);
-        src += pitch;
-        vst4_lane_u8(src, d4Result, 2);
-        src += pitch;
-        vst4_lane_u8(src, d4Result, 3);
-        src += pitch;
-        vst4_lane_u8(src, d4Result, 4);
-        src += pitch;
-        vst4_lane_u8(src, d4Result, 5);
-        src += pitch;
-        vst4_lane_u8(src, d4Result, 6);
-        src += pitch;
-        vst4_lane_u8(src, d4Result, 7);
-    }
+void vpx_lpf_vertical_4_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+                             const uint8_t *limit, const uint8_t *thresh,
+                             int count) {
+  int i, pitch8;
+  uint8_t *s;
+  uint8x8_t dblimit, dlimit, dthresh;
+  uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
+  uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
+  uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
+  uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
+  uint8x8x4_t d4Result;
+
+  if (count == 0)  // end_vpx_lf_v_edge
     return;
+
+  dblimit = vld1_u8(blimit);
+  dlimit = vld1_u8(limit);
+  dthresh = vld1_u8(thresh);
+
+  pitch8 = pitch * 8;
+  for (i = 0; i < count; i++, src += pitch8) {
+    s = src - (i + 1) * 4;
+
+    d3u8 = vld1_u8(s);
+    s += pitch;
+    d4u8 = vld1_u8(s);
+    s += pitch;
+    d5u8 = vld1_u8(s);
+    s += pitch;
+    d6u8 = vld1_u8(s);
+    s += pitch;
+    d7u8 = vld1_u8(s);
+    s += pitch;
+    d16u8 = vld1_u8(s);
+    s += pitch;
+    d17u8 = vld1_u8(s);
+    s += pitch;
+    d18u8 = vld1_u8(s);
+
+    d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8), vreinterpret_u32_u8(d7u8));
+    d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8), vreinterpret_u32_u8(d16u8));
+    d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8), vreinterpret_u32_u8(d17u8));
+    d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8), vreinterpret_u32_u8(d18u8));
+
+    d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
+                      vreinterpret_u16_u32(d2tmp2.val[0]));
+    d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
+                      vreinterpret_u16_u32(d2tmp3.val[0]));
+    d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
+                      vreinterpret_u16_u32(d2tmp2.val[1]));
+    d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
+                      vreinterpret_u16_u32(d2tmp3.val[1]));
+
+    d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
+                     vreinterpret_u8_u16(d2tmp5.val[0]));
+    d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
+                     vreinterpret_u8_u16(d2tmp5.val[1]));
+    d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
+                      vreinterpret_u8_u16(d2tmp7.val[0]));
+    d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
+                      vreinterpret_u8_u16(d2tmp7.val[1]));
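+    // Three vtrn passes (32-, 16-, then 8-bit) complete an 8x8 byte
+    // transpose, turning the eight loaded rows into pixel columns.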
+
+    d3u8 = d2tmp8.val[0];
+    d4u8 = d2tmp8.val[1];
+    d5u8 = d2tmp9.val[0];
+    d6u8 = d2tmp9.val[1];
+    d7u8 = d2tmp10.val[0];
+    d16u8 = d2tmp10.val[1];
+    d17u8 = d2tmp11.val[0];
+    d18u8 = d2tmp11.val[1];
+
+    loop_filter_neon(dblimit, dlimit, dthresh, d3u8, d4u8, d5u8, d6u8, d7u8,
+                     d16u8, d17u8, d18u8, &d4u8, &d5u8, &d6u8, &d7u8);
+
+    d4Result.val[0] = d4u8;
+    d4Result.val[1] = d5u8;
+    d4Result.val[2] = d6u8;
+    d4Result.val[3] = d7u8;
+
+    src -= 2;
+    vst4_lane_u8(src, d4Result, 0);
+    src += pitch;
+    vst4_lane_u8(src, d4Result, 1);
+    src += pitch;
+    vst4_lane_u8(src, d4Result, 2);
+    src += pitch;
+    vst4_lane_u8(src, d4Result, 3);
+    src += pitch;
+    vst4_lane_u8(src, d4Result, 4);
+    src += pitch;
+    vst4_lane_u8(src, d4Result, 5);
+    src += pitch;
+    vst4_lane_u8(src, d4Result, 6);
+    src += pitch;
+    vst4_lane_u8(src, d4Result, 7);
+  }
+  return;
 }
diff --git a/vpx_dsp/arm/loopfilter_8_neon.c b/vpx_dsp/arm/loopfilter_8_neon.c
index a887e2ee5486a9a3a8d1bc84f6333e04f79e68b8..f26d02734fe5bee477f55eeb9ccee5c676f716aa 100644
--- a/vpx_dsp/arm/loopfilter_8_neon.c
+++ b/vpx_dsp/arm/loopfilter_8_neon.c
@@ -12,442 +12,425 @@
 
 #include "./vpx_dsp_rtcd.h"
 
-static INLINE void mbloop_filter_neon(
-        uint8x8_t dblimit,   // mblimit
-        uint8x8_t dlimit,    // limit
-        uint8x8_t dthresh,   // thresh
-        uint8x8_t d3u8,      // p2
-        uint8x8_t d4u8,      // p2
-        uint8x8_t d5u8,      // p1
-        uint8x8_t d6u8,      // p0
-        uint8x8_t d7u8,      // q0
-        uint8x8_t d16u8,     // q1
-        uint8x8_t d17u8,     // q2
-        uint8x8_t d18u8,     // q3
-        uint8x8_t *d0ru8,    // p1
-        uint8x8_t *d1ru8,    // p1
-        uint8x8_t *d2ru8,    // p0
-        uint8x8_t *d3ru8,    // q0
-        uint8x8_t *d4ru8,    // q1
-        uint8x8_t *d5ru8) {  // q1
-    uint32_t flat;
-    uint8x8_t d0u8, d1u8, d2u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8;
-    uint8x8_t d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
-    int16x8_t q15s16;
-    uint16x8_t q10u16, q14u16;
-    int8x8_t d21s8, d24s8, d25s8, d26s8, d28s8, d29s8, d30s8;
+static INLINE void mbloop_filter_neon(uint8x8_t dblimit,   // mblimit
+                                      uint8x8_t dlimit,    // limit
+                                      uint8x8_t dthresh,   // thresh
+                                      uint8x8_t d3u8,      // p3
+                                      uint8x8_t d4u8,      // p2
+                                      uint8x8_t d5u8,      // p1
+                                      uint8x8_t d6u8,      // p0
+                                      uint8x8_t d7u8,      // q0
+                                      uint8x8_t d16u8,     // q1
+                                      uint8x8_t d17u8,     // q2
+                                      uint8x8_t d18u8,     // q3
+                                      uint8x8_t *d0ru8,    // p2
+                                      uint8x8_t *d1ru8,    // p1
+                                      uint8x8_t *d2ru8,    // p0
+                                      uint8x8_t *d3ru8,    // q0
+                                      uint8x8_t *d4ru8,    // q1
+                                      uint8x8_t *d5ru8) {  // q2
+  uint32_t flat;
+  uint8x8_t d0u8, d1u8, d2u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8;
+  uint8x8_t d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
+  int16x8_t q15s16;
+  uint16x8_t q10u16, q14u16;
+  int8x8_t d21s8, d24s8, d25s8, d26s8, d28s8, d29s8, d30s8;
 
-    d19u8 = vabd_u8(d3u8, d4u8);
-    d20u8 = vabd_u8(d4u8, d5u8);
-    d21u8 = vabd_u8(d5u8, d6u8);
-    d22u8 = vabd_u8(d16u8, d7u8);
-    d23u8 = vabd_u8(d17u8, d16u8);
-    d24u8 = vabd_u8(d18u8, d17u8);
+  d19u8 = vabd_u8(d3u8, d4u8);
+  d20u8 = vabd_u8(d4u8, d5u8);
+  d21u8 = vabd_u8(d5u8, d6u8);
+  d22u8 = vabd_u8(d16u8, d7u8);
+  d23u8 = vabd_u8(d17u8, d16u8);
+  d24u8 = vabd_u8(d18u8, d17u8);
 
-    d19u8 = vmax_u8(d19u8, d20u8);
-    d20u8 = vmax_u8(d21u8, d22u8);
+  d19u8 = vmax_u8(d19u8, d20u8);
+  d20u8 = vmax_u8(d21u8, d22u8);
 
-    d25u8 = vabd_u8(d6u8, d4u8);
+  d25u8 = vabd_u8(d6u8, d4u8);
 
-    d23u8 = vmax_u8(d23u8, d24u8);
+  d23u8 = vmax_u8(d23u8, d24u8);
 
-    d26u8 = vabd_u8(d7u8, d17u8);
+  d26u8 = vabd_u8(d7u8, d17u8);
 
-    d19u8 = vmax_u8(d19u8, d20u8);
+  d19u8 = vmax_u8(d19u8, d20u8);
 
-    d24u8 = vabd_u8(d6u8, d7u8);
-    d27u8 = vabd_u8(d3u8, d6u8);
-    d28u8 = vabd_u8(d18u8, d7u8);
+  d24u8 = vabd_u8(d6u8, d7u8);
+  d27u8 = vabd_u8(d3u8, d6u8);
+  d28u8 = vabd_u8(d18u8, d7u8);
 
-    d19u8 = vmax_u8(d19u8, d23u8);
+  d19u8 = vmax_u8(d19u8, d23u8);
 
-    d23u8 = vabd_u8(d5u8, d16u8);
-    d24u8 = vqadd_u8(d24u8, d24u8);
+  d23u8 = vabd_u8(d5u8, d16u8);
+  d24u8 = vqadd_u8(d24u8, d24u8);
 
+  d19u8 = vcge_u8(dlimit, d19u8);
 
-    d19u8 = vcge_u8(dlimit, d19u8);
+  d25u8 = vmax_u8(d25u8, d26u8);
+  d26u8 = vmax_u8(d27u8, d28u8);
 
+  d23u8 = vshr_n_u8(d23u8, 1);
 
-    d25u8 = vmax_u8(d25u8, d26u8);
-    d26u8 = vmax_u8(d27u8, d28u8);
+  d25u8 = vmax_u8(d25u8, d26u8);
 
-    d23u8 = vshr_n_u8(d23u8, 1);
+  d24u8 = vqadd_u8(d24u8, d23u8);
 
-    d25u8 = vmax_u8(d25u8, d26u8);
+  d20u8 = vmax_u8(d20u8, d25u8);
 
-    d24u8 = vqadd_u8(d24u8, d23u8);
+  d23u8 = vdup_n_u8(1);
+  d24u8 = vcge_u8(dblimit, d24u8);
 
-    d20u8 = vmax_u8(d20u8, d25u8);
+  d21u8 = vcgt_u8(d21u8, dthresh);
 
-    d23u8 = vdup_n_u8(1);
-    d24u8 = vcge_u8(dblimit, d24u8);
+  d20u8 = vcge_u8(d23u8, d20u8);
 
-    d21u8 = vcgt_u8(d21u8, dthresh);
+  d19u8 = vand_u8(d19u8, d24u8);
 
-    d20u8 = vcge_u8(d23u8, d20u8);
+  d23u8 = vcgt_u8(d22u8, dthresh);
 
-    d19u8 = vand_u8(d19u8, d24u8);
+  d20u8 = vand_u8(d20u8, d19u8);
 
-    d23u8 = vcgt_u8(d22u8, dthresh);
+  d22u8 = vdup_n_u8(0x80);
 
-    d20u8 = vand_u8(d20u8, d19u8);
+  d23u8 = vorr_u8(d21u8, d23u8);
 
-    d22u8 = vdup_n_u8(0x80);
+  q10u16 = vcombine_u16(vreinterpret_u16_u8(d20u8), vreinterpret_u16_u8(d21u8));
 
-    d23u8 = vorr_u8(d21u8, d23u8);
+  d30u8 = vshrn_n_u16(q10u16, 4);
+  flat = vget_lane_u32(vreinterpret_u32_u8(d30u8), 0);
 
-    q10u16 = vcombine_u16(vreinterpret_u16_u8(d20u8),
-                          vreinterpret_u16_u8(d21u8));
+  if (flat == 0xffffffff) {  // Check for all 1's, power_branch_only
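+    // Flat path: keep a running 16-bit sum in q14u16; each successive output
+    // drops the oldest taps and adds the newest before the rounding shift.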
+    d27u8 = vdup_n_u8(3);
+    d21u8 = vdup_n_u8(2);
+    q14u16 = vaddl_u8(d6u8, d7u8);
+    q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
+    q14u16 = vmlal_u8(q14u16, d4u8, d21u8);
+    q14u16 = vaddw_u8(q14u16, d5u8);
+    *d0ru8 = vqrshrn_n_u16(q14u16, 3);
 
-    d30u8 = vshrn_n_u16(q10u16, 4);
-    flat = vget_lane_u32(vreinterpret_u32_u8(d30u8), 0);
+    q14u16 = vsubw_u8(q14u16, d3u8);
+    q14u16 = vsubw_u8(q14u16, d4u8);
+    q14u16 = vaddw_u8(q14u16, d5u8);
+    q14u16 = vaddw_u8(q14u16, d16u8);
+    *d1ru8 = vqrshrn_n_u16(q14u16, 3);
 
-    if (flat == 0xffffffff) {  // Check for all 1's, power_branch_only
-        d27u8 = vdup_n_u8(3);
-        d21u8 = vdup_n_u8(2);
-        q14u16 = vaddl_u8(d6u8, d7u8);
-        q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
-        q14u16 = vmlal_u8(q14u16, d4u8, d21u8);
-        q14u16 = vaddw_u8(q14u16, d5u8);
-        *d0ru8 = vqrshrn_n_u16(q14u16, 3);
+    q14u16 = vsubw_u8(q14u16, d3u8);
+    q14u16 = vsubw_u8(q14u16, d5u8);
+    q14u16 = vaddw_u8(q14u16, d6u8);
+    q14u16 = vaddw_u8(q14u16, d17u8);
+    *d2ru8 = vqrshrn_n_u16(q14u16, 3);
 
-        q14u16 = vsubw_u8(q14u16, d3u8);
-        q14u16 = vsubw_u8(q14u16, d4u8);
-        q14u16 = vaddw_u8(q14u16, d5u8);
-        q14u16 = vaddw_u8(q14u16, d16u8);
-        *d1ru8 = vqrshrn_n_u16(q14u16, 3);
+    q14u16 = vsubw_u8(q14u16, d3u8);
+    q14u16 = vsubw_u8(q14u16, d6u8);
+    q14u16 = vaddw_u8(q14u16, d7u8);
+    q14u16 = vaddw_u8(q14u16, d18u8);
+    *d3ru8 = vqrshrn_n_u16(q14u16, 3);
 
-        q14u16 = vsubw_u8(q14u16, d3u8);
-        q14u16 = vsubw_u8(q14u16, d5u8);
-        q14u16 = vaddw_u8(q14u16, d6u8);
-        q14u16 = vaddw_u8(q14u16, d17u8);
-        *d2ru8 = vqrshrn_n_u16(q14u16, 3);
+    q14u16 = vsubw_u8(q14u16, d4u8);
+    q14u16 = vsubw_u8(q14u16, d7u8);
+    q14u16 = vaddw_u8(q14u16, d16u8);
+    q14u16 = vaddw_u8(q14u16, d18u8);
+    *d4ru8 = vqrshrn_n_u16(q14u16, 3);
 
-        q14u16 = vsubw_u8(q14u16, d3u8);
-        q14u16 = vsubw_u8(q14u16, d6u8);
-        q14u16 = vaddw_u8(q14u16, d7u8);
-        q14u16 = vaddw_u8(q14u16, d18u8);
-        *d3ru8 = vqrshrn_n_u16(q14u16, 3);
+    q14u16 = vsubw_u8(q14u16, d5u8);
+    q14u16 = vsubw_u8(q14u16, d16u8);
+    q14u16 = vaddw_u8(q14u16, d17u8);
+    q14u16 = vaddw_u8(q14u16, d18u8);
+    *d5ru8 = vqrshrn_n_u16(q14u16, 3);
+  } else {
+    d21u8 = veor_u8(d7u8, d22u8);
+    d24u8 = veor_u8(d6u8, d22u8);
+    d25u8 = veor_u8(d5u8, d22u8);
+    d26u8 = veor_u8(d16u8, d22u8);
 
-        q14u16 = vsubw_u8(q14u16, d4u8);
-        q14u16 = vsubw_u8(q14u16, d7u8);
-        q14u16 = vaddw_u8(q14u16, d16u8);
-        q14u16 = vaddw_u8(q14u16, d18u8);
-        *d4ru8 = vqrshrn_n_u16(q14u16, 3);
+    d27u8 = vdup_n_u8(3);
 
-        q14u16 = vsubw_u8(q14u16, d5u8);
-        q14u16 = vsubw_u8(q14u16, d16u8);
-        q14u16 = vaddw_u8(q14u16, d17u8);
-        q14u16 = vaddw_u8(q14u16, d18u8);
-        *d5ru8 = vqrshrn_n_u16(q14u16, 3);
-    } else {
-        d21u8 = veor_u8(d7u8,  d22u8);
-        d24u8 = veor_u8(d6u8,  d22u8);
-        d25u8 = veor_u8(d5u8,  d22u8);
-        d26u8 = veor_u8(d16u8, d22u8);
+    d28s8 = vsub_s8(vreinterpret_s8_u8(d21u8), vreinterpret_s8_u8(d24u8));
+    d29s8 = vqsub_s8(vreinterpret_s8_u8(d25u8), vreinterpret_s8_u8(d26u8));
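+    // d28s8 = qs0 - ps0 and d29s8 = clamp(ps1 - qs1). The plain vsub_s8 can
+    // wrap, but any lane where |q0 - p0| is that large fails the filter mask
+    // and is zeroed before the result is used.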
 
-        d27u8 = vdup_n_u8(3);
+    q15s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d27u8));
 
-        d28s8 = vsub_s8(vreinterpret_s8_u8(d21u8), vreinterpret_s8_u8(d24u8));
-        d29s8 = vqsub_s8(vreinterpret_s8_u8(d25u8), vreinterpret_s8_u8(d26u8));
+    d29s8 = vand_s8(d29s8, vreinterpret_s8_u8(d23u8));
 
-        q15s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d27u8));
+    q15s16 = vaddw_s8(q15s16, d29s8);
 
-        d29s8 = vand_s8(d29s8, vreinterpret_s8_u8(d23u8));
+    d29u8 = vdup_n_u8(4);
 
-        q15s16 = vaddw_s8(q15s16, d29s8);
+    d28s8 = vqmovn_s16(q15s16);
 
-        d29u8 = vdup_n_u8(4);
+    d28s8 = vand_s8(d28s8, vreinterpret_s8_u8(d19u8));
 
-        d28s8 = vqmovn_s16(q15s16);
+    d30s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d27u8));
+    d29s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d29u8));
+    d30s8 = vshr_n_s8(d30s8, 3);
+    d29s8 = vshr_n_s8(d29s8, 3);
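+    // filter2 = clamp(filter + 3) >> 3 and filter1 = clamp(filter + 4) >> 3,
+    // applied below to p0 (add) and q0 (subtract) respectively.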
 
-        d28s8 = vand_s8(d28s8, vreinterpret_s8_u8(d19u8));
+    d24s8 = vqadd_s8(vreinterpret_s8_u8(d24u8), d30s8);
+    d21s8 = vqsub_s8(vreinterpret_s8_u8(d21u8), d29s8);
 
-        d30s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d27u8));
-        d29s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d29u8));
-        d30s8 = vshr_n_s8(d30s8, 3);
-        d29s8 = vshr_n_s8(d29s8, 3);
+    d29s8 = vrshr_n_s8(d29s8, 1);
+    d29s8 = vbic_s8(d29s8, vreinterpret_s8_u8(d23u8));
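+    // The outer taps move by (filter1 + 1) >> 1, suppressed where hev is set.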
 
-        d24s8 = vqadd_s8(vreinterpret_s8_u8(d24u8), d30s8);
-        d21s8 = vqsub_s8(vreinterpret_s8_u8(d21u8), d29s8);
+    d25s8 = vqadd_s8(vreinterpret_s8_u8(d25u8), d29s8);
+    d26s8 = vqsub_s8(vreinterpret_s8_u8(d26u8), d29s8);
 
-        d29s8 = vrshr_n_s8(d29s8, 1);
-        d29s8 = vbic_s8(d29s8, vreinterpret_s8_u8(d23u8));
+    if (flat == 0) {  // filter_branch_only
+      *d0ru8 = d4u8;
+      *d1ru8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
+      *d2ru8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
+      *d3ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
+      *d4ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
+      *d5ru8 = d17u8;
+      return;
+    }
 
-        d25s8 = vqadd_s8(vreinterpret_s8_u8(d25u8), d29s8);
-        d26s8 = vqsub_s8(vreinterpret_s8_u8(d26u8), d29s8);
+    d21u8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
+    d24u8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
+    d25u8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
+    d26u8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
 
-        if (flat == 0) {  // filter_branch_only
-            *d0ru8 = d4u8;
-            *d1ru8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
-            *d2ru8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
-            *d3ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
-            *d4ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
-            *d5ru8 = d17u8;
-            return;
-        }
+    d23u8 = vdup_n_u8(2);
+    q14u16 = vaddl_u8(d6u8, d7u8);
+    q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
+    q14u16 = vmlal_u8(q14u16, d4u8, d23u8);
 
-        d21u8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
-        d24u8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
-        d25u8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
-        d26u8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
+    d0u8 = vbsl_u8(d20u8, dblimit, d4u8);
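+    // The blimit/limit/thresh operands here and in the d1u8/d2u8 selects
+    // below are only placeholders: the final vbsl_u8 with the same flat mask
+    // overwrites those lanes, so just the non-flat (filter4) lanes survive.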
 
-        d23u8 = vdup_n_u8(2);
-        q14u16 = vaddl_u8(d6u8, d7u8);
-        q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
-        q14u16 = vmlal_u8(q14u16, d4u8, d23u8);
+    q14u16 = vaddw_u8(q14u16, d5u8);
 
-        d0u8 = vbsl_u8(d20u8, dblimit, d4u8);
+    d1u8 = vbsl_u8(d20u8, dlimit, d25u8);
 
-        q14u16 = vaddw_u8(q14u16, d5u8);
+    d30u8 = vqrshrn_n_u16(q14u16, 3);
 
-        d1u8 = vbsl_u8(d20u8, dlimit, d25u8);
+    q14u16 = vsubw_u8(q14u16, d3u8);
+    q14u16 = vsubw_u8(q14u16, d4u8);
+    q14u16 = vaddw_u8(q14u16, d5u8);
+    q14u16 = vaddw_u8(q14u16, d16u8);
 
-        d30u8 = vqrshrn_n_u16(q14u16, 3);
+    d2u8 = vbsl_u8(d20u8, dthresh, d24u8);
 
-        q14u16 = vsubw_u8(q14u16, d3u8);
-        q14u16 = vsubw_u8(q14u16, d4u8);
-        q14u16 = vaddw_u8(q14u16, d5u8);
-        q14u16 = vaddw_u8(q14u16, d16u8);
+    d31u8 = vqrshrn_n_u16(q14u16, 3);
 
-        d2u8 = vbsl_u8(d20u8, dthresh, d24u8);
+    q14u16 = vsubw_u8(q14u16, d3u8);
+    q14u16 = vsubw_u8(q14u16, d5u8);
+    q14u16 = vaddw_u8(q14u16, d6u8);
+    q14u16 = vaddw_u8(q14u16, d17u8);
 
-        d31u8 = vqrshrn_n_u16(q14u16, 3);
+    *d0ru8 = vbsl_u8(d20u8, d30u8, d0u8);
 
-        q14u16 = vsubw_u8(q14u16, d3u8);
-        q14u16 = vsubw_u8(q14u16, d5u8);
-        q14u16 = vaddw_u8(q14u16, d6u8);
-        q14u16 = vaddw_u8(q14u16, d17u8);
+    d23u8 = vqrshrn_n_u16(q14u16, 3);
 
-        *d0ru8 = vbsl_u8(d20u8, d30u8, d0u8);
+    q14u16 = vsubw_u8(q14u16, d3u8);
+    q14u16 = vsubw_u8(q14u16, d6u8);
+    q14u16 = vaddw_u8(q14u16, d7u8);
 
-        d23u8 = vqrshrn_n_u16(q14u16, 3);
+    *d1ru8 = vbsl_u8(d20u8, d31u8, d1u8);
 
-        q14u16 = vsubw_u8(q14u16, d3u8);
-        q14u16 = vsubw_u8(q14u16, d6u8);
-        q14u16 = vaddw_u8(q14u16, d7u8);
+    q14u16 = vaddw_u8(q14u16, d18u8);
 
-        *d1ru8 = vbsl_u8(d20u8, d31u8, d1u8);
+    *d2ru8 = vbsl_u8(d20u8, d23u8, d2u8);
 
-        q14u16 = vaddw_u8(q14u16, d18u8);
+    d22u8 = vqrshrn_n_u16(q14u16, 3);
 
-        *d2ru8 = vbsl_u8(d20u8, d23u8, d2u8);
+    q14u16 = vsubw_u8(q14u16, d4u8);
+    q14u16 = vsubw_u8(q14u16, d7u8);
+    q14u16 = vaddw_u8(q14u16, d16u8);
 
-        d22u8 = vqrshrn_n_u16(q14u16, 3);
+    d3u8 = vbsl_u8(d20u8, d3u8, d21u8);
 
-        q14u16 = vsubw_u8(q14u16, d4u8);
-        q14u16 = vsubw_u8(q14u16, d7u8);
-        q14u16 = vaddw_u8(q14u16, d16u8);
+    q14u16 = vaddw_u8(q14u16, d18u8);
 
-        d3u8 = vbsl_u8(d20u8, d3u8, d21u8);
+    d4u8 = vbsl_u8(d20u8, d4u8, d26u8);
 
-        q14u16 = vaddw_u8(q14u16, d18u8);
+    d6u8 = vqrshrn_n_u16(q14u16, 3);
 
-        d4u8 = vbsl_u8(d20u8, d4u8, d26u8);
+    q14u16 = vsubw_u8(q14u16, d5u8);
+    q14u16 = vsubw_u8(q14u16, d16u8);
+    q14u16 = vaddw_u8(q14u16, d17u8);
+    q14u16 = vaddw_u8(q14u16, d18u8);
 
-        d6u8 = vqrshrn_n_u16(q14u16, 3);
+    d5u8 = vbsl_u8(d20u8, d5u8, d17u8);
 
-        q14u16 = vsubw_u8(q14u16, d5u8);
-        q14u16 = vsubw_u8(q14u16, d16u8);
-        q14u16 = vaddw_u8(q14u16, d17u8);
-        q14u16 = vaddw_u8(q14u16, d18u8);
+    d7u8 = vqrshrn_n_u16(q14u16, 3);
 
-        d5u8 = vbsl_u8(d20u8, d5u8, d17u8);
+    *d3ru8 = vbsl_u8(d20u8, d22u8, d3u8);
+    *d4ru8 = vbsl_u8(d20u8, d6u8, d4u8);
+    *d5ru8 = vbsl_u8(d20u8, d7u8, d5u8);
+  }
+  return;
+}
 
-        d7u8 = vqrshrn_n_u16(q14u16, 3);
+void vpx_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+                               const uint8_t *limit, const uint8_t *thresh,
+                               int count) {
+  int i;
+  uint8_t *s, *psrc;
+  uint8x8_t dblimit, dlimit, dthresh;
+  uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
+  uint8x8_t d16u8, d17u8, d18u8;
 
-        *d3ru8 = vbsl_u8(d20u8, d22u8, d3u8);
-        *d4ru8 = vbsl_u8(d20u8, d6u8, d4u8);
-        *d5ru8 = vbsl_u8(d20u8, d7u8, d5u8);
-    }
+  if (count == 0)  // end_vpx_mblf_h_edge
     return;
-}
 
-void vpx_lpf_horizontal_8_neon(
-        uint8_t *src,
-        int pitch,
-        const uint8_t *blimit,
-        const uint8_t *limit,
-        const uint8_t *thresh,
-        int count) {
-    int i;
-    uint8_t *s, *psrc;
-    uint8x8_t dblimit, dlimit, dthresh;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
-    uint8x8_t d16u8, d17u8, d18u8;
-
-    if (count == 0)  // end_vpx_mblf_h_edge
-        return;
-
-    dblimit = vld1_u8(blimit);
-    dlimit = vld1_u8(limit);
-    dthresh = vld1_u8(thresh);
-
-    psrc = src - (pitch << 2);
-    for (i = 0; i < count; i++) {
-        s = psrc + i * 8;
-
-        d3u8  = vld1_u8(s);
-        s += pitch;
-        d4u8  = vld1_u8(s);
-        s += pitch;
-        d5u8  = vld1_u8(s);
-        s += pitch;
-        d6u8  = vld1_u8(s);
-        s += pitch;
-        d7u8  = vld1_u8(s);
-        s += pitch;
-        d16u8 = vld1_u8(s);
-        s += pitch;
-        d17u8 = vld1_u8(s);
-        s += pitch;
-        d18u8 = vld1_u8(s);
-
-        mbloop_filter_neon(dblimit, dlimit, dthresh,
-                           d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
-                           &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8);
-
-        s -= (pitch * 6);
-        vst1_u8(s, d0u8);
-        s += pitch;
-        vst1_u8(s, d1u8);
-        s += pitch;
-        vst1_u8(s, d2u8);
-        s += pitch;
-        vst1_u8(s, d3u8);
-        s += pitch;
-        vst1_u8(s, d4u8);
-        s += pitch;
-        vst1_u8(s, d5u8);
-    }
-    return;
+  dblimit = vld1_u8(blimit);
+  dlimit = vld1_u8(limit);
+  dthresh = vld1_u8(thresh);
+
+  psrc = src - (pitch << 2);
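+  // Rewind four rows so the eight loads below fetch p3..p0, q0..q3 around
+  // the horizontal edge.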
+  for (i = 0; i < count; i++) {
+    s = psrc + i * 8;
+
+    d3u8 = vld1_u8(s);
+    s += pitch;
+    d4u8 = vld1_u8(s);
+    s += pitch;
+    d5u8 = vld1_u8(s);
+    s += pitch;
+    d6u8 = vld1_u8(s);
+    s += pitch;
+    d7u8 = vld1_u8(s);
+    s += pitch;
+    d16u8 = vld1_u8(s);
+    s += pitch;
+    d17u8 = vld1_u8(s);
+    s += pitch;
+    d18u8 = vld1_u8(s);
+
+    mbloop_filter_neon(dblimit, dlimit, dthresh, d3u8, d4u8, d5u8, d6u8, d7u8,
+                       d16u8, d17u8, d18u8, &d0u8, &d1u8, &d2u8, &d3u8, &d4u8,
+                       &d5u8);
+
+    s -= (pitch * 6);
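+    // Only p2..q2 are modified; rewind six rows and store the six outputs.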
+    vst1_u8(s, d0u8);
+    s += pitch;
+    vst1_u8(s, d1u8);
+    s += pitch;
+    vst1_u8(s, d2u8);
+    s += pitch;
+    vst1_u8(s, d3u8);
+    s += pitch;
+    vst1_u8(s, d4u8);
+    s += pitch;
+    vst1_u8(s, d5u8);
+  }
+  return;
 }
 
-void vpx_lpf_vertical_8_neon(
-        uint8_t *src,
-        int pitch,
-        const uint8_t *blimit,
-        const uint8_t *limit,
-        const uint8_t *thresh,
-        int count) {
-    int i;
-    uint8_t *s;
-    uint8x8_t dblimit, dlimit, dthresh;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
-    uint8x8_t d16u8, d17u8, d18u8;
-    uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
-    uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
-    uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
-    uint8x8x4_t d4Result;
-    uint8x8x2_t d2Result;
-
-    if (count == 0)
-        return;
-
-    dblimit = vld1_u8(blimit);
-    dlimit = vld1_u8(limit);
-    dthresh = vld1_u8(thresh);
-
-    for (i = 0; i < count; i++) {
-        s = src + (i * (pitch << 3)) - 4;
-
-        d3u8 = vld1_u8(s);
-        s += pitch;
-        d4u8 = vld1_u8(s);
-        s += pitch;
-        d5u8 = vld1_u8(s);
-        s += pitch;
-        d6u8 = vld1_u8(s);
-        s += pitch;
-        d7u8 = vld1_u8(s);
-        s += pitch;
-        d16u8 = vld1_u8(s);
-        s += pitch;
-        d17u8 = vld1_u8(s);
-        s += pitch;
-        d18u8 = vld1_u8(s);
-
-        d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8),
-                          vreinterpret_u32_u8(d7u8));
-        d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8),
-                          vreinterpret_u32_u8(d16u8));
-        d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8),
-                          vreinterpret_u32_u8(d17u8));
-        d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8),
-                          vreinterpret_u32_u8(d18u8));
-
-        d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
-                          vreinterpret_u16_u32(d2tmp2.val[0]));
-        d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
-                          vreinterpret_u16_u32(d2tmp3.val[0]));
-        d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
-                          vreinterpret_u16_u32(d2tmp2.val[1]));
-        d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
-                          vreinterpret_u16_u32(d2tmp3.val[1]));
-
-        d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
-                         vreinterpret_u8_u16(d2tmp5.val[0]));
-        d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
-                         vreinterpret_u8_u16(d2tmp5.val[1]));
-        d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
-                          vreinterpret_u8_u16(d2tmp7.val[0]));
-        d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
-                          vreinterpret_u8_u16(d2tmp7.val[1]));
-
-        d3u8 = d2tmp8.val[0];
-        d4u8 = d2tmp8.val[1];
-        d5u8 = d2tmp9.val[0];
-        d6u8 = d2tmp9.val[1];
-        d7u8 = d2tmp10.val[0];
-        d16u8 = d2tmp10.val[1];
-        d17u8 = d2tmp11.val[0];
-        d18u8 = d2tmp11.val[1];
-
-        mbloop_filter_neon(dblimit, dlimit, dthresh,
-                           d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
-                           &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8);
-
-        d4Result.val[0] = d0u8;
-        d4Result.val[1] = d1u8;
-        d4Result.val[2] = d2u8;
-        d4Result.val[3] = d3u8;
-
-        d2Result.val[0] = d4u8;
-        d2Result.val[1] = d5u8;
-
-        s = src - 3;
-        vst4_lane_u8(s, d4Result, 0);
-        s += pitch;
-        vst4_lane_u8(s, d4Result, 1);
-        s += pitch;
-        vst4_lane_u8(s, d4Result, 2);
-        s += pitch;
-        vst4_lane_u8(s, d4Result, 3);
-        s += pitch;
-        vst4_lane_u8(s, d4Result, 4);
-        s += pitch;
-        vst4_lane_u8(s, d4Result, 5);
-        s += pitch;
-        vst4_lane_u8(s, d4Result, 6);
-        s += pitch;
-        vst4_lane_u8(s, d4Result, 7);
-
-        s = src + 1;
-        vst2_lane_u8(s, d2Result, 0);
-        s += pitch;
-        vst2_lane_u8(s, d2Result, 1);
-        s += pitch;
-        vst2_lane_u8(s, d2Result, 2);
-        s += pitch;
-        vst2_lane_u8(s, d2Result, 3);
-        s += pitch;
-        vst2_lane_u8(s, d2Result, 4);
-        s += pitch;
-        vst2_lane_u8(s, d2Result, 5);
-        s += pitch;
-        vst2_lane_u8(s, d2Result, 6);
-        s += pitch;
-        vst2_lane_u8(s, d2Result, 7);
-    }
-    return;
+void vpx_lpf_vertical_8_neon(uint8_t *src, int pitch, const uint8_t *blimit,
+                             const uint8_t *limit, const uint8_t *thresh,
+                             int count) {
+  int i;
+  uint8_t *s;
+  uint8x8_t dblimit, dlimit, dthresh;
+  uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
+  uint8x8_t d16u8, d17u8, d18u8;
+  uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
+  uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
+  uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
+  uint8x8x4_t d4Result;
+  uint8x8x2_t d2Result;
+
+  if (count == 0) return;
+
+  dblimit = vld1_u8(blimit);
+  dlimit = vld1_u8(limit);
+  dthresh = vld1_u8(thresh);
+
+  for (i = 0; i < count; i++) {
+    s = src + (i * (pitch << 3)) - 4;
+
+    d3u8 = vld1_u8(s);
+    s += pitch;
+    d4u8 = vld1_u8(s);
+    s += pitch;
+    d5u8 = vld1_u8(s);
+    s += pitch;
+    d6u8 = vld1_u8(s);
+    s += pitch;
+    d7u8 = vld1_u8(s);
+    s += pitch;
+    d16u8 = vld1_u8(s);
+    s += pitch;
+    d17u8 = vld1_u8(s);
+    s += pitch;
+    d18u8 = vld1_u8(s);
+
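+    // Three vtrn passes (32-, 16-, then 8-bit) transpose the 8x8 block so
+    // each d-register ends up holding one column, i.e. one filter tap p3..q3.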
+    d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8), vreinterpret_u32_u8(d7u8));
+    d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8), vreinterpret_u32_u8(d16u8));
+    d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8), vreinterpret_u32_u8(d17u8));
+    d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8), vreinterpret_u32_u8(d18u8));
+
+    d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
+                      vreinterpret_u16_u32(d2tmp2.val[0]));
+    d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
+                      vreinterpret_u16_u32(d2tmp3.val[0]));
+    d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
+                      vreinterpret_u16_u32(d2tmp2.val[1]));
+    d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
+                      vreinterpret_u16_u32(d2tmp3.val[1]));
+
+    d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
+                     vreinterpret_u8_u16(d2tmp5.val[0]));
+    d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
+                     vreinterpret_u8_u16(d2tmp5.val[1]));
+    d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
+                      vreinterpret_u8_u16(d2tmp7.val[0]));
+    d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
+                      vreinterpret_u8_u16(d2tmp7.val[1]));
+
+    d3u8 = d2tmp8.val[0];
+    d4u8 = d2tmp8.val[1];
+    d5u8 = d2tmp9.val[0];
+    d6u8 = d2tmp9.val[1];
+    d7u8 = d2tmp10.val[0];
+    d16u8 = d2tmp10.val[1];
+    d17u8 = d2tmp11.val[0];
+    d18u8 = d2tmp11.val[1];
+
+    mbloop_filter_neon(dblimit, dlimit, dthresh, d3u8, d4u8, d5u8, d6u8, d7u8,
+                       d16u8, d17u8, d18u8, &d0u8, &d1u8, &d2u8, &d3u8, &d4u8,
+                       &d5u8);
+
+    d4Result.val[0] = d0u8;
+    d4Result.val[1] = d1u8;
+    d4Result.val[2] = d2u8;
+    d4Result.val[3] = d3u8;
+
+    d2Result.val[0] = d4u8;
+    d2Result.val[1] = d5u8;
+
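+    // vst4_lane scatters p2..q0 back as four bytes per row at src - 3 and
+    // vst2_lane writes q1, q2 at src + 1. Note the store base ignores i, so
+    // this appears to assume count == 1 (as the dual wrappers call it).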
+    s = src - 3;
+    vst4_lane_u8(s, d4Result, 0);
+    s += pitch;
+    vst4_lane_u8(s, d4Result, 1);
+    s += pitch;
+    vst4_lane_u8(s, d4Result, 2);
+    s += pitch;
+    vst4_lane_u8(s, d4Result, 3);
+    s += pitch;
+    vst4_lane_u8(s, d4Result, 4);
+    s += pitch;
+    vst4_lane_u8(s, d4Result, 5);
+    s += pitch;
+    vst4_lane_u8(s, d4Result, 6);
+    s += pitch;
+    vst4_lane_u8(s, d4Result, 7);
+
+    s = src + 1;
+    vst2_lane_u8(s, d2Result, 0);
+    s += pitch;
+    vst2_lane_u8(s, d2Result, 1);
+    s += pitch;
+    vst2_lane_u8(s, d2Result, 2);
+    s += pitch;
+    vst2_lane_u8(s, d2Result, 3);
+    s += pitch;
+    vst2_lane_u8(s, d2Result, 4);
+    s += pitch;
+    vst2_lane_u8(s, d2Result, 5);
+    s += pitch;
+    vst2_lane_u8(s, d2Result, 6);
+    s += pitch;
+    vst2_lane_u8(s, d2Result, 7);
+  }
+  return;
 }
diff --git a/vpx_dsp/arm/loopfilter_neon.c b/vpx_dsp/arm/loopfilter_neon.c
index eff87d29bd3d895f1da1f9ef57b70723abbdfc51..6abc7aec0aadf0c15c396d811850851a0c18f7f4 100644
--- a/vpx_dsp/arm/loopfilter_neon.c
+++ b/vpx_dsp/arm/loopfilter_neon.c
@@ -14,42 +14,32 @@
 #include "./vpx_config.h"
 #include "vpx/vpx_integer.h"
 
-void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int p,
-                                  const uint8_t *blimit0,
-                                  const uint8_t *limit0,
-                                  const uint8_t *thresh0,
-                                  const uint8_t *blimit1,
-                                  const uint8_t *limit1,
+void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
+                                  const uint8_t *limit0, const uint8_t *thresh0,
+                                  const uint8_t *blimit1, const uint8_t *limit1,
                                   const uint8_t *thresh1) {
   vpx_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0, 1);
   vpx_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
 }
 
 #if HAVE_NEON_ASM
-void vpx_lpf_horizontal_8_dual_neon(uint8_t *s, int p /* pitch */,
-                                    const uint8_t *blimit0,
-                                    const uint8_t *limit0,
-                                    const uint8_t *thresh0,
-                                    const uint8_t *blimit1,
-                                    const uint8_t *limit1,
-                                    const uint8_t *thresh1) {
+void vpx_lpf_horizontal_8_dual_neon(
+    uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
+    const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
+    const uint8_t *limit1, const uint8_t *thresh1) {
   vpx_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0, 1);
   vpx_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1, 1);
 }
 
-void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int p,
-                                  const uint8_t *blimit0,
-                                  const uint8_t *limit0,
-                                  const uint8_t *thresh0,
-                                  const uint8_t *blimit1,
-                                  const uint8_t *limit1,
+void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0,
+                                  const uint8_t *limit0, const uint8_t *thresh0,
+                                  const uint8_t *blimit1, const uint8_t *limit1,
                                   const uint8_t *thresh1) {
   vpx_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0, 1);
   vpx_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
 }
 
-void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int p,
-                                   const uint8_t *blimit,
+void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit,
                                    const uint8_t *thresh) {
   vpx_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
diff --git a/vpx_dsp/arm/sad4d_neon.c b/vpx_dsp/arm/sad4d_neon.c
index c7704dc1be67266995b6e117adba5248f53307b3..dc20398000aaa4daffb16ef65a9c0c7863dd7c78 100644
--- a/vpx_dsp/arm/sad4d_neon.c
+++ b/vpx_dsp/arm/sad4d_neon.c
@@ -16,10 +16,10 @@
 
 static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
                                                     const uint16x8_t vec_hi) {
-  const uint32x4_t vec_l_lo = vaddl_u16(vget_low_u16(vec_lo),
-                                        vget_high_u16(vec_lo));
-  const uint32x4_t vec_l_hi = vaddl_u16(vget_low_u16(vec_hi),
-                                        vget_high_u16(vec_hi));
+  const uint32x4_t vec_l_lo =
+      vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo));
+  const uint32x4_t vec_l_hi =
+      vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi));
   const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);
   const uint64x2_t b = vpaddlq_u32(a);
   const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
@@ -33,8 +33,7 @@ static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
 static void sad_neon_64(const uint8x16_t vec_src_00,
                         const uint8x16_t vec_src_16,
                         const uint8x16_t vec_src_32,
-                        const uint8x16_t vec_src_48,
-                        const uint8_t *ref,
+                        const uint8x16_t vec_src_48, const uint8_t *ref,
                         uint16x8_t *vec_sum_ref_lo,
                         uint16x8_t *vec_sum_ref_hi) {
   const uint8x16_t vec_ref_00 = vld1q_u8(ref);
@@ -63,8 +62,7 @@ static void sad_neon_64(const uint8x16_t vec_src_00,
 // Calculate the absolute difference of 32 bytes from vec_src_00, vec_src_16,
 // and ref. Accumulate partial sums in vec_sum_ref_lo and vec_sum_ref_hi.
 static void sad_neon_32(const uint8x16_t vec_src_00,
-                        const uint8x16_t vec_src_16,
-                        const uint8_t *ref,
+                        const uint8x16_t vec_src_16, const uint8_t *ref,
                         uint16x8_t *vec_sum_ref_lo,
                         uint16x8_t *vec_sum_ref_hi) {
   const uint8x16_t vec_ref_00 = vld1q_u8(ref);
@@ -81,7 +79,7 @@ static void sad_neon_32(const uint8x16_t vec_src_00,
 }
 
 void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride,
-                          const uint8_t* const ref[4], int ref_stride,
+                          const uint8_t *const ref[4], int ref_stride,
                           uint32_t *res) {
   int i;
   uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
@@ -127,7 +125,7 @@ void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride,
 }
 
 void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride,
-                          const uint8_t* const ref[4], int ref_stride,
+                          const uint8_t *const ref[4], int ref_stride,
                           uint32_t *res) {
   int i;
   uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
@@ -148,14 +146,14 @@ void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride,
     const uint8x16_t vec_src_00 = vld1q_u8(src);
     const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
 
-    sad_neon_32(vec_src_00, vec_src_16, ref0,
-                &vec_sum_ref0_lo, &vec_sum_ref0_hi);
-    sad_neon_32(vec_src_00, vec_src_16, ref1,
-                &vec_sum_ref1_lo, &vec_sum_ref1_hi);
-    sad_neon_32(vec_src_00, vec_src_16, ref2,
-                &vec_sum_ref2_lo, &vec_sum_ref2_hi);
-    sad_neon_32(vec_src_00, vec_src_16, ref3,
-                &vec_sum_ref3_lo, &vec_sum_ref3_hi);
+    sad_neon_32(vec_src_00, vec_src_16, ref0, &vec_sum_ref0_lo,
+                &vec_sum_ref0_hi);
+    sad_neon_32(vec_src_00, vec_src_16, ref1, &vec_sum_ref1_lo,
+                &vec_sum_ref1_hi);
+    sad_neon_32(vec_src_00, vec_src_16, ref2, &vec_sum_ref2_lo,
+                &vec_sum_ref2_hi);
+    sad_neon_32(vec_src_00, vec_src_16, ref3, &vec_sum_ref3_lo,
+                &vec_sum_ref3_hi);
 
     src += src_stride;
     ref0 += ref_stride;
@@ -171,7 +169,7 @@ void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride,
 }
 
 void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride,
-                          const uint8_t* const ref[4], int ref_stride,
+                          const uint8_t *const ref[4], int ref_stride,
                           uint32_t *res) {
   int i;
   uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
@@ -195,20 +193,20 @@ void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride,
     const uint8x16_t vec_ref2 = vld1q_u8(ref2);
     const uint8x16_t vec_ref3 = vld1q_u8(ref3);
 
-    vec_sum_ref0_lo = vabal_u8(vec_sum_ref0_lo, vget_low_u8(vec_src),
-                               vget_low_u8(vec_ref0));
+    vec_sum_ref0_lo =
+        vabal_u8(vec_sum_ref0_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref0));
     vec_sum_ref0_hi = vabal_u8(vec_sum_ref0_hi, vget_high_u8(vec_src),
                                vget_high_u8(vec_ref0));
-    vec_sum_ref1_lo = vabal_u8(vec_sum_ref1_lo, vget_low_u8(vec_src),
-                               vget_low_u8(vec_ref1));
+    vec_sum_ref1_lo =
+        vabal_u8(vec_sum_ref1_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref1));
     vec_sum_ref1_hi = vabal_u8(vec_sum_ref1_hi, vget_high_u8(vec_src),
                                vget_high_u8(vec_ref1));
-    vec_sum_ref2_lo = vabal_u8(vec_sum_ref2_lo, vget_low_u8(vec_src),
-                               vget_low_u8(vec_ref2));
+    vec_sum_ref2_lo =
+        vabal_u8(vec_sum_ref2_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref2));
     vec_sum_ref2_hi = vabal_u8(vec_sum_ref2_hi, vget_high_u8(vec_src),
                                vget_high_u8(vec_ref2));
-    vec_sum_ref3_lo = vabal_u8(vec_sum_ref3_lo, vget_low_u8(vec_src),
-                               vget_low_u8(vec_ref3));
+    vec_sum_ref3_lo =
+        vabal_u8(vec_sum_ref3_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref3));
     vec_sum_ref3_hi = vabal_u8(vec_sum_ref3_hi, vget_high_u8(vec_src),
                                vget_high_u8(vec_ref3));
 
diff --git a/vpx_dsp/arm/sad_neon.c b/vpx_dsp/arm/sad_neon.c
index 173f08ac3c3e202764a8dd01a43a9b8877d08289..ff3228768cefc8455f65267982b881c9c4ebf7d2 100644
--- a/vpx_dsp/arm/sad_neon.c
+++ b/vpx_dsp/arm/sad_neon.c
@@ -14,114 +14,105 @@
 
 #include "vpx/vpx_integer.h"
 
-unsigned int vpx_sad8x16_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
+unsigned int vpx_sad8x16_neon(unsigned char *src_ptr, int src_stride,
+                              unsigned char *ref_ptr, int ref_stride) {
+  uint8x8_t d0, d8;
+  uint16x8_t q12;
+  uint32x4_t q1;
+  uint64x2_t q3;
+  uint32x2_t d5;
+  int i;
+
+  d0 = vld1_u8(src_ptr);
+  src_ptr += src_stride;
+  d8 = vld1_u8(ref_ptr);
+  ref_ptr += ref_stride;
+  q12 = vabdl_u8(d0, d8);
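+  // The first row seeds the 16-bit SAD accumulator via vabdl_u8; the
+  // remaining 15 rows accumulate into it with vabal_u8 below.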
 
+  for (i = 0; i < 15; i++) {
     d0 = vld1_u8(src_ptr);
     src_ptr += src_stride;
     d8 = vld1_u8(ref_ptr);
     ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 15; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
+    q12 = vabal_u8(q12, d0, d8);
+  }
+
+  q1 = vpaddlq_u16(q12);
+  q3 = vpaddlq_u32(q1);
+  d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
+                vreinterpret_u32_u64(vget_high_u64(q3)));
+
+  return vget_lane_u32(d5, 0);
 }
 
-unsigned int vpx_sad4x4_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x2_t d1;
-    uint64x1_t d3;
-    int i;
+unsigned int vpx_sad4x4_neon(unsigned char *src_ptr, int src_stride,
+                             unsigned char *ref_ptr, int ref_stride) {
+  uint8x8_t d0, d8;
+  uint16x8_t q12;
+  uint32x2_t d1;
+  uint64x1_t d3;
+  int i;
+
+  d0 = vld1_u8(src_ptr);
+  src_ptr += src_stride;
+  d8 = vld1_u8(ref_ptr);
+  ref_ptr += ref_stride;
+  q12 = vabdl_u8(d0, d8);
 
+  for (i = 0; i < 3; i++) {
     d0 = vld1_u8(src_ptr);
     src_ptr += src_stride;
     d8 = vld1_u8(ref_ptr);
     ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 3; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
+    q12 = vabal_u8(q12, d0, d8);
+  }
 
-    d1 = vpaddl_u16(vget_low_u16(q12));
-    d3 = vpaddl_u32(d1);
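+  // Each vld1_u8 pulls 8 bytes but the block is only 4 wide, so just the
+  // low four lanes of q12 hold valid differences and only they are reduced.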
+  d1 = vpaddl_u16(vget_low_u16(q12));
+  d3 = vpaddl_u32(d1);
 
-    return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
+  return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
 }
 
-unsigned int vpx_sad16x8_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x16_t q0, q4;
-    uint16x8_t q12, q13;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
+unsigned int vpx_sad16x8_neon(unsigned char *src_ptr, int src_stride,
+                              unsigned char *ref_ptr, int ref_stride) {
+  uint8x16_t q0, q4;
+  uint16x8_t q12, q13;
+  uint32x4_t q1;
+  uint64x2_t q3;
+  uint32x2_t d5;
+  int i;
+
+  q0 = vld1q_u8(src_ptr);
+  src_ptr += src_stride;
+  q4 = vld1q_u8(ref_ptr);
+  ref_ptr += ref_stride;
+  q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
+  q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
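+  // 16-wide rows are split across two uint16x8 accumulators (low and high
+  // halves), which are merged with vaddq_u16 after the loop.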
 
+  for (i = 0; i < 7; i++) {
     q0 = vld1q_u8(src_ptr);
     src_ptr += src_stride;
     q4 = vld1q_u8(ref_ptr);
     ref_ptr += ref_stride;
-    q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
-    q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
-
-    for (i = 0; i < 7; i++) {
-        q0 = vld1q_u8(src_ptr);
-        src_ptr += src_stride;
-        q4 = vld1q_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
-        q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
-    }
-
-    q12 = vaddq_u16(q12, q13);
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
+    q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
+    q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
+  }
+
+  q12 = vaddq_u16(q12, q13);
+  q1 = vpaddlq_u16(q12);
+  q3 = vpaddlq_u32(q1);
+  d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
+                vreinterpret_u32_u64(vget_high_u64(q3)));
+
+  return vget_lane_u32(d5, 0);
 }
 
 static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
                                                     const uint16x8_t vec_hi) {
-  const uint32x4_t vec_l_lo = vaddl_u16(vget_low_u16(vec_lo),
-                                        vget_high_u16(vec_lo));
-  const uint32x4_t vec_l_hi = vaddl_u16(vget_low_u16(vec_hi),
-                                        vget_high_u16(vec_hi));
+  const uint32x4_t vec_l_lo =
+      vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo));
+  const uint32x4_t vec_l_hi =
+      vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi));
   const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);
   const uint64x2_t b = vpaddlq_u32(a);
   const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
@@ -208,10 +199,10 @@ unsigned int vpx_sad16x16_neon(const uint8_t *src, int src_stride,
     const uint8x16_t vec_ref = vld1q_u8(ref);
     src += src_stride;
     ref += ref_stride;
-    vec_accum_lo = vabal_u8(vec_accum_lo, vget_low_u8(vec_src),
-                            vget_low_u8(vec_ref));
-    vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src),
-                            vget_high_u8(vec_ref));
+    vec_accum_lo =
+        vabal_u8(vec_accum_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref));
+    vec_accum_hi =
+        vabal_u8(vec_accum_hi, vget_high_u8(vec_src), vget_high_u8(vec_ref));
   }
   return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
 }
diff --git a/vpx_dsp/arm/subpel_variance_media.c b/vpx_dsp/arm/subpel_variance_media.c
index e7d8c85fb510028d6fde9330fd589d0841a95e48..ab53361579df363db0dfd5464aee8d860b06dc07 100644
--- a/vpx_dsp/arm/subpel_variance_media.c
+++ b/vpx_dsp/arm/subpel_variance_media.c
@@ -14,91 +14,66 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_MEDIA
-static const int16_t bilinear_filters_media[8][2] = {
-  { 128,   0 },
-  { 112,  16 },
-  {  96,  32 },
-  {  80,  48 },
-  {  64,  64 },
-  {  48,  80 },
-  {  32,  96 },
-  {  16, 112 }
-};
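+// Each filter pair sums to 128 (unity gain in Q7); the row index is the
+// subpel offset in eighths of a pixel.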
+static const int16_t bilinear_filters_media[8][2] = { { 128, 0 }, { 112, 16 },
+                                                      { 96, 32 }, { 80, 48 },
+                                                      { 64, 64 }, { 48, 80 },
+                                                      { 32, 96 }, { 16, 112 } };
 
-extern void vpx_filter_block2d_bil_first_pass_media(const uint8_t *src_ptr,
-                                                    uint16_t *dst_ptr,
-                                                    uint32_t src_pitch,
-                                                    uint32_t height,
-                                                    uint32_t width,
-                                                    const int16_t *filter);
+extern void vpx_filter_block2d_bil_first_pass_media(
+    const uint8_t *src_ptr, uint16_t *dst_ptr, uint32_t src_pitch,
+    uint32_t height, uint32_t width, const int16_t *filter);
 
-extern void vpx_filter_block2d_bil_second_pass_media(const uint16_t *src_ptr,
-                                                     uint8_t *dst_ptr,
-                                                     int32_t src_pitch,
-                                                     uint32_t height,
-                                                     uint32_t width,
-                                                     const int16_t *filter);
+extern void vpx_filter_block2d_bil_second_pass_media(
+    const uint16_t *src_ptr, uint8_t *dst_ptr, int32_t src_pitch,
+    uint32_t height, uint32_t width, const int16_t *filter);
 
-
-unsigned int vpx_sub_pixel_variance8x8_media(const uint8_t *src_ptr,
-                                             int src_pixels_per_line,
-                                             int xoffset, int yoffset,
-                                             const uint8_t *dst_ptr,
-                                             int dst_pixels_per_line,
-                                             unsigned int *sse) {
-  uint16_t first_pass[10*8];
-  uint8_t  second_pass[8*8];
+unsigned int vpx_sub_pixel_variance8x8_media(
+    const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset,
+    const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) {
+  uint16_t first_pass[10 * 8];
+  uint8_t second_pass[8 * 8];
   const int16_t *HFilter, *VFilter;
 
   HFilter = bilinear_filters_media[xoffset];
   VFilter = bilinear_filters_media[yoffset];
 
   vpx_filter_block2d_bil_first_pass_media(src_ptr, first_pass,
-                                          src_pixels_per_line,
-                                          9, 8, HFilter);
-  vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass,
-                                           8, 8, 8, VFilter);
+                                          src_pixels_per_line, 9, 8, HFilter);
+  vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, 8, 8, 8,
+                                           VFilter);
 
-  return vpx_variance8x8_media(second_pass, 8, dst_ptr,
-                               dst_pixels_per_line, sse);
+  return vpx_variance8x8_media(second_pass, 8, dst_ptr, dst_pixels_per_line,
+                               sse);
 }
 
-unsigned int vpx_sub_pixel_variance16x16_media(const uint8_t *src_ptr,
-                                               int src_pixels_per_line,
-                                               int xoffset,
-                                               int yoffset,
-                                               const uint8_t *dst_ptr,
-                                               int dst_pixels_per_line,
-                                               unsigned int *sse) {
-  uint16_t first_pass[36*16];
-  uint8_t  second_pass[20*16];
+unsigned int vpx_sub_pixel_variance16x16_media(
+    const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset,
+    const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) {
+  uint16_t first_pass[36 * 16];
+  uint8_t second_pass[20 * 16];
   const int16_t *HFilter, *VFilter;
   unsigned int var;
 
   if (xoffset == 4 && yoffset == 0) {
-    var = vpx_variance_halfpixvar16x16_h_media(src_ptr, src_pixels_per_line,
-                                               dst_ptr, dst_pixels_per_line,
-                                               sse);
+    var = vpx_variance_halfpixvar16x16_h_media(
+        src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   } else if (xoffset == 0 && yoffset == 4) {
-    var = vpx_variance_halfpixvar16x16_v_media(src_ptr, src_pixels_per_line,
-                                               dst_ptr, dst_pixels_per_line,
-                                               sse);
+    var = vpx_variance_halfpixvar16x16_v_media(
+        src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   } else if (xoffset == 4 && yoffset == 4) {
-    var = vpx_variance_halfpixvar16x16_hv_media(src_ptr, src_pixels_per_line,
-                                                dst_ptr, dst_pixels_per_line,
-                                                sse);
+    var = vpx_variance_halfpixvar16x16_hv_media(
+        src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   } else {
     HFilter = bilinear_filters_media[xoffset];
     VFilter = bilinear_filters_media[yoffset];
 
-    vpx_filter_block2d_bil_first_pass_media(src_ptr, first_pass,
-                                            src_pixels_per_line,
-                                            17, 16, HFilter);
-    vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass,
-                                             16, 16, 16, VFilter);
+    vpx_filter_block2d_bil_first_pass_media(
+        src_ptr, first_pass, src_pixels_per_line, 17, 16, HFilter);
+    vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, 16, 16,
+                                             16, VFilter);
 
-    var = vpx_variance16x16_media(second_pass, 16, dst_ptr,
-                                  dst_pixels_per_line, sse);
+    var = vpx_variance16x16_media(second_pass, 16, dst_ptr, dst_pixels_per_line,
+                                  sse);
   }
   return var;
 }
diff --git a/vpx_dsp/arm/subpel_variance_neon.c b/vpx_dsp/arm/subpel_variance_neon.c
index 40e2cc89b35d3e657bb1d025bf548091672138fa..f044e11a1553651086eb88f0266713e62e05de3f 100644
--- a/vpx_dsp/arm/subpel_variance_neon.c
+++ b/vpx_dsp/arm/subpel_variance_neon.c
@@ -18,14 +18,8 @@
 #include "vpx_dsp/variance.h"
 
 static const uint8_t bilinear_filters[8][2] = {
-  { 128,   0, },
-  { 112,  16, },
-  {  96,  32, },
-  {  80,  48, },
-  {  64,  64, },
-  {  48,  80, },
-  {  32,  96, },
-  {  16, 112, },
+  { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
+  { 64, 64 }, { 48, 80 },  { 32, 96 }, { 16, 112 },
 };
 
 static void var_filter_block2d_bil_w8(const uint8_t *src_ptr,
@@ -79,74 +73,61 @@ static void var_filter_block2d_bil_w16(const uint8_t *src_ptr,
   }
 }
 
-unsigned int vpx_sub_pixel_variance8x8_neon(const uint8_t *src,
-                                            int src_stride,
-                                            int xoffset,
-                                            int yoffset,
-                                            const uint8_t *dst,
-                                            int dst_stride,
+unsigned int vpx_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride,
+                                            int xoffset, int yoffset,
+                                            const uint8_t *dst, int dst_stride,
                                             unsigned int *sse) {
   DECLARE_ALIGNED(16, uint8_t, temp2[8 * 8]);
   DECLARE_ALIGNED(16, uint8_t, fdata3[9 * 8]);
 
-  var_filter_block2d_bil_w8(src, fdata3, src_stride, 1,
-                            9, 8,
+  var_filter_block2d_bil_w8(src, fdata3, src_stride, 1, 9, 8,
                             bilinear_filters[xoffset]);
-  var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8,
-                            8, bilinear_filters[yoffset]);
+  var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8, 8,
+                            bilinear_filters[yoffset]);
   return vpx_variance8x8_neon(temp2, 8, dst, dst_stride, sse);
 }
 
 unsigned int vpx_sub_pixel_variance16x16_neon(const uint8_t *src,
-                                              int src_stride,
-                                              int xoffset,
-                                              int yoffset,
-                                              const uint8_t *dst,
+                                              int src_stride, int xoffset,
+                                              int yoffset, const uint8_t *dst,
                                               int dst_stride,
                                               unsigned int *sse) {
   DECLARE_ALIGNED(16, uint8_t, temp2[16 * 16]);
   DECLARE_ALIGNED(16, uint8_t, fdata3[17 * 16]);
 
-  var_filter_block2d_bil_w16(src, fdata3, src_stride, 1,
-                             17, 16,
+  var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 17, 16,
                              bilinear_filters[xoffset]);
-  var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16,
-                             16, bilinear_filters[yoffset]);
+  var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16, 16,
+                             bilinear_filters[yoffset]);
   return vpx_variance16x16_neon(temp2, 16, dst, dst_stride, sse);
 }
 
 unsigned int vpx_sub_pixel_variance32x32_neon(const uint8_t *src,
-                                              int src_stride,
-                                              int xoffset,
-                                              int yoffset,
-                                              const uint8_t *dst,
+                                              int src_stride, int xoffset,
+                                              int yoffset, const uint8_t *dst,
                                               int dst_stride,
                                               unsigned int *sse) {
   DECLARE_ALIGNED(16, uint8_t, temp2[32 * 32]);
   DECLARE_ALIGNED(16, uint8_t, fdata3[33 * 32]);
 
-  var_filter_block2d_bil_w16(src, fdata3, src_stride, 1,
-                             33, 32,
+  var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 33, 32,
                              bilinear_filters[xoffset]);
-  var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32,
-                             32, bilinear_filters[yoffset]);
+  var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32, 32,
+                             bilinear_filters[yoffset]);
   return vpx_variance32x32_neon(temp2, 32, dst, dst_stride, sse);
 }
 
 unsigned int vpx_sub_pixel_variance64x64_neon(const uint8_t *src,
-                                              int src_stride,
-                                              int xoffset,
-                                              int yoffset,
-                                              const uint8_t *dst,
+                                              int src_stride, int xoffset,
+                                              int yoffset, const uint8_t *dst,
                                               int dst_stride,
                                               unsigned int *sse) {
   DECLARE_ALIGNED(16, uint8_t, temp2[64 * 64]);
   DECLARE_ALIGNED(16, uint8_t, fdata3[65 * 64]);
 
-  var_filter_block2d_bil_w16(src, fdata3, src_stride, 1,
-                             65, 64,
+  var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 65, 64,
                              bilinear_filters[xoffset]);
-  var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64,
-                             64, bilinear_filters[yoffset]);
+  var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64, 64,
+                             bilinear_filters[yoffset]);
   return vpx_variance64x64_neon(temp2, 64, dst, dst_stride, sse);
 }
diff --git a/vpx_dsp/arm/subtract_neon.c b/vpx_dsp/arm/subtract_neon.c
index 7b146095ea2f5aed80e05a3bd7f52e1d73c62405..ce81fb630f248f2f2053a15befb1691333b6d083 100644
--- a/vpx_dsp/arm/subtract_neon.c
+++ b/vpx_dsp/arm/subtract_neon.c
@@ -13,10 +13,10 @@
 #include "./vpx_config.h"
 #include "vpx/vpx_integer.h"
 
-void vpx_subtract_block_neon(int rows, int cols,
-                             int16_t *diff, ptrdiff_t diff_stride,
-                             const uint8_t *src, ptrdiff_t src_stride,
-                             const uint8_t *pred, ptrdiff_t pred_stride) {
+void vpx_subtract_block_neon(int rows, int cols, int16_t *diff,
+                             ptrdiff_t diff_stride, const uint8_t *src,
+                             ptrdiff_t src_stride, const uint8_t *pred,
+                             ptrdiff_t pred_stride) {
   int r, c;
 
   if (cols > 16) {
@@ -24,38 +24,38 @@ void vpx_subtract_block_neon(int rows, int cols,
       for (c = 0; c < cols; c += 32) {
         const uint8x16_t v_src_00 = vld1q_u8(&src[c + 0]);
         const uint8x16_t v_src_16 = vld1q_u8(&src[c + 16]);
-        const uint8x16_t v_pred_00 = vld1q_u8(&pred[c +  0]);
+        const uint8x16_t v_pred_00 = vld1q_u8(&pred[c + 0]);
         const uint8x16_t v_pred_16 = vld1q_u8(&pred[c + 16]);
-        const uint16x8_t v_diff_lo_00 = vsubl_u8(vget_low_u8(v_src_00),
-                                                 vget_low_u8(v_pred_00));
-        const uint16x8_t v_diff_hi_00 = vsubl_u8(vget_high_u8(v_src_00),
-                                                 vget_high_u8(v_pred_00));
-        const uint16x8_t v_diff_lo_16 = vsubl_u8(vget_low_u8(v_src_16),
-                                                 vget_low_u8(v_pred_16));
-        const uint16x8_t v_diff_hi_16 = vsubl_u8(vget_high_u8(v_src_16),
-                                                 vget_high_u8(v_pred_16));
-        vst1q_s16(&diff[c +  0], vreinterpretq_s16_u16(v_diff_lo_00));
-        vst1q_s16(&diff[c +  8], vreinterpretq_s16_u16(v_diff_hi_00));
+        const uint16x8_t v_diff_lo_00 =
+            vsubl_u8(vget_low_u8(v_src_00), vget_low_u8(v_pred_00));
+        const uint16x8_t v_diff_hi_00 =
+            vsubl_u8(vget_high_u8(v_src_00), vget_high_u8(v_pred_00));
+        const uint16x8_t v_diff_lo_16 =
+            vsubl_u8(vget_low_u8(v_src_16), vget_low_u8(v_pred_16));
+        const uint16x8_t v_diff_hi_16 =
+            vsubl_u8(vget_high_u8(v_src_16), vget_high_u8(v_pred_16));
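+        // vsubl_u8 wraps modulo 2^16, so reinterpreting the results as
+        // int16 below recovers the correct signed src - pred differences.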
+        vst1q_s16(&diff[c + 0], vreinterpretq_s16_u16(v_diff_lo_00));
+        vst1q_s16(&diff[c + 8], vreinterpretq_s16_u16(v_diff_hi_00));
         vst1q_s16(&diff[c + 16], vreinterpretq_s16_u16(v_diff_lo_16));
         vst1q_s16(&diff[c + 24], vreinterpretq_s16_u16(v_diff_hi_16));
       }
       diff += diff_stride;
       pred += pred_stride;
-      src  += src_stride;
+      src += src_stride;
     }
   } else if (cols > 8) {
     for (r = 0; r < rows; ++r) {
       const uint8x16_t v_src = vld1q_u8(&src[0]);
       const uint8x16_t v_pred = vld1q_u8(&pred[0]);
-      const uint16x8_t v_diff_lo = vsubl_u8(vget_low_u8(v_src),
-                                            vget_low_u8(v_pred));
-      const uint16x8_t v_diff_hi = vsubl_u8(vget_high_u8(v_src),
-                                            vget_high_u8(v_pred));
+      const uint16x8_t v_diff_lo =
+          vsubl_u8(vget_low_u8(v_src), vget_low_u8(v_pred));
+      const uint16x8_t v_diff_hi =
+          vsubl_u8(vget_high_u8(v_src), vget_high_u8(v_pred));
       vst1q_s16(&diff[0], vreinterpretq_s16_u16(v_diff_lo));
       vst1q_s16(&diff[8], vreinterpretq_s16_u16(v_diff_hi));
       diff += diff_stride;
       pred += pred_stride;
-      src  += src_stride;
+      src += src_stride;
     }
   } else if (cols > 4) {
     for (r = 0; r < rows; ++r) {
@@ -65,16 +65,15 @@ void vpx_subtract_block_neon(int rows, int cols,
       vst1q_s16(&diff[0], vreinterpretq_s16_u16(v_diff));
       diff += diff_stride;
       pred += pred_stride;
-      src  += src_stride;
+      src += src_stride;
     }
   } else {
     for (r = 0; r < rows; ++r) {
-      for (c = 0; c < cols; ++c)
-        diff[c] = src[c] - pred[c];
+      for (c = 0; c < cols; ++c) diff[c] = src[c] - pred[c];
 
       diff += diff_stride;
       pred += pred_stride;
-      src  += src_stride;
+      src += src_stride;
     }
   }
 }
diff --git a/vpx_dsp/arm/variance_neon.c b/vpx_dsp/arm/variance_neon.c
index ede6e7bbb03b6915f6e19a1c383d86fd7477646c..f469afc4e4b8db4a93c6607a72678e315e3d3b2d 100644
--- a/vpx_dsp/arm/variance_neon.c
+++ b/vpx_dsp/arm/variance_neon.c
@@ -32,9 +32,9 @@ static INLINE int horizontal_add_s32x4(const int32x4_t v_32x4) {
 }
 
 // w * h must be less than 2048 or local variable v_sum may overflow.
-static void variance_neon_w8(const uint8_t *a, int a_stride,
-                             const uint8_t *b, int b_stride,
-                             int w, int h, uint32_t *sse, int *sum) {
+static void variance_neon_w8(const uint8_t *a, int a_stride, const uint8_t *b,
+                             int b_stride, int w, int h, uint32_t *sse,
+                             int *sum) {
   int i, j;
   int16x8_t v_sum = vdupq_n_s16(0);
   int32x4_t v_sse_lo = vdupq_n_s32(0);
@@ -47,12 +47,10 @@ static void variance_neon_w8(const uint8_t *a, int a_stride,
       const uint16x8_t v_diff = vsubl_u8(v_a, v_b);
       const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff);
       v_sum = vaddq_s16(v_sum, sv_diff);
-      v_sse_lo = vmlal_s16(v_sse_lo,
-                           vget_low_s16(sv_diff),
-                           vget_low_s16(sv_diff));
-      v_sse_hi = vmlal_s16(v_sse_hi,
-                           vget_high_s16(sv_diff),
-                           vget_high_s16(sv_diff));
+      v_sse_lo =
+          vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
+      v_sse_hi =
+          vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
     }
     a += a_stride;
     b += b_stride;
@@ -62,15 +60,13 @@ static void variance_neon_w8(const uint8_t *a, int a_stride,
   *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
 }
 
-void vpx_get8x8var_neon(const uint8_t *a, int a_stride,
-                        const uint8_t *b, int b_stride,
-                        unsigned int *sse, int *sum) {
+void vpx_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+                        int b_stride, unsigned int *sse, int *sum) {
   variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, sum);
 }
 
-void vpx_get16x16var_neon(const uint8_t *a, int a_stride,
-                          const uint8_t *b, int b_stride,
-                          unsigned int *sse, int *sum) {
+void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+                          int b_stride, unsigned int *sse, int *sum) {
   variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, sum);
 }
 
@@ -104,9 +100,8 @@ unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride,
   int sum1, sum2;
   uint32_t sse1, sse2;
   variance_neon_w8(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1);
-  variance_neon_w8(a + (32 * a_stride), a_stride,
-                   b + (32 * b_stride), b_stride, 32, 32,
-                   &sse2, &sum2);
+  variance_neon_w8(a + (32 * a_stride), a_stride, b + (32 * b_stride), b_stride,
+                   32, 32, &sse2, &sum2);
   *sse = sse1 + sse2;
   sum1 += sum2;
   return *sse - (((int64_t)sum1 * sum1) >> 11);  // >> 11 = / 32 * 64
@@ -118,9 +113,8 @@ unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride,
   int sum1, sum2;
   uint32_t sse1, sse2;
   variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
-  variance_neon_w8(a + (16 * a_stride), a_stride,
-                   b + (16 * b_stride), b_stride, 64, 16,
-                   &sse2, &sum2);
+  variance_neon_w8(a + (16 * a_stride), a_stride, b + (16 * b_stride), b_stride,
+                   64, 16, &sse2, &sum2);
   *sse = sse1 + sse2;
   sum1 += sum2;
   return *sse - (((int64_t)sum1 * sum1) >> 11);  // >> 11 = / 32 * 64
@@ -133,286 +127,273 @@ unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride,
   uint32_t sse1, sse2;
 
   variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
-  variance_neon_w8(a + (16 * a_stride), a_stride,
-                   b + (16 * b_stride), b_stride, 64, 16,
-                   &sse2, &sum2);
+  variance_neon_w8(a + (16 * a_stride), a_stride, b + (16 * b_stride), b_stride,
+                   64, 16, &sse2, &sum2);
   sse1 += sse2;
   sum1 += sum2;
 
-  variance_neon_w8(a + (16 * 2 * a_stride), a_stride,
-                   b + (16 * 2 * b_stride), b_stride,
-                   64, 16, &sse2, &sum2);
+  variance_neon_w8(a + (16 * 2 * a_stride), a_stride, b + (16 * 2 * b_stride),
+                   b_stride, 64, 16, &sse2, &sum2);
   sse1 += sse2;
   sum1 += sum2;
 
-  variance_neon_w8(a + (16 * 3 * a_stride), a_stride,
-                   b + (16 * 3 * b_stride), b_stride,
-                   64, 16, &sse2, &sum2);
+  variance_neon_w8(a + (16 * 3 * a_stride), a_stride, b + (16 * 3 * b_stride),
+                   b_stride, 64, 16, &sse2, &sum2);
   *sse = sse1 + sse2;
   sum1 += sum2;
   return *sse - (((int64_t)sum1 * sum1) >> 12);  // >> 12 = / 64 * 64
 }
 
-unsigned int vpx_variance16x8_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 4; i++) {
-        q0u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q1u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        __builtin_prefetch(src_ptr);
-
-        q2u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q3u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        __builtin_prefetch(ref_ptr);
-
-        q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
-        q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
-        q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
-        q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
+unsigned int vpx_variance16x8_neon(const unsigned char *src_ptr,
+                                   int source_stride,
+                                   const unsigned char *ref_ptr,
+                                   int recon_stride, unsigned int *sse) {
+  int i;
+  int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
+  uint32x2_t d0u32, d10u32;
+  int64x1_t d0s64, d1s64;
+  uint8x16_t q0u8, q1u8, q2u8, q3u8;
+  uint16x8_t q11u16, q12u16, q13u16, q14u16;
+  int32x4_t q8s32, q9s32, q10s32;
+  int64x2_t q0s64, q1s64, q5s64;
+
+  q8s32 = vdupq_n_s32(0);
+  q9s32 = vdupq_n_s32(0);
+  q10s32 = vdupq_n_s32(0);
+
+  for (i = 0; i < 4; i++) {
+    q0u8 = vld1q_u8(src_ptr);
+    src_ptr += source_stride;
+    q1u8 = vld1q_u8(src_ptr);
+    src_ptr += source_stride;
+    __builtin_prefetch(src_ptr);
 
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+    q2u8 = vld1q_u8(ref_ptr);
+    ref_ptr += recon_stride;
+    q3u8 = vld1q_u8(ref_ptr);
+    ref_ptr += recon_stride;
+    __builtin_prefetch(ref_ptr);
+
+    q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
+    q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
+    q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
+    q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
+
+    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
+    d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
+    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
+    q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
+    q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
+
+    d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
+    d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
+    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
+    q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
+    q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
+
+    d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
+    d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
+    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
+    q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
+    q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
+
+    d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
+    d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
+    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
+    q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
+    q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
+  }
 
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
+  q10s32 = vaddq_s32(q10s32, q9s32);
+  q0s64 = vpaddlq_s32(q8s32);
+  q1s64 = vpaddlq_s32(q10s32);
 
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
+  d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
+  d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
 
-    return vget_lane_u32(d0u32, 0);
-}
+  q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), vreinterpret_s32_s64(d0s64));
+  vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
 
-unsigned int vpx_variance8x16_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    uint8x8_t d0u8, d2u8, d4u8, d6u8;
-    int16x4_t d22s16, d23s16, d24s16, d25s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint16x8_t q11u16, q12u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 8; i++) {
-        d0u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d2u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        __builtin_prefetch(src_ptr);
-
-        d4u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d6u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        __builtin_prefetch(ref_ptr);
-
-        q11u16 = vsubl_u8(d0u8, d4u8);
-        q12u16 = vsubl_u8(d2u8, d6u8);
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-    }
+  d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
+  d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
 
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
+  return vget_lane_u32(d0u32, 0);
+}
 
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+unsigned int vpx_variance8x16_neon(const unsigned char *src_ptr,
+                                   int source_stride,
+                                   const unsigned char *ref_ptr,
+                                   int recon_stride, unsigned int *sse) {
+  int i;
+  uint8x8_t d0u8, d2u8, d4u8, d6u8;
+  int16x4_t d22s16, d23s16, d24s16, d25s16;
+  uint32x2_t d0u32, d10u32;
+  int64x1_t d0s64, d1s64;
+  uint16x8_t q11u16, q12u16;
+  int32x4_t q8s32, q9s32, q10s32;
+  int64x2_t q0s64, q1s64, q5s64;
+
+  q8s32 = vdupq_n_s32(0);
+  q9s32 = vdupq_n_s32(0);
+  q10s32 = vdupq_n_s32(0);
+
+  for (i = 0; i < 8; i++) {
+    d0u8 = vld1_u8(src_ptr);
+    src_ptr += source_stride;
+    d2u8 = vld1_u8(src_ptr);
+    src_ptr += source_stride;
+    __builtin_prefetch(src_ptr);
 
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
+    d4u8 = vld1_u8(ref_ptr);
+    ref_ptr += recon_stride;
+    d6u8 = vld1_u8(ref_ptr);
+    ref_ptr += recon_stride;
+    __builtin_prefetch(ref_ptr);
 
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
+    q11u16 = vsubl_u8(d0u8, d4u8);
+    q12u16 = vsubl_u8(d2u8, d6u8);
+
+    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
+    d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
+    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
+    q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
+    q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
+
+    d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
+    d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
+    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
+    q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
+    q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
+  }
 
-    return vget_lane_u32(d0u32, 0);
-}
+  q10s32 = vaddq_s32(q10s32, q9s32);
+  q0s64 = vpaddlq_s32(q8s32);
+  q1s64 = vpaddlq_s32(q10s32);
 
-unsigned int vpx_mse16x16_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    int64x1_t d0s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8;
-    int32x4_t q7s32, q8s32, q9s32, q10s32;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int64x2_t q1s64;
-
-    q7s32 = vdupq_n_s32(0);
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 8; i++) {  // mse16x16_neon_loop
-        q0u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q1u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q2u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q3u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-
-        q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
-        q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
-        q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
-        q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q7s32 = vmlal_s16(q7s32, d22s16, d22s16);
-        q8s32 = vmlal_s16(q8s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q7s32 = vmlal_s16(q7s32, d26s16, d26s16);
-        q8s32 = vmlal_s16(q8s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
+  d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
+  d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
 
-    q7s32 = vaddq_s32(q7s32, q8s32);
-    q9s32 = vaddq_s32(q9s32, q10s32);
-    q10s32 = vaddq_s32(q7s32, q9s32);
+  q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), vreinterpret_s32_s64(d0s64));
+  vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
 
-    q1s64 = vpaddlq_s32(q10s32);
-    d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+  d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
+  d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
 
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d0s64), 0);
-    return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
+  return vget_lane_u32(d0u32, 0);
 }
 
-unsigned int vpx_get4x4sse_cs_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride) {
-    int16x4_t d22s16, d24s16, d26s16, d28s16;
-    int64x1_t d0s64;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
-    int32x4_t q7s32, q8s32, q9s32, q10s32;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int64x2_t q1s64;
-
-    d0u8 = vld1_u8(src_ptr);
+unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
+                               const unsigned char *ref_ptr, int recon_stride,
+                               unsigned int *sse) {
+  int i;
+  int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
+  int64x1_t d0s64;
+  uint8x16_t q0u8, q1u8, q2u8, q3u8;
+  int32x4_t q7s32, q8s32, q9s32, q10s32;
+  uint16x8_t q11u16, q12u16, q13u16, q14u16;
+  int64x2_t q1s64;
+
+  q7s32 = vdupq_n_s32(0);
+  q8s32 = vdupq_n_s32(0);
+  q9s32 = vdupq_n_s32(0);
+  q10s32 = vdupq_n_s32(0);
+
+  for (i = 0; i < 8; i++) {  // mse16x16_neon_loop
+    q0u8 = vld1q_u8(src_ptr);
     src_ptr += source_stride;
-    d4u8 = vld1_u8(ref_ptr);
-    ref_ptr += recon_stride;
-    d1u8 = vld1_u8(src_ptr);
+    q1u8 = vld1q_u8(src_ptr);
     src_ptr += source_stride;
-    d5u8 = vld1_u8(ref_ptr);
+    q2u8 = vld1q_u8(ref_ptr);
     ref_ptr += recon_stride;
-    d2u8 = vld1_u8(src_ptr);
-    src_ptr += source_stride;
-    d6u8 = vld1_u8(ref_ptr);
-    ref_ptr += recon_stride;
-    d3u8 = vld1_u8(src_ptr);
-    src_ptr += source_stride;
-    d7u8 = vld1_u8(ref_ptr);
+    q3u8 = vld1q_u8(ref_ptr);
     ref_ptr += recon_stride;
 
-    q11u16 = vsubl_u8(d0u8, d4u8);
-    q12u16 = vsubl_u8(d1u8, d5u8);
-    q13u16 = vsubl_u8(d2u8, d6u8);
-    q14u16 = vsubl_u8(d3u8, d7u8);
-
-    d22s16 = vget_low_s16(vreinterpretq_s16_u16(q11u16));
-    d24s16 = vget_low_s16(vreinterpretq_s16_u16(q12u16));
-    d26s16 = vget_low_s16(vreinterpretq_s16_u16(q13u16));
-    d28s16 = vget_low_s16(vreinterpretq_s16_u16(q14u16));
+    q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
+    q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
+    q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
+    q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
+
+    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
+    d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
+    q7s32 = vmlal_s16(q7s32, d22s16, d22s16);
+    q8s32 = vmlal_s16(q8s32, d23s16, d23s16);
+
+    d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
+    d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
+    q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
+    q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
+
+    d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
+    d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
+    q7s32 = vmlal_s16(q7s32, d26s16, d26s16);
+    q8s32 = vmlal_s16(q8s32, d27s16, d27s16);
+
+    d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
+    d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
+    q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
+    q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
+  }
 
-    q7s32 = vmull_s16(d22s16, d22s16);
-    q8s32 = vmull_s16(d24s16, d24s16);
-    q9s32 = vmull_s16(d26s16, d26s16);
-    q10s32 = vmull_s16(d28s16, d28s16);
+  q7s32 = vaddq_s32(q7s32, q8s32);
+  q9s32 = vaddq_s32(q9s32, q10s32);
+  q10s32 = vaddq_s32(q7s32, q9s32);
 
-    q7s32 = vaddq_s32(q7s32, q8s32);
-    q9s32 = vaddq_s32(q9s32, q10s32);
-    q9s32 = vaddq_s32(q7s32, q9s32);
+  q1s64 = vpaddlq_s32(q10s32);
+  d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
 
-    q1s64 = vpaddlq_s32(q9s32);
-    d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+  vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d0s64), 0);
+  return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
+}
 
-    return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
+unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr,
+                                   int source_stride,
+                                   const unsigned char *ref_ptr,
+                                   int recon_stride) {
+  int16x4_t d22s16, d24s16, d26s16, d28s16;
+  int64x1_t d0s64;
+  uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
+  int32x4_t q7s32, q8s32, q9s32, q10s32;
+  uint16x8_t q11u16, q12u16, q13u16, q14u16;
+  int64x2_t q1s64;
+
+  d0u8 = vld1_u8(src_ptr);
+  src_ptr += source_stride;
+  d4u8 = vld1_u8(ref_ptr);
+  ref_ptr += recon_stride;
+  d1u8 = vld1_u8(src_ptr);
+  src_ptr += source_stride;
+  d5u8 = vld1_u8(ref_ptr);
+  ref_ptr += recon_stride;
+  d2u8 = vld1_u8(src_ptr);
+  src_ptr += source_stride;
+  d6u8 = vld1_u8(ref_ptr);
+  ref_ptr += recon_stride;
+  d3u8 = vld1_u8(src_ptr);
+  src_ptr += source_stride;
+  d7u8 = vld1_u8(ref_ptr);
+  ref_ptr += recon_stride;
+
+  q11u16 = vsubl_u8(d0u8, d4u8);
+  q12u16 = vsubl_u8(d1u8, d5u8);
+  q13u16 = vsubl_u8(d2u8, d6u8);
+  q14u16 = vsubl_u8(d3u8, d7u8);
+
+  d22s16 = vget_low_s16(vreinterpretq_s16_u16(q11u16));
+  d24s16 = vget_low_s16(vreinterpretq_s16_u16(q12u16));
+  d26s16 = vget_low_s16(vreinterpretq_s16_u16(q13u16));
+  d28s16 = vget_low_s16(vreinterpretq_s16_u16(q14u16));
+
+  q7s32 = vmull_s16(d22s16, d22s16);
+  q8s32 = vmull_s16(d24s16, d24s16);
+  q9s32 = vmull_s16(d26s16, d26s16);
+  q10s32 = vmull_s16(d28s16, d28s16);
+
+  q7s32 = vaddq_s32(q7s32, q8s32);
+  q9s32 = vaddq_s32(q9s32, q10s32);
+  q9s32 = vaddq_s32(q7s32, q9s32);
+
+  q1s64 = vpaddlq_s32(q9s32);
+  d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+
+  return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
 }
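
Note on the variance kernels above: each NEON path accumulates the sum and the
sum of squares of the pixel differences, then applies the identity
var = sse - sum^2 / (w * h), with the division folded into a shift because
w * h is a power of two (>> 7 for 16x8 and 8x16, >> 11 for 32x64 and 64x32,
>> 12 for 64x64). A minimal scalar sketch of the same computation, for
illustration only (the function name and loop structure are hypothetical, not
part of this patch; <stdint.h> types assumed):

    static unsigned int variance_ref(const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride,
                                     int w, int h, unsigned int *sse) {
      int64_t sum = 0;
      uint32_t sse_acc = 0;
      int i, j;
      for (i = 0; i < h; ++i) {
        for (j = 0; j < w; ++j) {
          const int diff = a[j] - b[j];  /* 9-bit signed difference */
          sum += diff;
          sse_acc += diff * diff;
        }
        a += a_stride;
        b += b_stride;
      }
      *sse = sse_acc;
      /* w * h is a power of two here, so this division is a shift above. */
      return *sse - (unsigned int)(((int64_t)sum * sum) / (w * h));
    }
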
diff --git a/vpx_dsp/arm/vpx_convolve8_avg_neon.c b/vpx_dsp/arm/vpx_convolve8_avg_neon.c
index 8632250138c18b7f7ce86cac0892a76619857026..69cb28400538d1f8767c7dc863d764206158c701 100644
--- a/vpx_dsp/arm/vpx_convolve8_avg_neon.c
+++ b/vpx_dsp/arm/vpx_convolve8_avg_neon.c
@@ -16,16 +16,11 @@
 #include "vpx/vpx_integer.h"
 #include "vpx_ports/mem.h"
 
-static INLINE int32x4_t MULTIPLY_BY_Q0(
-    int16x4_t dsrc0,
-    int16x4_t dsrc1,
-    int16x4_t dsrc2,
-    int16x4_t dsrc3,
-    int16x4_t dsrc4,
-    int16x4_t dsrc5,
-    int16x4_t dsrc6,
-    int16x4_t dsrc7,
-    int16x8_t q0s16) {
+static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
+                                       int16x4_t dsrc2, int16x4_t dsrc3,
+                                       int16x4_t dsrc4, int16x4_t dsrc5,
+                                       int16x4_t dsrc6, int16x4_t dsrc7,
+                                       int16x8_t q0s16) {
   int32x4_t qdst;
   int16x4_t d0s16, d1s16;
 
@@ -43,17 +38,12 @@ static INLINE int32x4_t MULTIPLY_BY_Q0(
   return qdst;
 }
 
-void vpx_convolve8_avg_horiz_neon(
-    const uint8_t *src,
-    ptrdiff_t src_stride,
-    uint8_t *dst,
-    ptrdiff_t dst_stride,
-    const int16_t *filter_x,
-    int x_step_q4,
-    const int16_t *filter_y,  // unused
-    int y_step_q4,            // unused
-    int w,
-    int h) {
+void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+                                  uint8_t *dst, ptrdiff_t dst_stride,
+                                  const int16_t *filter_x, int x_step_q4,
+                                  const int16_t *filter_y,  // unused
+                                  int y_step_q4,            // unused
+                                  int w, int h) {
   int width;
   const uint8_t *s;
   uint8_t *d;
@@ -76,7 +66,7 @@ void vpx_convolve8_avg_horiz_neon(
 
   q0s16 = vld1q_s16(filter_x);
 
-  src -= 3;  // adjust for taps
+  src -= 3;                // adjust for taps
   for (; h > 0; h -= 4) {  // loop_horiz_v
     s = src;
     d24u8 = vld1_u8(s);
@@ -90,8 +80,8 @@ void vpx_convolve8_avg_horiz_neon(
     q12u8 = vcombine_u8(d24u8, d25u8);
     q13u8 = vcombine_u8(d26u8, d27u8);
 
-    q0x2u16 = vtrnq_u16(vreinterpretq_u16_u8(q12u8),
-                        vreinterpretq_u16_u8(q13u8));
+    q0x2u16 =
+        vtrnq_u16(vreinterpretq_u16_u8(q12u8), vreinterpretq_u16_u8(q13u8));
     d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0]));
     d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0]));
     d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1]));
@@ -116,10 +106,8 @@ void vpx_convolve8_avg_horiz_neon(
     q9u16 = vcombine_u16(d17u16, d19u16);
 
     d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
-    d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));  // vmov 23 21
-    for (width = w;
-         width > 0;
-         width -= 4, src += 4, dst += 4) {  // loop_horiz
+    d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));         // vmov 23 21
+    for (width = w; width > 0; width -= 4, src += 4, dst += 4) {  // loop_horiz
       s = src;
       d28u32 = vld1_dup_u32((const uint32_t *)s);
       s += src_stride;
@@ -131,10 +119,10 @@ void vpx_convolve8_avg_horiz_neon(
 
       __builtin_prefetch(src + 64);
 
-      d0x2u16 = vtrn_u16(vreinterpret_u16_u32(d28u32),
-                         vreinterpret_u16_u32(d31u32));
-      d1x2u16 = vtrn_u16(vreinterpret_u16_u32(d29u32),
-                         vreinterpret_u16_u32(d30u32));
+      d0x2u16 =
+          vtrn_u16(vreinterpret_u16_u32(d28u32), vreinterpret_u16_u32(d31u32));
+      d1x2u16 =
+          vtrn_u16(vreinterpret_u16_u32(d29u32), vreinterpret_u16_u32(d30u32));
       d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]),   // d28
                        vreinterpret_u8_u16(d1x2u16.val[0]));  // d29
       d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]),   // d31
@@ -144,8 +132,8 @@ void vpx_convolve8_avg_horiz_neon(
 
       q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
       q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]);
-      q0x2u32 = vtrnq_u32(vreinterpretq_u32_u8(q14u8),
-                          vreinterpretq_u32_u8(q15u8));
+      q0x2u32 =
+          vtrnq_u32(vreinterpretq_u32_u8(q14u8), vreinterpretq_u32_u8(q15u8));
 
       d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0]));
       d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0]));
@@ -173,14 +161,14 @@ void vpx_convolve8_avg_horiz_neon(
       d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
       d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
 
-      q1s32  = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16,
-                              d18s16, d19s16, d23s16, d24s16, q0s16);
-      q2s32  = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16,
-                              d19s16, d23s16, d24s16, d26s16, q0s16);
-      q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16,
-                              d23s16, d24s16, d26s16, d27s16, q0s16);
-      q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16,
-                              d24s16, d26s16, d27s16, d25s16, q0s16);
+      q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, d18s16, d19s16,
+                             d23s16, d24s16, q0s16);
+      q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, d19s16, d23s16,
+                             d24s16, d26s16, q0s16);
+      q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, d23s16, d24s16,
+                              d26s16, d27s16, q0s16);
+      q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16, d24s16, d26s16,
+                              d27s16, d25s16, q0s16);
 
       __builtin_prefetch(src + 64 + src_stride * 3);
 
@@ -195,8 +183,7 @@ void vpx_convolve8_avg_horiz_neon(
       d2u8 = vqmovn_u16(q1u16);
       d3u8 = vqmovn_u16(q2u16);
 
-      d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8),
-                         vreinterpret_u16_u8(d3u8));
+      d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), vreinterpret_u16_u8(d3u8));
       d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]),
                          vreinterpret_u32_u16(d0x2u16.val[1]));
       d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]),
@@ -231,17 +218,12 @@ void vpx_convolve8_avg_horiz_neon(
   return;
 }
 
-void vpx_convolve8_avg_vert_neon(
-    const uint8_t *src,
-    ptrdiff_t src_stride,
-    uint8_t *dst,
-    ptrdiff_t dst_stride,
-    const int16_t *filter_x,  // unused
-    int x_step_q4,            // unused
-    const int16_t *filter_y,
-    int y_step_q4,
-    int w,
-    int h) {
+void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+                                 uint8_t *dst, ptrdiff_t dst_stride,
+                                 const int16_t *filter_x,  // unused
+                                 int x_step_q4,            // unused
+                                 const int16_t *filter_y, int y_step_q4, int w,
+                                 int h) {
   int height;
   const uint8_t *s;
   uint8_t *d;
@@ -277,8 +259,8 @@ void vpx_convolve8_avg_vert_neon(
     d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0);
     s += src_stride;
 
-    q8u16  = vmovl_u8(vreinterpret_u8_u32(d16u32));
-    q9u16  = vmovl_u8(vreinterpret_u8_u32(d18u32));
+    q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32));
+    q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32));
     q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32));
     q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32));
 
@@ -319,20 +301,20 @@ void vpx_convolve8_avg_vert_neon(
 
       __builtin_prefetch(s);
       __builtin_prefetch(s + src_stride);
-      q1s32  = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16,
-                              d20s16, d21s16, d22s16, d24s16, q0s16);
+      q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, d20s16, d21s16,
+                             d22s16, d24s16, q0s16);
       __builtin_prefetch(s + src_stride * 2);
       __builtin_prefetch(s + src_stride * 3);
-      q2s32  = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16,
-                              d21s16, d22s16, d24s16, d26s16, q0s16);
+      q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, d21s16, d22s16,
+                             d24s16, d26s16, q0s16);
       __builtin_prefetch(d);
       __builtin_prefetch(d + dst_stride);
-      q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16,
-                              d22s16, d24s16, d26s16, d27s16, q0s16);
+      q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, d22s16, d24s16,
+                              d26s16, d27s16, q0s16);
       __builtin_prefetch(d + dst_stride * 2);
       __builtin_prefetch(d + dst_stride * 3);
-      q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16,
-                              d24s16, d26s16, d27s16, d25s16, q0s16);
+      q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, d24s16, d26s16,
+                              d27s16, d25s16, q0s16);
 
       d2u16 = vqrshrun_n_s32(q1s32, 7);
       d3u16 = vqrshrun_n_s32(q2s32, 7);
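
Note on MULTIPLY_BY_Q0 above: it computes four 8-tap FIR dot products in
parallel, one per 16-bit lane, with the filter taps held in q0s16 as Q7 fixed
point (the taps sum to 128). Each accumulator is then narrowed with
vqrshrun_n_s32(acc, 7) and vqmovn_u16, i.e. rounded and saturated back to a
pixel. A scalar sketch of one lane, for illustration only ('src' and 'taps'
stand in for the vector lanes and are not names from this patch):

    static uint8_t convolve8_lane(const int16_t src[8], const int16_t taps[8]) {
      int32_t acc = 0;
      int k;
      for (k = 0; k < 8; ++k) acc += (int32_t)src[k] * taps[k];
      acc = (acc + 64) >> 7;  /* round: taps are Q7, as vqrshrun_n_s32(acc, 7) */
      if (acc < 0) acc = 0;   /* combined saturation of vqrshrun + vqmovn */
      if (acc > 255) acc = 255;
      return (uint8_t)acc;
    }
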
diff --git a/vpx_dsp/arm/vpx_convolve8_neon.c b/vpx_dsp/arm/vpx_convolve8_neon.c
index 9bd715e2c630b2f65adeeb18195f6eea9b2685d8..514525696b0dd99922a4e0e7ab67394de1644310 100644
--- a/vpx_dsp/arm/vpx_convolve8_neon.c
+++ b/vpx_dsp/arm/vpx_convolve8_neon.c
@@ -16,16 +16,11 @@
 #include "vpx/vpx_integer.h"
 #include "vpx_ports/mem.h"
 
-static INLINE int32x4_t MULTIPLY_BY_Q0(
-    int16x4_t dsrc0,
-    int16x4_t dsrc1,
-    int16x4_t dsrc2,
-    int16x4_t dsrc3,
-    int16x4_t dsrc4,
-    int16x4_t dsrc5,
-    int16x4_t dsrc6,
-    int16x4_t dsrc7,
-    int16x8_t q0s16) {
+static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
+                                       int16x4_t dsrc2, int16x4_t dsrc3,
+                                       int16x4_t dsrc4, int16x4_t dsrc5,
+                                       int16x4_t dsrc6, int16x4_t dsrc7,
+                                       int16x8_t q0s16) {
   int32x4_t qdst;
   int16x4_t d0s16, d1s16;
 
@@ -43,17 +38,12 @@ static INLINE int32x4_t MULTIPLY_BY_Q0(
   return qdst;
 }
 
-void vpx_convolve8_horiz_neon(
-    const uint8_t *src,
-    ptrdiff_t src_stride,
-    uint8_t *dst,
-    ptrdiff_t dst_stride,
-    const int16_t *filter_x,
-    int x_step_q4,
-    const int16_t *filter_y,  // unused
-    int y_step_q4,            // unused
-    int w,
-    int h) {
+void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+                              uint8_t *dst, ptrdiff_t dst_stride,
+                              const int16_t *filter_x, int x_step_q4,
+                              const int16_t *filter_y,  // unused
+                              int y_step_q4,            // unused
+                              int w, int h) {
   int width;
   const uint8_t *s, *psrc;
   uint8_t *d, *pdst;
@@ -77,9 +67,8 @@ void vpx_convolve8_horiz_neon(
   q0s16 = vld1q_s16(filter_x);
 
   src -= 3;  // adjust for taps
-  for (; h > 0; h -= 4,
-    src += src_stride * 4,
-    dst += dst_stride * 4) {  // loop_horiz_v
+  for (; h > 0; h -= 4, src += src_stride * 4,
+                dst += dst_stride * 4) {  // loop_horiz_v
     s = src;
     d24u8 = vld1_u8(s);
     s += src_stride;
@@ -92,8 +81,8 @@ void vpx_convolve8_horiz_neon(
     q12u8 = vcombine_u8(d24u8, d25u8);
     q13u8 = vcombine_u8(d26u8, d27u8);
 
-    q0x2u16 = vtrnq_u16(vreinterpretq_u16_u8(q12u8),
-                        vreinterpretq_u16_u8(q13u8));
+    q0x2u16 =
+        vtrnq_u16(vreinterpretq_u16_u8(q12u8), vreinterpretq_u16_u8(q13u8));
     d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0]));
     d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0]));
     d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1]));
@@ -105,8 +94,8 @@ void vpx_convolve8_horiz_neon(
     __builtin_prefetch(src + src_stride * 5);
     __builtin_prefetch(src + src_stride * 6);
 
-    q8u16  = vmovl_u8(d0x2u8.val[0]);
-    q9u16  = vmovl_u8(d0x2u8.val[1]);
+    q8u16 = vmovl_u8(d0x2u8.val[0]);
+    q9u16 = vmovl_u8(d0x2u8.val[1]);
     q10u16 = vmovl_u8(d1x2u8.val[0]);
     q11u16 = vmovl_u8(d1x2u8.val[1]);
 
@@ -119,8 +108,7 @@ void vpx_convolve8_horiz_neon(
 
     d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
     d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));  // vmov 23 21
-    for (width = w, psrc = src + 7, pdst = dst;
-         width > 0;
+    for (width = w, psrc = src + 7, pdst = dst; width > 0;
          width -= 4, psrc += 4, pdst += 4) {  // loop_horiz
       s = psrc;
       d28u32 = vld1_dup_u32((const uint32_t *)s);
@@ -133,10 +121,10 @@ void vpx_convolve8_horiz_neon(
 
       __builtin_prefetch(psrc + 64);
 
-      d0x2u16 = vtrn_u16(vreinterpret_u16_u32(d28u32),
-                         vreinterpret_u16_u32(d31u32));
-      d1x2u16 = vtrn_u16(vreinterpret_u16_u32(d29u32),
-                         vreinterpret_u16_u32(d30u32));
+      d0x2u16 =
+          vtrn_u16(vreinterpret_u16_u32(d28u32), vreinterpret_u16_u32(d31u32));
+      d1x2u16 =
+          vtrn_u16(vreinterpret_u16_u32(d29u32), vreinterpret_u16_u32(d30u32));
       d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]),   // d28
                        vreinterpret_u8_u16(d1x2u16.val[0]));  // d29
       d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]),   // d31
@@ -146,8 +134,8 @@ void vpx_convolve8_horiz_neon(
 
       q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
       q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]);
-      q0x2u32 = vtrnq_u32(vreinterpretq_u32_u8(q14u8),
-                          vreinterpretq_u32_u8(q15u8));
+      q0x2u32 =
+          vtrnq_u32(vreinterpretq_u32_u8(q14u8), vreinterpretq_u32_u8(q15u8));
 
       d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0]));
       d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0]));
@@ -166,14 +154,14 @@ void vpx_convolve8_horiz_neon(
       d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
       d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
 
-      q1s32  = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16,
-                              d18s16, d19s16, d23s16, d24s16, q0s16);
-      q2s32  = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16,
-                              d19s16, d23s16, d24s16, d26s16, q0s16);
-      q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16,
-                              d23s16, d24s16, d26s16, d27s16, q0s16);
-      q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16,
-                              d24s16, d26s16, d27s16, d25s16, q0s16);
+      q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, d18s16, d19s16,
+                             d23s16, d24s16, q0s16);
+      q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, d19s16, d23s16,
+                             d24s16, d26s16, q0s16);
+      q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, d23s16, d24s16,
+                              d26s16, d27s16, q0s16);
+      q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16, d24s16, d26s16,
+                              d27s16, d25s16, q0s16);
 
       __builtin_prefetch(psrc + 60 + src_stride * 3);
 
@@ -188,8 +176,7 @@ void vpx_convolve8_horiz_neon(
       d2u8 = vqmovn_u16(q1u16);
       d3u8 = vqmovn_u16(q2u16);
 
-      d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8),
-                         vreinterpret_u16_u8(d3u8));
+      d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), vreinterpret_u16_u8(d3u8));
       d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]),
                          vreinterpret_u32_u16(d0x2u16.val[1]));
       d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]),
@@ -217,17 +204,12 @@ void vpx_convolve8_horiz_neon(
   return;
 }
 
-void vpx_convolve8_vert_neon(
-    const uint8_t *src,
-    ptrdiff_t src_stride,
-    uint8_t *dst,
-    ptrdiff_t dst_stride,
-    const int16_t *filter_x,  // unused
-    int x_step_q4,            // unused
-    const int16_t *filter_y,
-    int y_step_q4,
-    int w,
-    int h) {
+void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+                             uint8_t *dst, ptrdiff_t dst_stride,
+                             const int16_t *filter_x,  // unused
+                             int x_step_q4,            // unused
+                             const int16_t *filter_y, int y_step_q4, int w,
+                             int h) {
   int height;
   const uint8_t *s;
   uint8_t *d;
@@ -261,8 +243,8 @@ void vpx_convolve8_vert_neon(
     d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0);
     s += src_stride;
 
-    q8u16  = vmovl_u8(vreinterpret_u8_u32(d16u32));
-    q9u16  = vmovl_u8(vreinterpret_u8_u32(d18u32));
+    q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32));
+    q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32));
     q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32));
     q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32));
 
@@ -294,20 +276,20 @@ void vpx_convolve8_vert_neon(
 
       __builtin_prefetch(d);
       __builtin_prefetch(d + dst_stride);
-      q1s32  = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16,
-                              d20s16, d21s16, d22s16, d24s16, q0s16);
+      q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, d20s16, d21s16,
+                             d22s16, d24s16, q0s16);
       __builtin_prefetch(d + dst_stride * 2);
       __builtin_prefetch(d + dst_stride * 3);
-      q2s32  = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16,
-                              d21s16, d22s16, d24s16, d26s16, q0s16);
+      q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, d21s16, d22s16,
+                             d24s16, d26s16, q0s16);
       __builtin_prefetch(s);
       __builtin_prefetch(s + src_stride);
-      q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16,
-                              d22s16, d24s16, d26s16, d27s16, q0s16);
+      q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, d22s16, d24s16,
+                              d26s16, d27s16, q0s16);
       __builtin_prefetch(s + src_stride * 2);
       __builtin_prefetch(s + src_stride * 3);
-      q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16,
-                              d24s16, d26s16, d27s16, d25s16, q0s16);
+      q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, d24s16, d26s16,
+                              d27s16, d25s16, q0s16);
 
       d2u16 = vqrshrun_n_s32(q1s32, 7);
       d3u16 = vqrshrun_n_s32(q2s32, 7);
diff --git a/vpx_dsp/arm/vpx_convolve_avg_neon.c b/vpx_dsp/arm/vpx_convolve_avg_neon.c
index dc58a332f81d147acc3e9b60f19ac8de32347f9b..abc2511ea291f34c26710c9eae7d3a24138c2940 100644
--- a/vpx_dsp/arm/vpx_convolve_avg_neon.c
+++ b/vpx_dsp/arm/vpx_convolve_avg_neon.c
@@ -13,34 +13,32 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
 
-void vpx_convolve_avg_neon(
-    const uint8_t *src,    // r0
-    ptrdiff_t src_stride,  // r1
-    uint8_t *dst,          // r2
-    ptrdiff_t dst_stride,  // r3
-    const int16_t *filter_x,
-    int filter_x_stride,
-    const int16_t *filter_y,
-    int filter_y_stride,
-    int w,
-    int h) {
+void vpx_convolve_avg_neon(const uint8_t *src,    // r0
+                           ptrdiff_t src_stride,  // r1
+                           uint8_t *dst,          // r2
+                           ptrdiff_t dst_stride,  // r3
+                           const int16_t *filter_x, int filter_x_stride,
+                           const int16_t *filter_y, int filter_y_stride, int w,
+                           int h) {
   uint8_t *d;
   uint8x8_t d0u8, d1u8, d2u8, d3u8;
   uint32x2_t d0u32, d2u32;
   uint8x16_t q0u8, q1u8, q2u8, q3u8, q8u8, q9u8, q10u8, q11u8;
-  (void)filter_x;  (void)filter_x_stride;
-  (void)filter_y;  (void)filter_y_stride;
+  (void)filter_x;
+  (void)filter_x_stride;
+  (void)filter_y;
+  (void)filter_y_stride;
 
   d = dst;
   if (w > 32) {  // avg64
     for (; h > 0; h -= 1) {
-      q0u8  = vld1q_u8(src);
-      q1u8  = vld1q_u8(src + 16);
-      q2u8  = vld1q_u8(src + 32);
-      q3u8  = vld1q_u8(src + 48);
+      q0u8 = vld1q_u8(src);
+      q1u8 = vld1q_u8(src + 16);
+      q2u8 = vld1q_u8(src + 32);
+      q3u8 = vld1q_u8(src + 48);
       src += src_stride;
-      q8u8  = vld1q_u8(d);
-      q9u8  = vld1q_u8(d + 16);
+      q8u8 = vld1q_u8(d);
+      q9u8 = vld1q_u8(d + 16);
       q10u8 = vld1q_u8(d + 32);
       q11u8 = vld1q_u8(d + 48);
       d += dst_stride;
@@ -133,8 +131,7 @@ void vpx_convolve_avg_neon(
       d2u32 = vld1_lane_u32((const uint32_t *)d, d2u32, 1);
       d += dst_stride;
 
-      d0u8 = vrhadd_u8(vreinterpret_u8_u32(d0u32),
-                       vreinterpret_u8_u32(d2u32));
+      d0u8 = vrhadd_u8(vreinterpret_u8_u32(d0u32), vreinterpret_u8_u32(d2u32));
 
       d0u32 = vreinterpret_u32_u8(d0u8);
       vst1_lane_u32((uint32_t *)dst, d0u32, 0);
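
Note on the averaging above: vrhadd_u8 is a rounding halving add,
out = (a + b + 1) >> 1 per byte, which is how the _avg_ variants blend the new
prediction with what is already in dst. A scalar sketch of the per-pixel
operation (helper name hypothetical):

    static uint8_t avg_round(uint8_t a, uint8_t b) {
      return (uint8_t)((a + b + 1) >> 1);  /* matches vrhadd_u8 per lane */
    }
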
diff --git a/vpx_dsp/arm/vpx_convolve_copy_neon.c b/vpx_dsp/arm/vpx_convolve_copy_neon.c
index d8fb97a861907cc834765d3e259e3d570a34770a..fec189e0e4267ed9fe3f257b59c61b3c7ac2c85d 100644
--- a/vpx_dsp/arm/vpx_convolve_copy_neon.c
+++ b/vpx_dsp/arm/vpx_convolve_copy_neon.c
@@ -13,21 +13,19 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
 
-void vpx_convolve_copy_neon(
-    const uint8_t *src,    // r0
-    ptrdiff_t src_stride,  // r1
-    uint8_t *dst,          // r2
-    ptrdiff_t dst_stride,  // r3
-    const int16_t *filter_x,
-    int filter_x_stride,
-    const int16_t *filter_y,
-    int filter_y_stride,
-    int w,
-    int h) {
+void vpx_convolve_copy_neon(const uint8_t *src,    // r0
+                            ptrdiff_t src_stride,  // r1
+                            uint8_t *dst,          // r2
+                            ptrdiff_t dst_stride,  // r3
+                            const int16_t *filter_x, int filter_x_stride,
+                            const int16_t *filter_y, int filter_y_stride, int w,
+                            int h) {
   uint8x8_t d0u8, d2u8;
   uint8x16_t q0u8, q1u8, q2u8, q3u8;
-  (void)filter_x;  (void)filter_x_stride;
-  (void)filter_y;  (void)filter_y_stride;
+  (void)filter_x;
+  (void)filter_x_stride;
+  (void)filter_y;
+  (void)filter_y_stride;
 
   if (w > 32) {  // copy64
     for (; h > 0; h--) {
diff --git a/vpx_dsp/arm/vpx_convolve_neon.c b/vpx_dsp/arm/vpx_convolve_neon.c
index 1506ce6203de21ade9449453b47c94237cfa608b..c2d5895b718bd02e8ff91cc45ff62efc7ad46f17 100644
--- a/vpx_dsp/arm/vpx_convolve_neon.c
+++ b/vpx_dsp/arm/vpx_convolve_neon.c
@@ -14,10 +14,9 @@
 #include "vpx_dsp/vpx_dsp_common.h"
 #include "vpx_ports/mem.h"
 
-void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride,
-                        uint8_t *dst, ptrdiff_t dst_stride,
-                        const int16_t *filter_x, int x_step_q4,
-                        const int16_t *filter_y, int y_step_q4,
+void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                        ptrdiff_t dst_stride, const int16_t *filter_x,
+                        int x_step_q4, const int16_t *filter_y, int y_step_q4,
                         int w, int h) {
   /* Given our constraints: w <= 64, h <= 64, taps == 8, we can reduce the
    * maximum buffer size to 64 * 64 + 7 (+ 1 to make it divisible by 4).
@@ -35,23 +34,20 @@ void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride,
   * the temp buffer which has lots of extra room and is subsequently discarded;
    * this is safe if somewhat less than ideal.
    */
-  vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride,
-                           temp, 64,
-                           filter_x, x_step_q4, filter_y, y_step_q4,
-                           w, intermediate_height);
+  vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
+                           x_step_q4, filter_y, y_step_q4, w,
+                           intermediate_height);
 
   /* Step into the temp buffer 3 lines to get the actual frame data */
-  vpx_convolve8_vert_neon(temp + 64 * 3, 64,
-                          dst, dst_stride,
-                          filter_x, x_step_q4, filter_y, y_step_q4,
-                          w, h);
+  vpx_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+                          x_step_q4, filter_y, y_step_q4, w, h);
 }
 
 void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
-                            const int16_t *filter_y, int y_step_q4,
-                            int w, int h) {
+                            const int16_t *filter_y, int y_step_q4, int w,
+                            int h) {
   DECLARE_ALIGNED(8, uint8_t, temp[64 * 72]);
   int intermediate_height = h + 7;
 
@@ -61,12 +57,9 @@ void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
   /* This implementation has the same issues as above. In addition, we only want
    * to average the values after both passes.
    */
-  vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride,
-                           temp, 64,
-                           filter_x, x_step_q4, filter_y, y_step_q4,
-                           w, intermediate_height);
-  vpx_convolve8_avg_vert_neon(temp + 64 * 3,
-                              64, dst, dst_stride,
-                              filter_x, x_step_q4, filter_y, y_step_q4,
-                              w, h);
+  vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
+                           x_step_q4, filter_y, y_step_q4, w,
+                           intermediate_height);
+  vpx_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+                              x_step_q4, filter_y, y_step_q4, w, h);
 }
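
Buffer math for the two-pass path above: the 8-tap vertical filter reads temp
rows y - 3 .. y + 4 to produce output row y, so h output rows consume h + 7
temp rows. With h <= 64 that is at most 71 rows of 64 bytes, and temp[64 * 72]
(one spare row, keeping the row count divisible by 4 as the comment notes)
covers it. This is also why the horizontal pass starts at
src - src_stride * 3 and renders intermediate_height = h + 7 rows, and why
the vertical pass steps in at temp + 64 * 3.
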
diff --git a/vpx_dsp/avg.c b/vpx_dsp/avg.c
index 15df332c4e9be72cd831797ca884a7378c4464a9..293387c3a00caee0311636a66c92215a2353918a 100644
--- a/vpx_dsp/avg.c
+++ b/vpx_dsp/avg.c
@@ -15,8 +15,9 @@
 unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) {
   int i, j;
   int sum = 0;
-  for (i = 0; i < 8; ++i, s+=p)
-    for (j = 0; j < 8; sum += s[j], ++j) {}
+  for (i = 0; i < 8; ++i, s += p)
+    for (j = 0; j < 8; sum += s[j], ++j) {
+    }
 
   return (sum + 32) >> 6;
 }
@@ -24,8 +25,9 @@ unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) {
 unsigned int vpx_avg_4x4_c(const uint8_t *s, int p) {
   int i, j;
   int sum = 0;
-  for (i = 0; i < 4; ++i, s+=p)
-    for (j = 0; j < 4; sum += s[j], ++j) {}
+  for (i = 0; i < 4; ++i, s += p)
+    for (j = 0; j < 4; sum += s[j], ++j) {
+    }
 
   return (sum + 8) >> 4;
 }
@@ -78,8 +80,8 @@ void vpx_hadamard_8x8_c(int16_t const *src_diff, int src_stride,
   for (idx = 0; idx < 8; ++idx) {
     hadamard_col8(tmp_buf, 8, coeff);  // tmp_buf: 12 bit
                                        // dynamic range [-2040, 2040]
-    coeff += 8;  // coeff: 15 bit
-                 // dynamic range [-16320, 16320]
+    coeff += 8;                        // coeff: 15 bit
+                                       // dynamic range [-16320, 16320]
     ++tmp_buf;
   }
 }
@@ -90,8 +92,8 @@ void vpx_hadamard_16x16_c(int16_t const *src_diff, int src_stride,
   int idx;
   for (idx = 0; idx < 4; ++idx) {
     // src_diff: 9 bit, dynamic range [-255, 255]
-    int16_t const *src_ptr = src_diff + (idx >> 1) * 8 * src_stride
-                                + (idx & 0x01) * 8;
+    int16_t const *src_ptr =
+        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
     vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
   }
 
@@ -107,8 +109,8 @@ void vpx_hadamard_16x16_c(int16_t const *src_diff, int src_stride,
     int16_t b2 = (a2 + a3) >> 1;  // [-16320, 16320]
     int16_t b3 = (a2 - a3) >> 1;
 
-    coeff[0]   = b0 + b2;  // 16 bit, [-32640, 32640]
-    coeff[64]  = b1 + b3;
+    coeff[0] = b0 + b2;  // 16 bit, [-32640, 32640]
+    coeff[64] = b1 + b3;
     coeff[128] = b0 - b2;
     coeff[192] = b1 - b3;
 
@@ -121,8 +123,7 @@ void vpx_hadamard_16x16_c(int16_t const *src_diff, int src_stride,
 int vpx_satd_c(const int16_t *coeff, int length) {
   int i;
   int satd = 0;
-  for (i = 0; i < length; ++i)
-    satd += abs(coeff[i]);
+  for (i = 0; i < length; ++i) satd += abs(coeff[i]);
 
   // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024]
   return satd;
@@ -138,8 +139,7 @@ void vpx_int_pro_row_c(int16_t hbuf[16], uint8_t const *ref,
     int i;
     hbuf[idx] = 0;
     // hbuf[idx]: 14 bit, dynamic range [0, 16320].
-    for (i = 0; i < height; ++i)
-      hbuf[idx] += ref[i * ref_stride];
+    for (i = 0; i < height; ++i) hbuf[idx] += ref[i * ref_stride];
     // hbuf[idx]: 9 bit, dynamic range [0, 510].
     hbuf[idx] /= norm_factor;
     ++ref;
@@ -151,16 +151,14 @@ int16_t vpx_int_pro_col_c(uint8_t const *ref, const int width) {
   int idx;
   int16_t sum = 0;
   // sum: 14 bit, dynamic range [0, 16320]
-  for (idx = 0; idx < width; ++idx)
-    sum += ref[idx];
+  for (idx = 0; idx < width; ++idx) sum += ref[idx];
   return sum;
 }
 
 // ref: [0 - 510]
 // src: [0 - 510]
 // bwl: {2, 3, 4}
-int vpx_vector_var_c(int16_t const *ref, int16_t const *src,
-                     const int bwl) {
+int vpx_vector_var_c(int16_t const *ref, int16_t const *src, const int bwl) {
   int i;
   int width = 4 << bwl;
   int sse = 0, mean = 0, var;
@@ -183,7 +181,7 @@ void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp,
   *max = 0;
   for (i = 0; i < 8; ++i, s += p, d += dp) {
     for (j = 0; j < 8; ++j) {
-      int diff = abs(s[j]-d[j]);
+      int diff = abs(s[j] - d[j]);
       *min = diff < *min ? diff : *min;
       *max = diff > *max ? diff : *max;
     }
@@ -194,9 +192,10 @@ void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp,
 unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) {
   int i, j;
   int sum = 0;
-  const uint16_t* s = CONVERT_TO_SHORTPTR(s8);
-  for (i = 0; i < 8; ++i, s+=p)
-    for (j = 0; j < 8; sum += s[j], ++j) {}
+  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
+  for (i = 0; i < 8; ++i, s += p)
+    for (j = 0; j < 8; sum += s[j], ++j) {
+    }
 
   return (sum + 32) >> 6;
 }
@@ -204,9 +203,10 @@ unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) {
 unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) {
   int i, j;
   int sum = 0;
-  const uint16_t* s = CONVERT_TO_SHORTPTR(s8);
-  for (i = 0; i < 4; ++i, s+=p)
-    for (j = 0; j < 4; sum += s[j], ++j) {}
+  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
+  for (i = 0; i < 4; ++i, s += p)
+    for (j = 0; j < 4; sum += s[j], ++j) {
+    }
 
   return (sum + 8) >> 4;
 }
@@ -214,18 +214,16 @@ unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) {
 void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
                              int dp, int *min, int *max) {
   int i, j;
-  const uint16_t* s = CONVERT_TO_SHORTPTR(s8);
-  const uint16_t* d = CONVERT_TO_SHORTPTR(d8);
+  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
+  const uint16_t *d = CONVERT_TO_SHORTPTR(d8);
   *min = 255;
   *max = 0;
   for (i = 0; i < 8; ++i, s += p, d += dp) {
     for (j = 0; j < 8; ++j) {
-      int diff = abs(s[j]-d[j]);
+      int diff = abs(s[j] - d[j]);
       *min = diff < *min ? diff : *min;
       *max = diff > *max ? diff : *max;
     }
   }
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
-
-
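
Note on the rounding in avg.c above: (sum + 32) >> 6 is round-to-nearest of
sum / 64 for an 8x8 block, and (sum + 8) >> 4 the same for 4x4. For example,
sum = 100 over an 8x8 block gives (100 + 32) >> 6 = 2, matching
round(100 / 64.0) = round(1.5625) = 2. A sketch of the shared idiom (helper
name hypothetical):

    static unsigned int round_shift(unsigned int sum, int bits) {
      return (sum + (1u << (bits - 1))) >> bits;  /* round(sum / 2^bits) */
    }
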
diff --git a/vpx_dsp/bitreader.c b/vpx_dsp/bitreader.c
index 6ad806ac3f5b1a7bd3481acc9dd4e1d6b59c7f53..6e3091a773d67c5d3a55fe248553a6978cb6adc8 100644
--- a/vpx_dsp/bitreader.c
+++ b/vpx_dsp/bitreader.c
@@ -18,11 +18,8 @@
 #include "vpx_mem/vpx_mem.h"
 #include "vpx_util/endian_inl.h"
 
-int vpx_reader_init(vpx_reader *r,
-                    const uint8_t *buffer,
-                    size_t size,
-                    vpx_decrypt_cb decrypt_cb,
-                    void *decrypt_state) {
+int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size,
+                    vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
   if (size && !buffer) {
     return 1;
   } else {
@@ -55,19 +52,19 @@ void vpx_reader_fill(vpx_reader *r) {
     buffer_start = r->clear_buffer;
   }
   if (bits_left > BD_VALUE_SIZE) {
-      const int bits = (shift & 0xfffffff8) + CHAR_BIT;
-      BD_VALUE nv;
-      BD_VALUE big_endian_values;
-      memcpy(&big_endian_values, buffer, sizeof(BD_VALUE));
+    const int bits = (shift & 0xfffffff8) + CHAR_BIT;
+    BD_VALUE nv;
+    BD_VALUE big_endian_values;
+    memcpy(&big_endian_values, buffer, sizeof(BD_VALUE));
 #if SIZE_MAX == 0xffffffffffffffffULL
-        big_endian_values = HToBE64(big_endian_values);
+    big_endian_values = HToBE64(big_endian_values);
 #else
-        big_endian_values = HToBE32(big_endian_values);
+    big_endian_values = HToBE32(big_endian_values);
 #endif
-      nv = big_endian_values >> (BD_VALUE_SIZE - bits);
-      count += bits;
-      buffer += (bits >> 3);
-      value = r->value | (nv << (shift & 0x7));
+    nv = big_endian_values >> (BD_VALUE_SIZE - bits);
+    count += bits;
+    buffer += (bits >> 3);
+    value = r->value | (nv << (shift & 0x7));
   } else {
     const int bits_over = (int)(shift + CHAR_BIT - bits_left);
     int loop_end = 0;
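
Note on the fast path in vpx_reader_fill above: when more than a whole
BD_VALUE of input remains, it loads sizeof(BD_VALUE) bytes at once and
normalizes them to big-endian so the earliest buffer byte lands in the most
significant bits. bits = (shift & 0xfffffff8) + CHAR_BIT rounds the needed
refill up to a whole number of bytes, nv = big_endian_values >>
(BD_VALUE_SIZE - bits) keeps just those bytes, and
value = r->value | (nv << (shift & 0x7)) splices them in under the bits the
decoder still holds.
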
diff --git a/vpx_dsp/bitreader.h b/vpx_dsp/bitreader.h
index 9a441b41077e6b4100b746168dddef7bcd74cbf6..6ee2a58632c5e4c5a2c7574f9cc430be74aee37a 100644
--- a/vpx_dsp/bitreader.h
+++ b/vpx_dsp/bitreader.h
@@ -45,11 +45,8 @@ typedef struct {
   uint8_t clear_buffer[sizeof(BD_VALUE) + 1];
 } vpx_reader;
 
-int vpx_reader_init(vpx_reader *r,
-                    const uint8_t *buffer,
-                    size_t size,
-                    vpx_decrypt_cb decrypt_cb,
-                    void *decrypt_state);
+int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size,
+                    vpx_decrypt_cb decrypt_cb, void *decrypt_state);
 
 void vpx_reader_fill(vpx_reader *r);
 
@@ -81,8 +78,7 @@ static INLINE int vpx_read(vpx_reader *r, int prob) {
   unsigned int range;
   unsigned int split = (r->range * prob + (256 - prob)) >> CHAR_BIT;
 
-  if (r->count < 0)
-    vpx_reader_fill(r);
+  if (r->count < 0) vpx_reader_fill(r);
 
   value = r->value;
   count = r->count;
@@ -117,8 +113,7 @@ static INLINE int vpx_read_bit(vpx_reader *r) {
 static INLINE int vpx_read_literal(vpx_reader *r, int bits) {
   int literal = 0, bit;
 
-  for (bit = bits - 1; bit >= 0; bit--)
-    literal |= vpx_read_bit(r) << bit;
+  for (bit = bits - 1; bit >= 0; bit--) literal |= vpx_read_bit(r) << bit;
 
   return literal;
 }
@@ -127,8 +122,7 @@ static INLINE int vpx_read_tree(vpx_reader *r, const vpx_tree_index *tree,
                                 const vpx_prob *probs) {
   vpx_tree_index i = 0;
 
-  while ((i = tree[i + vpx_read(r, probs[i >> 1])]) > 0)
-    continue;
+  while ((i = tree[i + vpx_read(r, probs[i >> 1])]) > 0) continue;
 
   return -i;
 }
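
Note on the boolean decoder above: vpx_read is a binary arithmetic decoder
with 8-bit probabilities; split = (r->range * prob + (256 - prob)) >> CHAR_BIT
is the size of the zero-bit region, so a bit decodes as 0 with probability
roughly prob / 256. vpx_read_tree then walks a token tree one vpx_read per
edge. A toy tree, purely illustrative (this tree and these probabilities are
made up, not from the codec):

    /* Non-positive entries are negated tokens; positive entries index the
     * next node pair; probs[i >> 1] supplies the probability at pair i. */
    static const vpx_tree_index kToyTree[4] = {
      0,  2,   /* pair 0: bit 0 -> token 0, bit 1 -> pair at index 2 */
      -1, -2   /* pair 2: bit 0 -> token 1, bit 1 -> token 2 */
    };
    static const vpx_prob kToyProbs[2] = { 128, 192 };
    /* usage: int token = vpx_read_tree(r, kToyTree, kToyProbs);  -> 0..2 */
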
diff --git a/vpx_dsp/bitreader_buffer.c b/vpx_dsp/bitreader_buffer.c
index d7b55cf9f410ed962dd71a427c9c2e8e02dbb0f5..e99fffb605ff9a4a4107a70752d53c7851917d74 100644
--- a/vpx_dsp/bitreader_buffer.c
+++ b/vpx_dsp/bitreader_buffer.c
@@ -30,23 +30,20 @@ int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb) {
 
 int vpx_rb_read_literal(struct vpx_read_bit_buffer *rb, int bits) {
   int value = 0, bit;
-  for (bit = bits - 1; bit >= 0; bit--)
-    value |= vpx_rb_read_bit(rb) << bit;
+  for (bit = bits - 1; bit >= 0; bit--) value |= vpx_rb_read_bit(rb) << bit;
   return value;
 }
 
-int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb,
-                               int bits) {
+int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, int bits) {
   const int value = vpx_rb_read_literal(rb, bits);
   return vpx_rb_read_bit(rb) ? -value : value;
 }
 
-int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb,
-                                   int bits) {
+int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, int bits) {
 #if CONFIG_MISC_FIXES
   const int nbits = sizeof(unsigned) * 8 - bits - 1;
   const unsigned value = (unsigned)vpx_rb_read_literal(rb, bits + 1) << nbits;
-  return ((int) value) >> nbits;
+  return ((int)value) >> nbits;
 #else
   return vpx_rb_read_signed_literal(rb, bits);
 #endif
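
Note on the CONFIG_MISC_FIXES branch of vpx_rb_read_inv_signed_literal above:
it sign-extends a (bits + 1)-bit two's-complement value by parking it at the
top of the word and arithmetic-shifting it back, the same idiom the file
relies on. A standalone sketch (function name hypothetical):

    /* Sign-extend the low (bits + 1) bits of raw. E.g. bits = 4,
     * raw = 0x16 (binary 10110): (int)(0x16u << 27) >> 27 == -10. */
    static int sign_extend(unsigned raw, int bits) {
      const int nbits = (int)sizeof(unsigned) * 8 - bits - 1;
      return ((int)(raw << nbits)) >> nbits;
    }
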
diff --git a/vpx_dsp/bitwriter.c b/vpx_dsp/bitwriter.c
index 5b232e346e22a7214add60f5a9d21d760b62585b..81e28b309f573e2cabb1b6c29f9324655eacca86 100644
--- a/vpx_dsp/bitwriter.c
+++ b/vpx_dsp/bitwriter.c
@@ -14,21 +14,18 @@
 
 void vpx_start_encode(vpx_writer *br, uint8_t *source) {
   br->lowvalue = 0;
-  br->range    = 255;
-  br->count    = -24;
-  br->buffer   = source;
-  br->pos      = 0;
+  br->range = 255;
+  br->count = -24;
+  br->buffer = source;
+  br->pos = 0;
   vpx_write_bit(br, 0);
 }
 
 void vpx_stop_encode(vpx_writer *br) {
   int i;
 
-  for (i = 0; i < 32; i++)
-    vpx_write_bit(br, 0);
+  for (i = 0; i < 32; i++) vpx_write_bit(br, 0);
 
   // Ensure there's no ambiguous collision with any index marker bytes

-  if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0)
-    br->buffer[br->pos++] = 0;
+  if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0) br->buffer[br->pos++] = 0;
 }
-
diff --git a/vpx_dsp/bitwriter.h b/vpx_dsp/bitwriter.h
index d904997af309ffcd36c0295c2d21c551a7cc35cb..41040cf93549829d36297293d1a655a6b52ac09d 100644
--- a/vpx_dsp/bitwriter.h
+++ b/vpx_dsp/bitwriter.h
@@ -85,8 +85,7 @@ static INLINE void vpx_write_bit(vpx_writer *w, int bit) {
 static INLINE void vpx_write_literal(vpx_writer *w, int data, int bits) {
   int bit;
 
-  for (bit = bits - 1; bit >= 0; bit--)
-    vpx_write_bit(w, 1 & (data >> bit));
+  for (bit = bits - 1; bit >= 0; bit--) vpx_write_bit(w, 1 & (data >> bit));
 }
 
 #define vpx_write_prob(w, v) vpx_write_literal((w), (v), 8)
diff --git a/vpx_dsp/bitwriter_buffer.c b/vpx_dsp/bitwriter_buffer.c
index 6182a722218af880e9dd465d3c5b59605b2c3cf5..1043cdc784ac54b008f3589384e2760f20f5d7a1 100644
--- a/vpx_dsp/bitwriter_buffer.c
+++ b/vpx_dsp/bitwriter_buffer.c
@@ -22,7 +22,7 @@ void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit) {
   const int off = (int)wb->bit_offset;
   const int p = off / CHAR_BIT;
   const int q = CHAR_BIT - 1 - off % CHAR_BIT;
-  if (q == CHAR_BIT -1) {
+  if (q == CHAR_BIT - 1) {
     wb->bit_buffer[p] = bit << q;
   } else {
     wb->bit_buffer[p] &= ~(1 << q);
@@ -33,12 +33,11 @@ void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit) {
 
 void vpx_wb_write_literal(struct vpx_write_bit_buffer *wb, int data, int bits) {
   int bit;
-  for (bit = bits - 1; bit >= 0; bit--)
-    vpx_wb_write_bit(wb, (data >> bit) & 1);
+  for (bit = bits - 1; bit >= 0; bit--) vpx_wb_write_bit(wb, (data >> bit) & 1);
 }
 
-void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb,
-                                     int data, int bits) {
+void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, int data,
+                                     int bits) {
 #if CONFIG_MISC_FIXES
   vpx_wb_write_literal(wb, data, bits + 1);
 #else
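
Note on vpx_wb_write_bit above: bits are addressed MSB-first, so bit offset
off maps to byte p = off / CHAR_BIT and bit position q = CHAR_BIT - 1 -
off % CHAR_BIT; e.g. off = 10 writes byte 1 under mask 1 << 5 = 0x20. The
q == CHAR_BIT - 1 case is the first write to a fresh byte, which assigns
rather than ORs so stale buffer contents cannot leak into the output.
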
diff --git a/vpx_dsp/fastssim.c b/vpx_dsp/fastssim.c
index 1405a30e00af02d932f9c27ea4e3cfbc41f184c8..d89ab6a8edcebbb9dfbcb3b928938bef522824eb 100644
--- a/vpx_dsp/fastssim.c
+++ b/vpx_dsp/fastssim.c
@@ -49,12 +49,12 @@ static void fs_ctx_init(fs_ctx *_ctx, int _w, int _h, int _nlevels) {
   int l;
   lw = (_w + 1) >> 1;
   lh = (_h + 1) >> 1;
-  data_size = _nlevels * sizeof(fs_level)
-      + 2 * (lw + 8) * 8 * sizeof(*_ctx->col_buf);
+  data_size =
+      _nlevels * sizeof(fs_level) + 2 * (lw + 8) * 8 * sizeof(*_ctx->col_buf);
   for (l = 0; l < _nlevels; l++) {
     size_t im_size;
     size_t level_size;
-    im_size = lw * (size_t) lh;
+    im_size = lw * (size_t)lh;
     level_size = 2 * im_size * sizeof(*_ctx->level[l].im1);
     level_size += sizeof(*_ctx->level[l].ssim) - 1;
     level_size /= sizeof(*_ctx->level[l].ssim);
@@ -64,8 +64,8 @@ static void fs_ctx_init(fs_ctx *_ctx, int _w, int _h, int _nlevels) {
     lw = (lw + 1) >> 1;
     lh = (lh + 1) >> 1;
   }
-  data = (unsigned char *) malloc(data_size);
-  _ctx->level = (fs_level *) data;
+  data = (unsigned char *)malloc(data_size);
+  _ctx->level = (fs_level *)data;
   _ctx->nlevels = _nlevels;
   data += _nlevels * sizeof(*_ctx->level);
   lw = (_w + 1) >> 1;
@@ -75,25 +75,23 @@ static void fs_ctx_init(fs_ctx *_ctx, int _w, int _h, int _nlevels) {
     size_t level_size;
     _ctx->level[l].w = lw;
     _ctx->level[l].h = lh;
-    im_size = lw * (size_t) lh;
+    im_size = lw * (size_t)lh;
     level_size = 2 * im_size * sizeof(*_ctx->level[l].im1);
     level_size += sizeof(*_ctx->level[l].ssim) - 1;
     level_size /= sizeof(*_ctx->level[l].ssim);
     level_size *= sizeof(*_ctx->level[l].ssim);
-    _ctx->level[l].im1 = (uint16_t *) data;
+    _ctx->level[l].im1 = (uint16_t *)data;
     _ctx->level[l].im2 = _ctx->level[l].im1 + im_size;
     data += level_size;
-    _ctx->level[l].ssim = (double *) data;
+    _ctx->level[l].ssim = (double *)data;
     data += im_size * sizeof(*_ctx->level[l].ssim);
     lw = (lw + 1) >> 1;
     lh = (lh + 1) >> 1;
   }
-  _ctx->col_buf = (unsigned *) data;
+  _ctx->col_buf = (unsigned *)data;
 }
 
-static void fs_ctx_clear(fs_ctx *_ctx) {
-  free(_ctx->level);
-}
+static void fs_ctx_clear(fs_ctx *_ctx) { free(_ctx->level); }
 
 static void fs_downsample_level(fs_ctx *_ctx, int _l) {
   const uint16_t *src1;
@@ -124,10 +122,10 @@ static void fs_downsample_level(fs_ctx *_ctx, int _l) {
       int i1;
       i0 = 2 * i;
       i1 = FS_MINI(i0 + 1, w2);
-      dst1[j * w + i] = src1[j0offs + i0] + src1[j0offs + i1]
-          + src1[j1offs + i0] + src1[j1offs + i1];
-      dst2[j * w + i] = src2[j0offs + i0] + src2[j0offs + i1]
-          + src2[j1offs + i0] + src2[j1offs + i1];
+      dst1[j * w + i] = src1[j0offs + i0] + src1[j0offs + i1] +
+                        src1[j1offs + i0] + src1[j1offs + i1];
+      dst2[j * w + i] = src2[j0offs + i0] + src2[j0offs + i1] +
+                        src2[j1offs + i0] + src2[j1offs + i1];
     }
   }
 }
@@ -155,12 +153,12 @@ static void fs_downsample_level0(fs_ctx *_ctx, const unsigned char *_src1,
       int i1;
       i0 = 2 * i;
       i1 = FS_MINI(i0 + 1, _w);
-      dst1[j * w + i] = _src1[j0 * _s1ystride + i0]
-          + _src1[j0 * _s1ystride + i1] + _src1[j1 * _s1ystride + i0]
-          + _src1[j1 * _s1ystride + i1];
-      dst2[j * w + i] = _src2[j0 * _s2ystride + i0]
-          + _src2[j0 * _s2ystride + i1] + _src2[j1 * _s2ystride + i0]
-          + _src2[j1 * _s2ystride + i1];
+      dst1[j * w + i] =
+          _src1[j0 * _s1ystride + i0] + _src1[j0 * _s1ystride + i1] +
+          _src1[j1 * _s1ystride + i0] + _src1[j1 * _s1ystride + i1];
+      dst2[j * w + i] =
+          _src2[j0 * _s2ystride + i0] + _src2[j0 * _s2ystride + i1] +
+          _src2[j1 * _s2ystride + i0] + _src2[j1 * _s2ystride + i1];
     }
   }
 }
@@ -184,19 +182,15 @@ static void fs_apply_luminance(fs_ctx *_ctx, int _l) {
   col_sums_y = col_sums_x + w;
   im1 = _ctx->level[_l].im1;
   im2 = _ctx->level[_l].im2;
-  for (i = 0; i < w; i++)
-    col_sums_x[i] = 5 * im1[i];
-  for (i = 0; i < w; i++)
-    col_sums_y[i] = 5 * im2[i];
+  for (i = 0; i < w; i++) col_sums_x[i] = 5 * im1[i];
+  for (i = 0; i < w; i++) col_sums_y[i] = 5 * im2[i];
   for (j = 1; j < 4; j++) {
     j1offs = FS_MINI(j, h - 1) * w;
-    for (i = 0; i < w; i++)
-      col_sums_x[i] += im1[j1offs + i];
-    for (i = 0; i < w; i++)
-      col_sums_y[i] += im2[j1offs + i];
+    for (i = 0; i < w; i++) col_sums_x[i] += im1[j1offs + i];
+    for (i = 0; i < w; i++) col_sums_y[i] += im2[j1offs + i];
   }
   ssim = _ctx->level[_l].ssim;
-  c1 = (double) (SSIM_C1 * 4096 * (1 << 4 * _l));
+  c1 = (double)(SSIM_C1 * 4096 * (1 << 4 * _l));
   for (j = 0; j < h; j++) {
     unsigned mux;
     unsigned muy;
@@ -210,8 +204,8 @@ static void fs_apply_luminance(fs_ctx *_ctx, int _l) {
       muy += col_sums_y[i1];
     }
     for (i = 0; i < w; i++) {
-      ssim[j * w + i] *= (2 * mux * (double) muy + c1)
-          / (mux * (double) mux + muy * (double) muy + c1);
+      ssim[j * w + i] *= (2 * mux * (double)muy + c1) /
+                         (mux * (double)mux + muy * (double)muy + c1);
       if (i + 1 < w) {
         i0 = FS_MAXI(0, i - 4);
         i1 = FS_MINI(i + 4, w - 1);
@@ -221,78 +215,68 @@ static void fs_apply_luminance(fs_ctx *_ctx, int _l) {
     }
     if (j + 1 < h) {
       j0offs = FS_MAXI(0, j - 4) * w;
-      for (i = 0; i < w; i++)
-        col_sums_x[i] -= im1[j0offs + i];
-      for (i = 0; i < w; i++)
-        col_sums_y[i] -= im2[j0offs + i];
+      for (i = 0; i < w; i++) col_sums_x[i] -= im1[j0offs + i];
+      for (i = 0; i < w; i++) col_sums_y[i] -= im2[j0offs + i];
       j1offs = FS_MINI(j + 4, h - 1) * w;
-      for (i = 0; i < w; i++)
-        col_sums_x[i] += im1[j1offs + i];
-      for (i = 0; i < w; i++)
-        col_sums_y[i] += im2[j1offs + i];
+      for (i = 0; i < w; i++) col_sums_x[i] += im1[j1offs + i];
+      for (i = 0; i < w; i++) col_sums_y[i] += im2[j1offs + i];
     }
   }
 }
 
-#define FS_COL_SET(_col, _joffs, _ioffs) \
-  do { \
-    unsigned gx; \
-    unsigned gy; \
+#define FS_COL_SET(_col, _joffs, _ioffs)                       \
+  do {                                                         \
+    unsigned gx;                                               \
+    unsigned gy;                                               \
     gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
     gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
-    col_sums_gx2[(_col)] = gx * (double)gx; \
-    col_sums_gy2[(_col)] = gy * (double)gy; \
-    col_sums_gxgy[(_col)] = gx * (double)gy; \
-  } \
-  while (0)
+    col_sums_gx2[(_col)] = gx * (double)gx;                    \
+    col_sums_gy2[(_col)] = gy * (double)gy;                    \
+    col_sums_gxgy[(_col)] = gx * (double)gy;                   \
+  } while (0)
 
-#define FS_COL_ADD(_col, _joffs, _ioffs) \
-  do { \
-    unsigned gx; \
-    unsigned gy; \
+#define FS_COL_ADD(_col, _joffs, _ioffs)                       \
+  do {                                                         \
+    unsigned gx;                                               \
+    unsigned gy;                                               \
     gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
     gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
-    col_sums_gx2[(_col)] += gx * (double)gx; \
-    col_sums_gy2[(_col)] += gy * (double)gy; \
-    col_sums_gxgy[(_col)] += gx * (double)gy; \
-  } \
-  while (0)
+    col_sums_gx2[(_col)] += gx * (double)gx;                   \
+    col_sums_gy2[(_col)] += gy * (double)gy;                   \
+    col_sums_gxgy[(_col)] += gx * (double)gy;                  \
+  } while (0)
 
-#define FS_COL_SUB(_col, _joffs, _ioffs) \
-  do { \
-    unsigned gx; \
-    unsigned gy; \
+#define FS_COL_SUB(_col, _joffs, _ioffs)                       \
+  do {                                                         \
+    unsigned gx;                                               \
+    unsigned gy;                                               \
     gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
     gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \
-    col_sums_gx2[(_col)] -= gx * (double)gx; \
-    col_sums_gy2[(_col)] -= gy * (double)gy; \
-    col_sums_gxgy[(_col)] -= gx * (double)gy; \
-  } \
-  while (0)
+    col_sums_gx2[(_col)] -= gx * (double)gx;                   \
+    col_sums_gy2[(_col)] -= gy * (double)gy;                   \
+    col_sums_gxgy[(_col)] -= gx * (double)gy;                  \
+  } while (0)
 
-#define FS_COL_COPY(_col1, _col2) \
-  do { \
-    col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)]; \
-    col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)]; \
+#define FS_COL_COPY(_col1, _col2)                    \
+  do {                                               \
+    col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)];   \
+    col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)];   \
     col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)]; \
-  } \
-  while (0)
+  } while (0)
 
-#define FS_COL_HALVE(_col1, _col2) \
-  do { \
-    col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 0.5; \
-    col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 0.5; \
+#define FS_COL_HALVE(_col1, _col2)                         \
+  do {                                                     \
+    col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 0.5;   \
+    col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 0.5;   \
     col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)] * 0.5; \
-  } \
-  while (0)
+  } while (0)
 
-#define FS_COL_DOUBLE(_col1, _col2) \
-  do { \
-    col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 2; \
-    col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 2; \
+#define FS_COL_DOUBLE(_col1, _col2)                      \
+  do {                                                   \
+    col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 2;   \
+    col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 2;   \
     col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)] * 2; \
-  } \
-  while (0)
+  } while (0)
 
 static void fs_calc_structure(fs_ctx *_ctx, int _l) {
   uint16_t *im1;
@@ -359,14 +343,11 @@ static void fs_calc_structure(fs_ctx *_ctx, int _l) {
         double mugy2;
         double mugxgy;
         mugx2 = col_sums_gx2[0];
-        for (k = 1; k < 8; k++)
-          mugx2 += col_sums_gx2[k];
+        for (k = 1; k < 8; k++) mugx2 += col_sums_gx2[k];
         mugy2 = col_sums_gy2[0];
-        for (k = 1; k < 8; k++)
-          mugy2 += col_sums_gy2[k];
+        for (k = 1; k < 8; k++) mugy2 += col_sums_gy2[k];
         mugxgy = col_sums_gxgy[0];
-        for (k = 1; k < 8; k++)
-          mugxgy += col_sums_gxgy[k];
+        for (k = 1; k < 8; k++) mugxgy += col_sums_gxgy[k];
         ssim[(j - 4) * w + i] = (2 * mugxgy + c2) / (mugx2 + mugy2 + c2);
         if (i + 1 < w) {
           FS_COL_SET(0, -1, 1);
@@ -401,8 +382,9 @@ static void fs_calc_structure(fs_ctx *_ctx, int _l) {
  Matlab implementation: {0.0448, 0.2856, 0.2363, 0.1333}.
  We drop the finest scale and renormalize the rest to sum to 1.*/
 
-static const double FS_WEIGHTS[FS_NLEVELS] = {0.2989654541015625,
-    0.3141326904296875, 0.2473602294921875, 0.1395416259765625};
+static const double FS_WEIGHTS[FS_NLEVELS] = {
+  0.2989654541015625, 0.3141326904296875, 0.2473602294921875, 0.1395416259765625
+};
 
 static double fs_average(fs_ctx *_ctx, int _l) {
   double *ssim;
@@ -416,13 +398,13 @@ static double fs_average(fs_ctx *_ctx, int _l) {
   ssim = _ctx->level[_l].ssim;
   ret = 0;
   for (j = 0; j < h; j++)
-    for (i = 0; i < w; i++)
-      ret += ssim[j * w + i];
+    for (i = 0; i < w; i++) ret += ssim[j * w + i];
   return pow(ret / (w * h), FS_WEIGHTS[_l]);
 }
 
 static double calc_ssim(const unsigned char *_src, int _systride,
-                 const unsigned char *_dst, int _dystride, int _w, int _h) {
+                        const unsigned char *_dst, int _dystride, int _w,
+                        int _h) {
   fs_ctx ctx;
   double ret;
   int l;
@@ -446,22 +428,22 @@ static double convert_ssim_db(double _ssim, double _weight) {
 }
 
 double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source,
-                         const YV12_BUFFER_CONFIG *dest,
-                         double *ssim_y, double *ssim_u, double *ssim_v) {
+                         const YV12_BUFFER_CONFIG *dest, double *ssim_y,
+                         double *ssim_u, double *ssim_v) {
   double ssimv;
   vpx_clear_system_state();
 
-  *ssim_y = calc_ssim(source->y_buffer, source->y_stride, dest->y_buffer,
-                      dest->y_stride, source->y_crop_width,
-                      source->y_crop_height);
+  *ssim_y =
+      calc_ssim(source->y_buffer, source->y_stride, dest->y_buffer,
+                dest->y_stride, source->y_crop_width, source->y_crop_height);
 
-  *ssim_u = calc_ssim(source->u_buffer, source->uv_stride, dest->u_buffer,
-                      dest->uv_stride, source->uv_crop_width,
-                      source->uv_crop_height);
+  *ssim_u =
+      calc_ssim(source->u_buffer, source->uv_stride, dest->u_buffer,
+                dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
 
-  *ssim_v = calc_ssim(source->v_buffer, source->uv_stride, dest->v_buffer,
-                      dest->uv_stride, source->uv_crop_width,
-                      source->uv_crop_height);
+  *ssim_v =
+      calc_ssim(source->v_buffer, source->uv_stride, dest->v_buffer,
+                dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
   ssimv = (*ssim_y) * .8 + .1 * ((*ssim_u) + (*ssim_v));
 
   return convert_ssim_db(ssimv, 1.0);
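
Two weightings combine in this file: fs_average raises each level's mean SSIM to the matching FS_WEIGHTS exponent, so the per-level scores multiply together, and vpx_calc_fastssim then blends the plane scores 0.8/0.1/0.1 before converting to decibels. The plane blend, restated as a function:

/* Sketch: combine per-plane scores exactly as vpx_calc_fastssim does --
 * luma weighted 0.8, each chroma plane 0.1. */
static double combine_plane_ssim(double ssim_y, double ssim_u, double ssim_v) {
  return ssim_y * 0.8 + 0.1 * (ssim_u + ssim_v);
}
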
diff --git a/vpx_dsp/fwd_txfm.c b/vpx_dsp/fwd_txfm.c
index ba63683d592c4e581df6968f411c6f1d0835feea..9733b88daf6b458a28728e6803b1475f4c07690c 100644
--- a/vpx_dsp/fwd_txfm.c
+++ b/vpx_dsp/fwd_txfm.c
@@ -71,8 +71,7 @@ void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   {
     int i, j;
     for (i = 0; i < 4; ++i) {
-      for (j = 0; j < 4; ++j)
-        output[j + i * 4] = (output[j + i * 4] + 1) >> 2;
+      for (j = 0; j < 4; ++j) output[j + i * 4] = (output[j + i * 4] + 1) >> 2;
     }
   }
 }
@@ -81,8 +80,7 @@ void vpx_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 4; ++r)
-    for (c = 0; c < 4; ++c)
-      sum += input[r * stride + c];
+    for (c = 0; c < 4; ++c) sum += input[r * stride + c];
 
   output[0] = sum << 1;
   output[1] = 0;
@@ -133,8 +131,8 @@ void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
       x3 = s0 - s3;
       t0 = (x0 + x1) * cospi_16_64;
       t1 = (x0 - x1) * cospi_16_64;
-      t2 =  x2 * cospi_24_64 + x3 *  cospi_8_64;
-      t3 = -x2 * cospi_8_64  + x3 * cospi_24_64;
+      t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+      t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
       output[0] = (tran_low_t)fdct_round_shift(t0);
       output[2] = (tran_low_t)fdct_round_shift(t2);
       output[4] = (tran_low_t)fdct_round_shift(t1);
@@ -153,24 +151,23 @@ void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
       x3 = s7 + t3;
 
       // Stage 4
-      t0 = x0 * cospi_28_64 + x3 *   cospi_4_64;
-      t1 = x1 * cospi_12_64 + x2 *  cospi_20_64;
+      t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+      t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
       t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
-      t3 = x3 * cospi_28_64 + x0 *  -cospi_4_64;
+      t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
       output[1] = (tran_low_t)fdct_round_shift(t0);
       output[3] = (tran_low_t)fdct_round_shift(t2);
       output[5] = (tran_low_t)fdct_round_shift(t1);
       output[7] = (tran_low_t)fdct_round_shift(t3);
       output += 8;
     }
-    in  = intermediate;
+    in = intermediate;
     output = final_output;
   }
 
   // Rows
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      final_output[j + i * 8] /= 2;
+    for (j = 0; j < 8; ++j) final_output[j + i * 8] /= 2;
   }
 }
 
@@ -178,8 +175,7 @@ void vpx_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 8; ++r)
-    for (c = 0; c < 8; ++c)
-      sum += input[r * stride + c];
+    for (c = 0; c < 8; ++c) sum += input[r * stride + c];
 
   output[0] = sum;
   output[1] = 0;
@@ -215,11 +211,11 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         input[3] = (in_pass0[3 * stride] + in_pass0[12 * stride]) * 4;
         input[4] = (in_pass0[4 * stride] + in_pass0[11 * stride]) * 4;
         input[5] = (in_pass0[5 * stride] + in_pass0[10 * stride]) * 4;
-        input[6] = (in_pass0[6 * stride] + in_pass0[ 9 * stride]) * 4;
-        input[7] = (in_pass0[7 * stride] + in_pass0[ 8 * stride]) * 4;
+        input[6] = (in_pass0[6 * stride] + in_pass0[9 * stride]) * 4;
+        input[7] = (in_pass0[7 * stride] + in_pass0[8 * stride]) * 4;
         // Calculate input for the next 8 results.
-        step1[0] = (in_pass0[7 * stride] - in_pass0[ 8 * stride]) * 4;
-        step1[1] = (in_pass0[6 * stride] - in_pass0[ 9 * stride]) * 4;
+        step1[0] = (in_pass0[7 * stride] - in_pass0[8 * stride]) * 4;
+        step1[1] = (in_pass0[6 * stride] - in_pass0[9 * stride]) * 4;
         step1[2] = (in_pass0[5 * stride] - in_pass0[10 * stride]) * 4;
         step1[3] = (in_pass0[4 * stride] - in_pass0[11 * stride]) * 4;
         step1[4] = (in_pass0[3 * stride] - in_pass0[12 * stride]) * 4;
@@ -234,11 +230,11 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         input[3] = ((in[3 * 16] + 1) >> 2) + ((in[12 * 16] + 1) >> 2);
         input[4] = ((in[4 * 16] + 1) >> 2) + ((in[11 * 16] + 1) >> 2);
         input[5] = ((in[5 * 16] + 1) >> 2) + ((in[10 * 16] + 1) >> 2);
-        input[6] = ((in[6 * 16] + 1) >> 2) + ((in[ 9 * 16] + 1) >> 2);
-        input[7] = ((in[7 * 16] + 1) >> 2) + ((in[ 8 * 16] + 1) >> 2);
+        input[6] = ((in[6 * 16] + 1) >> 2) + ((in[9 * 16] + 1) >> 2);
+        input[7] = ((in[7 * 16] + 1) >> 2) + ((in[8 * 16] + 1) >> 2);
         // Calculate input for the next 8 results.
-        step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[ 8 * 16] + 1) >> 2);
-        step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[ 9 * 16] + 1) >> 2);
+        step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[8 * 16] + 1) >> 2);
+        step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[9 * 16] + 1) >> 2);
         step1[2] = ((in[5 * 16] + 1) >> 2) - ((in[10 * 16] + 1) >> 2);
         step1[3] = ((in[4 * 16] + 1) >> 2) - ((in[11 * 16] + 1) >> 2);
         step1[4] = ((in[3 * 16] + 1) >> 2) - ((in[12 * 16] + 1) >> 2);
@@ -269,7 +265,7 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         x3 = s0 - s3;
         t0 = (x0 + x1) * cospi_16_64;
         t1 = (x0 - x1) * cospi_16_64;
-        t2 = x3 * cospi_8_64  + x2 * cospi_24_64;
+        t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
         t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
         out[0] = (tran_low_t)fdct_round_shift(t0);
         out[4] = (tran_low_t)fdct_round_shift(t2);
@@ -289,10 +285,10 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         x3 = s7 + t3;
 
         // Stage 4
-        t0 = x0 * cospi_28_64 + x3 *   cospi_4_64;
-        t1 = x1 * cospi_12_64 + x2 *  cospi_20_64;
+        t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+        t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
         t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
-        t3 = x3 * cospi_28_64 + x0 *  -cospi_4_64;
+        t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
         out[2] = (tran_low_t)fdct_round_shift(t0);
         out[6] = (tran_low_t)fdct_round_shift(t2);
         out[10] = (tran_low_t)fdct_round_shift(t1);
@@ -319,12 +315,12 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         step3[6] = step1[6] + step2[5];
         step3[7] = step1[7] + step2[4];
         // step 4
-        temp1 = step3[1] *  -cospi_8_64 + step3[6] * cospi_24_64;
-        temp2 = step3[2] * cospi_24_64 + step3[5] *  cospi_8_64;
+        temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
+        temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
         step2[1] = fdct_round_shift(temp1);
         step2[2] = fdct_round_shift(temp2);
         temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
-        temp2 = step3[1] * cospi_24_64 + step3[6] *  cospi_8_64;
+        temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
         step2[5] = fdct_round_shift(temp1);
         step2[6] = fdct_round_shift(temp2);
         // step 5
@@ -337,20 +333,20 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         step1[6] = step3[7] - step2[6];
         step1[7] = step3[7] + step2[6];
         // step 6
-        temp1 = step1[0] * cospi_30_64 + step1[7] *  cospi_2_64;
+        temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
         temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
         out[1] = (tran_low_t)fdct_round_shift(temp1);
         out[9] = (tran_low_t)fdct_round_shift(temp2);
         temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
-        temp2 = step1[3] *  cospi_6_64 + step1[4] * cospi_26_64;
+        temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
         out[5] = (tran_low_t)fdct_round_shift(temp1);
         out[13] = (tran_low_t)fdct_round_shift(temp2);
-        temp1 = step1[3] * -cospi_26_64 + step1[4] *  cospi_6_64;
+        temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
         temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
         out[3] = (tran_low_t)fdct_round_shift(temp1);
         out[11] = (tran_low_t)fdct_round_shift(temp2);
         temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
-        temp2 = step1[0] *  -cospi_2_64 + step1[7] * cospi_30_64;
+        temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
         out[7] = (tran_low_t)fdct_round_shift(temp1);
         out[15] = (tran_low_t)fdct_round_shift(temp2);
       }
@@ -369,8 +365,7 @@ void vpx_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 16; ++r)
-    for (c = 0; c < 16; ++c)
-      sum += input[r * stride + c];
+    for (c = 0; c < 16; ++c) sum += input[r * stride + c];
 
   output[0] = sum >> 1;
   output[1] = 0;
@@ -677,36 +672,36 @@ void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
   step[31] = output[31] + output[30];
 
   // Final stage --- outputs indices are bit-reversed.
-  output[0]  = step[0];
+  output[0] = step[0];
   output[16] = step[1];
-  output[8]  = step[2];
+  output[8] = step[2];
   output[24] = step[3];
-  output[4]  = step[4];
+  output[4] = step[4];
   output[20] = step[5];
   output[12] = step[6];
   output[28] = step[7];
-  output[2]  = step[8];
+  output[2] = step[8];
   output[18] = step[9];
   output[10] = step[10];
   output[26] = step[11];
-  output[6]  = step[12];
+  output[6] = step[12];
   output[22] = step[13];
   output[14] = step[14];
   output[30] = step[15];
 
-  output[1]  = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
+  output[1] = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
   output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64);
-  output[9]  = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
+  output[9] = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
   output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64);
-  output[5]  = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
+  output[5] = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
   output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64);
   output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64);
   output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64);
-  output[3]  = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
+  output[3] = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
   output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64);
   output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64);
   output[27] = dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64);
-  output[7]  = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
+  output[7] = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
   output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * -cospi_9_64);
   output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64);
   output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
@@ -719,8 +714,7 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   // Columns
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = input[j * stride + i] * 4;
+    for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
     vpx_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
@@ -729,8 +723,7 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   // Rows
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = output[j + i * 32];
+    for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
     vpx_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
@@ -748,8 +741,7 @@ void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   // Columns
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = input[j * stride + i] * 4;
+    for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
     vpx_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       // TODO(cd): see quality impact of only doing
@@ -761,11 +753,9 @@ void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   // Rows
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = output[j + i * 32];
+    for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
     vpx_fdct32(temp_in, temp_out, 1);
-    for (j = 0; j < 32; ++j)
-      out[j + i * 32] = (tran_low_t)temp_out[j];
+    for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
 
@@ -773,8 +763,7 @@ void vpx_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 32; ++r)
-    for (c = 0; c < 32; ++c)
-      sum += input[r * stride + c];
+    for (c = 0; c < 32; ++c) sum += input[r * stride + c];
 
   output[0] = sum >> 3;
   output[1] = 0;
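
Each *_1_c variant computes only the DC coefficient: it sums the block, then applies a size-dependent shift so the result matches the scaling of the corresponding full transform (<< 1 for 4x4, unshifted for 8x8, >> 1 for 16x16, >> 3 for 32x32). A consolidated sketch of that pattern; the helper name is hypothetical and tran_low_t comes from the vpx_dsp headers already included here:

#include <stdint.h>

/* Sketch: DC-only forward transform, generalized over the block size. */
static tran_low_t fdct_dc_only(const int16_t *input, int stride, int size) {
  int r, c;
  tran_low_t sum = 0;
  for (r = 0; r < size; ++r)
    for (c = 0; c < size; ++c) sum += input[r * stride + c];
  switch (size) {
    case 4: return sum << 1;
    case 8: return sum;
    case 16: return sum >> 1;
    default: return sum >> 3; /* 32x32 */
  }
}
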
diff --git a/vpx_dsp/intrapred.c b/vpx_dsp/intrapred.c
index eb98fdbaff773c9c680e3eef6a7bd7fa01b35dad..3a2a8e3a13854ccac7965b1467a4e7ce67c1bd7c 100644
--- a/vpx_dsp/intrapred.c
+++ b/vpx_dsp/intrapred.c
@@ -14,17 +14,16 @@
 #include "vpx_dsp/vpx_dsp_common.h"
 #include "vpx_mem/vpx_mem.h"
 
-#define DST(x, y) dst[(x) + (y) * stride]
+#define DST(x, y) dst[(x) + (y)*stride]
 #define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
 #define AVG2(a, b) (((a) + (b) + 1) >> 1)
 
 static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                   const uint8_t *above, const uint8_t *left) {
   int r, c;
-  (void) above;
+  (void)above;
   // first column
-  for (r = 0; r < bs - 1; ++r)
-    dst[r * stride] = AVG2(left[r], left[r + 1]);
+  for (r = 0; r < bs - 1; ++r) dst[r * stride] = AVG2(left[r], left[r + 1]);
   dst[(bs - 1) * stride] = left[bs - 1];
   dst++;
 
@@ -36,8 +35,7 @@ static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
   dst++;
 
   // rest of last row
-  for (c = 0; c < bs - 2; ++c)
-    dst[(bs - 1) * stride + c] = left[bs - 1];
+  for (c = 0; c < bs - 2; ++c) dst[(bs - 1) * stride + c] = left[bs - 1];
 
   for (r = bs - 2; r >= 0; --r)
     for (c = 0; c < bs - 2; ++c)
@@ -47,13 +45,13 @@ static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 static INLINE void d207e_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                    const uint8_t *above, const uint8_t *left) {
   int r, c;
-  (void) above;
+  (void)above;
 
   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
       dst[c] = c & 1 ? AVG3(left[(c >> 1) + r], left[(c >> 1) + r + 1],
                             left[(c >> 1) + r + 2])
-          : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]);
+                     : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]);
     }
     dst += stride;
   }
@@ -79,12 +77,12 @@ static INLINE void d63_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 static INLINE void d63e_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                   const uint8_t *above, const uint8_t *left) {
   int r, c;
-  (void) left;
+  (void)left;
   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
       dst[c] = r & 1 ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1],
                             above[(r >> 1) + c + 2])
-          : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
+                     : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
     }
     dst += stride;
   }
@@ -112,7 +110,7 @@ static INLINE void d45_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 static INLINE void d45e_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                   const uint8_t *above, const uint8_t *left) {
   int r, c;
-  (void) left;
+  (void)left;
   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
       dst[c] = AVG3(above[r + c], above[r + c + 1],
@@ -127,14 +125,12 @@ static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
   int r, c;
 
   // first row
-  for (c = 0; c < bs; c++)
-    dst[c] = AVG2(above[c - 1], above[c]);
+  for (c = 0; c < bs; c++) dst[c] = AVG2(above[c - 1], above[c]);
   dst += stride;
 
   // second row
   dst[0] = AVG3(left[0], above[-1], above[0]);
-  for (c = 1; c < bs; c++)
-    dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
+  for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
   dst += stride;
 
   // the rest of the first column
@@ -144,8 +140,7 @@ static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 
   // the rest of the block
   for (r = 2; r < bs; ++r) {
-    for (c = 1; c < bs; c++)
-      dst[c] = dst[-2 * stride + c - 1];
+    for (c = 1; c < bs; c++) dst[c] = dst[-2 * stride + c - 1];
     dst += stride;
   }
 }
@@ -154,8 +149,7 @@ static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                   const uint8_t *above, const uint8_t *left) {
   int r, c;
   dst[0] = AVG3(left[0], above[-1], above[0]);
-  for (c = 1; c < bs; c++)
-    dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
+  for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
 
   dst[stride] = AVG3(above[-1], left[0], left[1]);
   for (r = 2; r < bs; ++r)
@@ -163,8 +157,7 @@ static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 
   dst += stride;
   for (r = 1; r < bs; ++r) {
-    for (c = 1; c < bs; c++)
-      dst[c] = dst[-stride + c - 1];
+    for (c = 1; c < bs; c++) dst[c] = dst[-stride + c - 1];
     dst += stride;
   }
 }
@@ -173,8 +166,7 @@ static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                   const uint8_t *above, const uint8_t *left) {
   int r, c;
   dst[0] = AVG2(above[-1], left[0]);
-  for (r = 1; r < bs; r++)
-    dst[r * stride] = AVG2(left[r - 1], left[r]);
+  for (r = 1; r < bs; r++) dst[r * stride] = AVG2(left[r - 1], left[r]);
   dst++;
 
   dst[0] = AVG3(left[0], above[-1], above[0]);
@@ -188,8 +180,7 @@ static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
   dst += stride;
 
   for (r = 1; r < bs; ++r) {
-    for (c = 0; c < bs - 2; c++)
-      dst[c] = dst[-stride + c - 2];
+    for (c = 0; c < bs - 2; c++) dst[c] = dst[-stride + c - 2];
     dst += stride;
   }
 }
@@ -197,7 +188,7 @@ static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
   int r;
-  (void) left;
+  (void)left;
 
   for (r = 0; r < bs; r++) {
     memcpy(dst, above, bs);
@@ -208,7 +199,7 @@ static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 static INLINE void h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
   int r;
-  (void) above;
+  (void)above;
 
   for (r = 0; r < bs; r++) {
     memset(dst, left[r], bs);
@@ -231,8 +222,8 @@ static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 static INLINE void dc_128_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                     const uint8_t *above, const uint8_t *left) {
   int r;
-  (void) above;
-  (void) left;
+  (void)above;
+  (void)left;
 
   for (r = 0; r < bs; r++) {
     memset(dst, 128, bs);
@@ -244,10 +235,9 @@ static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                      const uint8_t *above,
                                      const uint8_t *left) {
   int i, r, expected_dc, sum = 0;
-  (void) above;
+  (void)above;
 
-  for (i = 0; i < bs; i++)
-    sum += left[i];
+  for (i = 0; i < bs; i++) sum += left[i];
   expected_dc = (sum + (bs >> 1)) / bs;
 
   for (r = 0; r < bs; r++) {
@@ -259,10 +249,9 @@ static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
 static INLINE void dc_top_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                     const uint8_t *above, const uint8_t *left) {
   int i, r, expected_dc, sum = 0;
-  (void) left;
+  (void)left;
 
-  for (i = 0; i < bs; i++)
-    sum += above[i];
+  for (i = 0; i < bs; i++) sum += above[i];
   expected_dc = (sum + (bs >> 1)) / bs;
 
   for (r = 0; r < bs; r++) {
@@ -328,14 +317,13 @@ void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   const int K = left[2];
   const int L = left[3];
   (void)above;
-  DST(0, 0) =             AVG2(I, J);
+  DST(0, 0) = AVG2(I, J);
   DST(2, 0) = DST(0, 1) = AVG2(J, K);
   DST(2, 1) = DST(0, 2) = AVG2(K, L);
-  DST(1, 0) =             AVG3(I, J, K);
+  DST(1, 0) = AVG3(I, J, K);
   DST(3, 0) = DST(1, 1) = AVG3(J, K, L);
   DST(3, 1) = DST(1, 2) = AVG3(K, L, L);
-  DST(3, 2) = DST(2, 2) =
-      DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L;
+  DST(3, 2) = DST(2, 2) = DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L;
 }
 
 void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
@@ -348,17 +336,17 @@ void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   const int F = above[5];
   const int G = above[6];
   (void)left;
-  DST(0, 0) =             AVG2(A, B);
+  DST(0, 0) = AVG2(A, B);
   DST(1, 0) = DST(0, 2) = AVG2(B, C);
   DST(2, 0) = DST(1, 2) = AVG2(C, D);
   DST(3, 0) = DST(2, 2) = AVG2(D, E);
-              DST(3, 2) = AVG2(E, F);  // differs from vp8
+  DST(3, 2) = AVG2(E, F);  // differs from vp8
 
-  DST(0, 1) =             AVG3(A, B, C);
+  DST(0, 1) = AVG3(A, B, C);
   DST(1, 1) = DST(0, 3) = AVG3(B, C, D);
   DST(2, 1) = DST(1, 3) = AVG3(C, D, E);
   DST(3, 1) = DST(2, 3) = AVG3(D, E, F);
-              DST(3, 3) = AVG3(E, F, G);  // differs from vp8
+  DST(3, 3) = AVG3(E, F, G);  // differs from vp8
 }
 
 void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
@@ -372,17 +360,17 @@ void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   const int G = above[6];
   const int H = above[7];
   (void)left;
-  DST(0, 0) =             AVG2(A, B);
+  DST(0, 0) = AVG2(A, B);
   DST(1, 0) = DST(0, 2) = AVG2(B, C);
   DST(2, 0) = DST(1, 2) = AVG2(C, D);
   DST(3, 0) = DST(2, 2) = AVG2(D, E);
-              DST(3, 2) = AVG3(E, F, G);
+  DST(3, 2) = AVG3(E, F, G);
 
-  DST(0, 1) =             AVG3(A, B, C);
+  DST(0, 1) = AVG3(A, B, C);
   DST(1, 1) = DST(0, 3) = AVG3(B, C, D);
   DST(2, 1) = DST(1, 3) = AVG3(C, D, E);
   DST(3, 1) = DST(2, 3) = AVG3(D, E, F);
-              DST(3, 3) = AVG3(F, G, H);
+  DST(3, 3) = AVG3(F, G, H);
 }
 
 void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
@@ -397,13 +385,13 @@ void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   const int H = above[7];
   (void)stride;
   (void)left;
-  DST(0, 0)                                     = AVG3(A, B, C);
-  DST(1, 0) = DST(0, 1)                         = AVG3(B, C, D);
-  DST(2, 0) = DST(1, 1) = DST(0, 2)             = AVG3(C, D, E);
+  DST(0, 0) = AVG3(A, B, C);
+  DST(1, 0) = DST(0, 1) = AVG3(B, C, D);
+  DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E);
   DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F);
-              DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G);
-                          DST(3, 2) = DST(2, 3) = AVG3(F, G, H);
-                                      DST(3, 3) = H;  // differs from vp8
+  DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G);
+  DST(3, 2) = DST(2, 3) = AVG3(F, G, H);
+  DST(3, 3) = H;  // differs from vp8
 }
 
 void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
@@ -418,13 +406,13 @@ void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   const int H = above[7];
   (void)stride;
   (void)left;
-  DST(0, 0)                                     = AVG3(A, B, C);
-  DST(1, 0) = DST(0, 1)                         = AVG3(B, C, D);
-  DST(2, 0) = DST(1, 1) = DST(0, 2)             = AVG3(C, D, E);
+  DST(0, 0) = AVG3(A, B, C);
+  DST(1, 0) = DST(0, 1) = AVG3(B, C, D);
+  DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E);
   DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F);
-              DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G);
-                          DST(3, 2) = DST(2, 3) = AVG3(F, G, H);
-                                      DST(3, 3) = AVG3(G, H, H);
+  DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G);
+  DST(3, 2) = DST(2, 3) = AVG3(F, G, H);
+  DST(3, 3) = AVG3(G, H, H);
 }
 
 void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
@@ -440,14 +428,14 @@ void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   DST(0, 0) = DST(1, 2) = AVG2(X, A);
   DST(1, 0) = DST(2, 2) = AVG2(A, B);
   DST(2, 0) = DST(3, 2) = AVG2(B, C);
-  DST(3, 0)             = AVG2(C, D);
+  DST(3, 0) = AVG2(C, D);
 
-  DST(0, 3) =             AVG3(K, J, I);
-  DST(0, 2) =             AVG3(J, I, X);
+  DST(0, 3) = AVG3(K, J, I);
+  DST(0, 2) = AVG3(J, I, X);
   DST(0, 1) = DST(1, 3) = AVG3(I, X, A);
   DST(1, 1) = DST(2, 3) = AVG3(X, A, B);
   DST(2, 1) = DST(3, 3) = AVG3(A, B, C);
-  DST(3, 1) =             AVG3(B, C, D);
+  DST(3, 1) = AVG3(B, C, D);
 }
 
 void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
@@ -462,13 +450,13 @@ void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   const int C = above[2];
   const int D = above[3];
   (void)stride;
-  DST(0, 3)                                     = AVG3(J, K, L);
-  DST(1, 3) = DST(0, 2)                         = AVG3(I, J, K);
-  DST(2, 3) = DST(1, 2) = DST(0, 1)             = AVG3(X, I, J);
+  DST(0, 3) = AVG3(J, K, L);
+  DST(1, 3) = DST(0, 2) = AVG3(I, J, K);
+  DST(2, 3) = DST(1, 2) = DST(0, 1) = AVG3(X, I, J);
   DST(3, 3) = DST(2, 2) = DST(1, 1) = DST(0, 0) = AVG3(A, X, I);
-              DST(3, 2) = DST(2, 1) = DST(1, 0) = AVG3(B, A, X);
-                          DST(3, 1) = DST(2, 0) = AVG3(C, B, A);
-                                      DST(3, 0) = AVG3(D, C, B);
+  DST(3, 2) = DST(2, 1) = DST(1, 0) = AVG3(B, A, X);
+  DST(3, 1) = DST(2, 0) = AVG3(C, B, A);
+  DST(3, 0) = AVG3(D, C, B);
 }
 
 void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
@@ -485,14 +473,14 @@ void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   DST(0, 0) = DST(2, 1) = AVG2(I, X);
   DST(0, 1) = DST(2, 2) = AVG2(J, I);
   DST(0, 2) = DST(2, 3) = AVG2(K, J);
-  DST(0, 3)             = AVG2(L, K);
+  DST(0, 3) = AVG2(L, K);
 
-  DST(3, 0)             = AVG3(A, B, C);
-  DST(2, 0)             = AVG3(X, A, B);
+  DST(3, 0) = AVG3(A, B, C);
+  DST(2, 0) = AVG3(X, A, B);
   DST(1, 0) = DST(3, 1) = AVG3(I, X, A);
   DST(1, 1) = DST(3, 2) = AVG3(J, I, X);
   DST(1, 2) = DST(3, 3) = AVG3(K, J, I);
-  DST(1, 3)             = AVG3(L, K, J);
+  DST(1, 3) = AVG3(L, K, J);
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -500,8 +488,8 @@ static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride,
                                          int bs, const uint16_t *above,
                                          const uint16_t *left, int bd) {
   int r, c;
-  (void) above;
-  (void) bd;
+  (void)above;
+  (void)bd;
 
   // First column.
   for (r = 0; r < bs - 1; ++r) {
@@ -519,8 +507,7 @@ static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride,
   dst++;
 
   // Rest of last row.
-  for (c = 0; c < bs - 2; ++c)
-    dst[(bs - 1) * stride + c] = left[bs - 1];
+  for (c = 0; c < bs - 2; ++c) dst[(bs - 1) * stride + c] = left[bs - 1];
 
   for (r = bs - 2; r >= 0; --r) {
     for (c = 0; c < bs - 2; ++c)
@@ -532,30 +519,30 @@ static INLINE void highbd_d207e_predictor(uint16_t *dst, ptrdiff_t stride,
                                           int bs, const uint16_t *above,
                                           const uint16_t *left, int bd) {
   int r, c;
-  (void) above;
-  (void) bd;
+  (void)above;
+  (void)bd;
 
   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
       dst[c] = c & 1 ? AVG3(left[(c >> 1) + r], left[(c >> 1) + r + 1],
                             left[(c >> 1) + r + 2])
-          : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]);
+                     : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]);
     }
     dst += stride;
   }
 }
 
-static INLINE void highbd_d63_predictor(uint16_t *dst, ptrdiff_t stride,
-                                        int bs, const uint16_t *above,
+static INLINE void highbd_d63_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
+                                        const uint16_t *above,
                                         const uint16_t *left, int bd) {
   int r, c;
-  (void) left;
-  (void) bd;
+  (void)left;
+  (void)bd;
   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
       dst[c] = r & 1 ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1],
                             above[(r >> 1) + c + 2])
-          : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
+                     : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
     }
     dst += stride;
   }
@@ -567,13 +554,13 @@ static INLINE void highbd_d45_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
   int r, c;
-  (void) left;
-  (void) bd;
+  (void)left;
+  (void)bd;
   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
-      dst[c] = r + c + 2 < bs * 2 ? AVG3(above[r + c], above[r + c + 1],
-                                         above[r + c + 2])
-          : above[bs * 2 - 1];
+      dst[c] = r + c + 2 < bs * 2
+                   ? AVG3(above[r + c], above[r + c + 1], above[r + c + 2])
+                   : above[bs * 2 - 1];
     }
     dst += stride;
   }
@@ -583,8 +570,8 @@ static INLINE void highbd_d45e_predictor(uint16_t *dst, ptrdiff_t stride,
                                          int bs, const uint16_t *above,
                                          const uint16_t *left, int bd) {
   int r, c;
-  (void) left;
-  (void) bd;
+  (void)left;
+  (void)bd;
   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
       dst[c] = AVG3(above[r + c], above[r + c + 1],
@@ -598,17 +585,15 @@ static INLINE void highbd_d117_predictor(uint16_t *dst, ptrdiff_t stride,
                                          int bs, const uint16_t *above,
                                          const uint16_t *left, int bd) {
   int r, c;
-  (void) bd;
+  (void)bd;
 
   // first row
-  for (c = 0; c < bs; c++)
-    dst[c] = AVG2(above[c - 1], above[c]);
+  for (c = 0; c < bs; c++) dst[c] = AVG2(above[c - 1], above[c]);
   dst += stride;
 
   // second row
   dst[0] = AVG3(left[0], above[-1], above[0]);
-  for (c = 1; c < bs; c++)
-    dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
+  for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
   dst += stride;
 
   // the rest of the first column
@@ -618,8 +603,7 @@ static INLINE void highbd_d117_predictor(uint16_t *dst, ptrdiff_t stride,
 
   // the rest of the block
   for (r = 2; r < bs; ++r) {
-    for (c = 1; c < bs; c++)
-      dst[c] = dst[-2 * stride + c - 1];
+    for (c = 1; c < bs; c++) dst[c] = dst[-2 * stride + c - 1];
     dst += stride;
   }
 }
@@ -628,10 +612,9 @@ static INLINE void highbd_d135_predictor(uint16_t *dst, ptrdiff_t stride,
                                          int bs, const uint16_t *above,
                                          const uint16_t *left, int bd) {
   int r, c;
-  (void) bd;
+  (void)bd;
   dst[0] = AVG3(left[0], above[-1], above[0]);
-  for (c = 1; c < bs; c++)
-    dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
+  for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
 
   dst[stride] = AVG3(above[-1], left[0], left[1]);
   for (r = 2; r < bs; ++r)
@@ -639,8 +622,7 @@ static INLINE void highbd_d135_predictor(uint16_t *dst, ptrdiff_t stride,
 
   dst += stride;
   for (r = 1; r < bs; ++r) {
-    for (c = 1; c < bs; c++)
-      dst[c] = dst[-stride + c - 1];
+    for (c = 1; c < bs; c++) dst[c] = dst[-stride + c - 1];
     dst += stride;
   }
 }
@@ -649,10 +631,9 @@ static INLINE void highbd_d153_predictor(uint16_t *dst, ptrdiff_t stride,
                                          int bs, const uint16_t *above,
                                          const uint16_t *left, int bd) {
   int r, c;
-  (void) bd;
+  (void)bd;
   dst[0] = AVG2(above[-1], left[0]);
-  for (r = 1; r < bs; r++)
-    dst[r * stride] = AVG2(left[r - 1], left[r]);
+  for (r = 1; r < bs; r++) dst[r * stride] = AVG2(left[r - 1], left[r]);
   dst++;
 
   dst[0] = AVG3(left[0], above[-1], above[0]);
@@ -666,42 +647,41 @@ static INLINE void highbd_d153_predictor(uint16_t *dst, ptrdiff_t stride,
   dst += stride;
 
   for (r = 1; r < bs; ++r) {
-    for (c = 0; c < bs - 2; c++)
-      dst[c] = dst[-stride + c - 2];
+    for (c = 0; c < bs - 2; c++) dst[c] = dst[-stride + c - 2];
     dst += stride;
   }
 }
 
-static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride,
-                                      int bs, const uint16_t *above,
+static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
+                                      const uint16_t *above,
                                       const uint16_t *left, int bd) {
   int r;
-  (void) left;
-  (void) bd;
+  (void)left;
+  (void)bd;
   for (r = 0; r < bs; r++) {
     memcpy(dst, above, bs * sizeof(uint16_t));
     dst += stride;
   }
 }
 
-static INLINE void highbd_h_predictor(uint16_t *dst, ptrdiff_t stride,
-                                      int bs, const uint16_t *above,
+static INLINE void highbd_h_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
+                                      const uint16_t *above,
                                       const uint16_t *left, int bd) {
   int r;
-  (void) above;
-  (void) bd;
+  (void)above;
+  (void)bd;
   for (r = 0; r < bs; r++) {
     vpx_memset16(dst, left[r], bs);
     dst += stride;
   }
 }
 
-static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride,
-                                       int bs, const uint16_t *above,
+static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
+                                       const uint16_t *above,
                                        const uint16_t *left, int bd) {
   int r, c;
   int ytop_left = above[-1];
-  (void) bd;
+  (void)bd;
 
   for (r = 0; r < bs; r++) {
     for (c = 0; c < bs; c++)
@@ -714,8 +694,8 @@ static INLINE void highbd_dc_128_predictor(uint16_t *dst, ptrdiff_t stride,
                                            int bs, const uint16_t *above,
                                            const uint16_t *left, int bd) {
   int r;
-  (void) above;
-  (void) left;
+  (void)above;
+  (void)left;
 
   for (r = 0; r < bs; r++) {
     vpx_memset16(dst, 128 << (bd - 8), bs);
@@ -727,11 +707,10 @@ static INLINE void highbd_dc_left_predictor(uint16_t *dst, ptrdiff_t stride,
                                             int bs, const uint16_t *above,
                                             const uint16_t *left, int bd) {
   int i, r, expected_dc, sum = 0;
-  (void) above;
-  (void) bd;
+  (void)above;
+  (void)bd;
 
-  for (i = 0; i < bs; i++)
-    sum += left[i];
+  for (i = 0; i < bs; i++) sum += left[i];
   expected_dc = (sum + (bs >> 1)) / bs;
 
   for (r = 0; r < bs; r++) {
@@ -744,11 +723,10 @@ static INLINE void highbd_dc_top_predictor(uint16_t *dst, ptrdiff_t stride,
                                            int bs, const uint16_t *above,
                                            const uint16_t *left, int bd) {
   int i, r, expected_dc, sum = 0;
-  (void) left;
-  (void) bd;
+  (void)left;
+  (void)bd;
 
-  for (i = 0; i < bs; i++)
-    sum += above[i];
+  for (i = 0; i < bs; i++) sum += above[i];
   expected_dc = (sum + (bs >> 1)) / bs;
 
   for (r = 0; r < bs; r++) {
@@ -757,12 +735,12 @@ static INLINE void highbd_dc_top_predictor(uint16_t *dst, ptrdiff_t stride,
   }
 }
 
-static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride,
-                                       int bs, const uint16_t *above,
+static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
+                                       const uint16_t *above,
                                        const uint16_t *left, int bd) {
   int i, r, expected_dc, sum = 0;
   const int count = 2 * bs;
-  (void) bd;
+  (void)bd;
 
   for (i = 0; i < bs; i++) {
     sum += above[i];
@@ -781,54 +759,45 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride,
 // This serves as a wrapper function, so that all the prediction functions
 // can be unified and accessed as a pointer array. Note that the boundary
 // above and left are not necessarily used all the time.
-#define intra_pred_sized(type, size) \
-  void vpx_##type##_predictor_##size##x##size##_c(uint8_t *dst, \
-                                                  ptrdiff_t stride, \
-                                                  const uint8_t *above, \
-                                                  const uint8_t *left) { \
-    type##_predictor(dst, stride, size, above, left); \
+#define intra_pred_sized(type, size)                        \
+  void vpx_##type##_predictor_##size##x##size##_c(          \
+      uint8_t *dst, ptrdiff_t stride, const uint8_t *above, \
+      const uint8_t *left) {                                \
+    type##_predictor(dst, stride, size, above, left);       \
   }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-#define intra_pred_highbd_sized(type, size) \
-  void vpx_highbd_##type##_predictor_##size##x##size##_c( \
-      uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \
-      const uint16_t *left, int bd) { \
+#define intra_pred_highbd_sized(type, size)                        \
+  void vpx_highbd_##type##_predictor_##size##x##size##_c(          \
+      uint16_t *dst, ptrdiff_t stride, const uint16_t *above,      \
+      const uint16_t *left, int bd) {                              \
     highbd_##type##_predictor(dst, stride, size, above, left, bd); \
   }
 
-#define intra_pred_allsizes(type) \
-  intra_pred_sized(type, 4) \
-  intra_pred_sized(type, 8) \
-  intra_pred_sized(type, 16) \
-  intra_pred_sized(type, 32) \
-  intra_pred_highbd_sized(type, 4) \
-  intra_pred_highbd_sized(type, 8) \
-  intra_pred_highbd_sized(type, 16) \
-  intra_pred_highbd_sized(type, 32)
-
-#define intra_pred_no_4x4(type) \
-  intra_pred_sized(type, 8) \
-  intra_pred_sized(type, 16) \
-  intra_pred_sized(type, 32) \
-  intra_pred_highbd_sized(type, 4) \
-  intra_pred_highbd_sized(type, 8) \
-  intra_pred_highbd_sized(type, 16) \
-  intra_pred_highbd_sized(type, 32)
+#define intra_pred_allsizes(type)                                           \
+  intra_pred_sized(type, 4) intra_pred_sized(type, 8)                       \
+      intra_pred_sized(type, 16) intra_pred_sized(type, 32)                 \
+          intra_pred_highbd_sized(type, 4) intra_pred_highbd_sized(type, 8) \
+              intra_pred_highbd_sized(type, 16)                             \
+                  intra_pred_highbd_sized(type, 32)
+
+#define intra_pred_no_4x4(type)                                              \
+  intra_pred_sized(type, 8) intra_pred_sized(type, 16)                       \
+      intra_pred_sized(type, 32) intra_pred_highbd_sized(type, 4)            \
+          intra_pred_highbd_sized(type, 8) intra_pred_highbd_sized(type, 16) \
+              intra_pred_highbd_sized(type, 32)
 
 #else
-#define intra_pred_allsizes(type) \
-  intra_pred_sized(type, 4) \
-  intra_pred_sized(type, 8) \
-  intra_pred_sized(type, 16) \
-  intra_pred_sized(type, 32)
-
-#define intra_pred_no_4x4(type) \
-  intra_pred_sized(type, 8) \
-  intra_pred_sized(type, 16) \
-  intra_pred_sized(type, 32)
+#define intra_pred_allsizes(type)                     \
+  intra_pred_sized(type, 4) intra_pred_sized(type, 8) \
+      intra_pred_sized(type, 16) intra_pred_sized(type, 32)
+
+#define intra_pred_no_4x4(type)                        \
+  intra_pred_sized(type, 8) intra_pred_sized(type, 16) \
+      intra_pred_sized(type, 32)
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
+/* clang-format off */
 intra_pred_no_4x4(d207)
 intra_pred_no_4x4(d63)
 intra_pred_no_4x4(d45)
@@ -848,3 +817,4 @@ intra_pred_allsizes(dc_left)
 intra_pred_allsizes(dc_top)
 intra_pred_allsizes(dc)
 #undef intra_pred_allsizes
+    /* clang-format on */
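
All of the predictors in this file are built from two rounding averages: AVG2 is a round-half-up two-tap mean and AVG3 a [1 2 1]/4 filter. Restated as plain functions (names hypothetical):

/* Sketch: the rounding filters behind the AVG2/AVG3 macros. */
static int avg2(int a, int b) { return (a + b + 1) >> 1; }
static int avg3(int a, int b, int c) { return (a + 2 * b + c + 2) >> 2; }
/* e.g. avg2(10, 11) == 11 (ties round up); avg3(10, 20, 30) == 20. */
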
diff --git a/vpx_dsp/inv_txfm.c b/vpx_dsp/inv_txfm.c
index f6f86d629b22f0b5f17ea2fc7036959cc990b040..484cfa6e29b76da4d5052e058d1e96489a7c6d90 100644
--- a/vpx_dsp/inv_txfm.c
+++ b/vpx_dsp/inv_txfm.c
@@ -14,8 +14,8 @@
 #include "vpx_dsp/inv_txfm.h"
 
 void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
-/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
-   0.5 shifts per pixel. */
+  /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
+     0.5 shifts per pixel. */
   int i;
   tran_low_t output[16];
   tran_high_t a1, b1, c1, d1, e1;
@@ -126,8 +126,7 @@ void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 
   // Columns
   for (i = 0; i < 4; ++i) {
-    for (j = 0; j < 4; ++j)
-      temp_in[j] = out[j * 4 + i];
+    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
     idct4_c(temp_in, temp_out);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -222,8 +221,7 @@ void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 
   // Then transform columns
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -239,8 +237,7 @@ void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8);
   a1 = ROUND_POWER_OF_TWO(out, 5);
   for (j = 0; j < 8; ++j) {
-    for (i = 0; i < 8; ++i)
-      dest[i] = clip_pixel_add(dest[i], a1);
+    for (i = 0; i < 8; ++i) dest[i] = clip_pixel_add(dest[i], a1);
     dest += stride;
   }
 }
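
The DC-only inverse path above runs the lone DC coefficient through dct_const_round_shift twice -- once for the row pass, once for the column pass -- then rounds by 2^5 and adds the constant to every pixel. A sketch of that arithmetic, assuming the usual libvpx constants (cospi_16_64 == 11585, DCT_CONST_BITS == 14) and ignoring WRAPLOW's wrapping:

#include <stdint.h>

/* Sketch: turn a DC coefficient into the per-pixel offset a1. */
static int32_t idct_dc_to_a1(int32_t dc) {
  const int32_t cospi_16_64 = 11585; /* round(2^14 * cos(pi/4)) */
  int32_t out = (dc * cospi_16_64 + 8192) >> 14; /* row pass */
  out = (out * cospi_16_64 + 8192) >> 14;        /* column pass */
  return (out + 16) >> 5; /* ROUND_POWER_OF_TWO(out, 5) */
}
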
@@ -295,20 +292,20 @@ void iadst8_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t x7 = input[6];
 
   if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
-    output[0] = output[1] = output[2] = output[3] = output[4]
-              = output[5] = output[6] = output[7] = 0;
+    output[0] = output[1] = output[2] = output[3] = output[4] = output[5] =
+        output[6] = output[7] = 0;
     return;
   }
 
   // stage 1
-  s0 = (int)(cospi_2_64  * x0 + cospi_30_64 * x1);
-  s1 = (int)(cospi_30_64 * x0 - cospi_2_64  * x1);
+  s0 = (int)(cospi_2_64 * x0 + cospi_30_64 * x1);
+  s1 = (int)(cospi_30_64 * x0 - cospi_2_64 * x1);
   s2 = (int)(cospi_10_64 * x2 + cospi_22_64 * x3);
   s3 = (int)(cospi_22_64 * x2 - cospi_10_64 * x3);
   s4 = (int)(cospi_18_64 * x4 + cospi_14_64 * x5);
   s5 = (int)(cospi_14_64 * x4 - cospi_18_64 * x5);
-  s6 = (int)(cospi_26_64 * x6 + cospi_6_64  * x7);
-  s7 = (int)(cospi_6_64  * x6 - cospi_26_64 * x7);
+  s6 = (int)(cospi_26_64 * x6 + cospi_6_64 * x7);
+  s7 = (int)(cospi_6_64 * x6 - cospi_26_64 * x7);
 
   x0 = WRAPLOW(dct_const_round_shift(s0 + s4), 8);
   x1 = WRAPLOW(dct_const_round_shift(s1 + s5), 8);
@@ -375,8 +372,7 @@ void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 
   // Then transform columns
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -390,22 +386,22 @@ void idct16_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t temp1, temp2;
 
   // stage 1
-  step1[0] = input[0/2];
-  step1[1] = input[16/2];
-  step1[2] = input[8/2];
-  step1[3] = input[24/2];
-  step1[4] = input[4/2];
-  step1[5] = input[20/2];
-  step1[6] = input[12/2];
-  step1[7] = input[28/2];
-  step1[8] = input[2/2];
-  step1[9] = input[18/2];
-  step1[10] = input[10/2];
-  step1[11] = input[26/2];
-  step1[12] = input[6/2];
-  step1[13] = input[22/2];
-  step1[14] = input[14/2];
-  step1[15] = input[30/2];
+  step1[0] = input[0 / 2];
+  step1[1] = input[16 / 2];
+  step1[2] = input[8 / 2];
+  step1[3] = input[24 / 2];
+  step1[4] = input[4 / 2];
+  step1[5] = input[20 / 2];
+  step1[6] = input[12 / 2];
+  step1[7] = input[28 / 2];
+  step1[8] = input[2 / 2];
+  step1[9] = input[18 / 2];
+  step1[10] = input[10 / 2];
+  step1[11] = input[26 / 2];
+  step1[12] = input[6 / 2];
+  step1[13] = input[22 / 2];
+  step1[14] = input[14 / 2];
+  step1[15] = input[30 / 2];
 
   // stage 2
   step2[0] = step1[0];
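
The spaced-out `input[16 / 2]` subscripts above are constant folds that appear to keep the 32-point index positions visible while addressing the 16-point input; they reduce to the butterfly load order below, shown for orientation only (not part of the patch):

    /* step1[k] = input[order[k]] with order = */
    static const int kIdct16Order[16] = { 0, 8, 4, 12, 2, 10, 6, 14,
                                          1, 9, 5, 13, 3, 11, 7, 15 };
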
@@ -566,8 +562,7 @@ void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Then transform columns
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j * 16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -597,21 +592,20 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t x14 = input[1];
   tran_high_t x15 = input[14];
 
-  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
-           | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
-    output[0] = output[1] = output[2] = output[3] = output[4]
-              = output[5] = output[6] = output[7] = output[8]
-              = output[9] = output[10] = output[11] = output[12]
-              = output[13] = output[14] = output[15] = 0;
+  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+        x13 | x14 | x15)) {
+    output[0] = output[1] = output[2] = output[3] = output[4] = output[5] =
+        output[6] = output[7] = output[8] = output[9] = output[10] =
+            output[11] = output[12] = output[13] = output[14] = output[15] = 0;
     return;
   }
 
   // stage 1
-  s0 = x0 * cospi_1_64  + x1 * cospi_31_64;
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
   s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
-  s2 = x2 * cospi_5_64  + x3 * cospi_27_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
   s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
-  s4 = x4 * cospi_9_64  + x5 * cospi_23_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
   s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
   s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
   s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -620,9 +614,9 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
   s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
   s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
   s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
-  s13 = x12 * cospi_7_64  - x13 * cospi_25_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
   s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
-  s15 = x14 * cospi_3_64  - x15 * cospi_29_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
 
   x0 = WRAPLOW(dct_const_round_shift(s0 + s8), 8);
   x1 = WRAPLOW(dct_const_round_shift(s1 + s9), 8);
@@ -650,14 +644,14 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
   s5 = x5;
   s6 = x6;
   s7 = x7;
-  s8 =    x8 * cospi_4_64   + x9 * cospi_28_64;
-  s9 =    x8 * cospi_28_64  - x9 * cospi_4_64;
-  s10 =   x10 * cospi_20_64 + x11 * cospi_12_64;
-  s11 =   x10 * cospi_12_64 - x11 * cospi_20_64;
-  s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
-  s13 =   x12 * cospi_4_64  + x13 * cospi_28_64;
-  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
-  s15 =   x14 * cospi_20_64 + x15 * cospi_12_64;
+  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
 
   x0 = WRAPLOW(s0 + s4, 8);
   x1 = WRAPLOW(s1 + s5, 8);
@@ -681,18 +675,18 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 = x4 * cospi_8_64  + x5 * cospi_24_64;
+  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
   s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
-  s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
-  s7 =   x6 * cospi_8_64  + x7 * cospi_24_64;
+  s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
   s8 = x8;
   s9 = x9;
   s10 = x10;
   s11 = x11;
-  s12 = x12 * cospi_8_64  + x13 * cospi_24_64;
+  s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
   s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
-  s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
-  s15 =   x14 * cospi_8_64  + x15 * cospi_24_64;
+  s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+  s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
 
   x0 = WRAPLOW(check_range(s0 + s2), 8);
   x1 = WRAPLOW(check_range(s1 + s3), 8);
@@ -712,13 +706,13 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
   x15 = WRAPLOW(dct_const_round_shift(s13 - s15), 8);
 
   // stage 4
-  s2 = (- cospi_16_64) * (x2 + x3);
+  s2 = (-cospi_16_64) * (x2 + x3);
   s3 = cospi_16_64 * (x2 - x3);
   s6 = cospi_16_64 * (x6 + x7);
-  s7 = cospi_16_64 * (- x6 + x7);
+  s7 = cospi_16_64 * (-x6 + x7);
   s10 = cospi_16_64 * (x10 + x11);
-  s11 = cospi_16_64 * (- x10 + x11);
-  s14 = (- cospi_16_64) * (x14 + x15);
+  s11 = cospi_16_64 * (-x10 + x11);
+  s14 = (-cospi_16_64) * (x14 + x15);
   s15 = cospi_16_64 * (x14 - x15);
 
   x2 = WRAPLOW(dct_const_round_shift(s2), 8);
@@ -765,8 +759,7 @@ void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Then transform columns
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j*16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -782,8 +775,7 @@ void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8);
   a1 = ROUND_POWER_OF_TWO(out, 6);
   for (j = 0; j < 16; ++j) {
-    for (i = 0; i < 16; ++i)
-      dest[i] = clip_pixel_add(dest[i], a1);
+    for (i = 0; i < 16; ++i) dest[i] = clip_pixel_add(dest[i], a1);
     dest += stride;
   }
 }
@@ -1165,8 +1157,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
   // Rows
   for (i = 0; i < 32; ++i) {
     int16_t zero_coeff[16];
-    for (j = 0; j < 16; ++j)
-      zero_coeff[j] = input[2 * j] | input[2 * j + 1];
+    for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1];
     for (j = 0; j < 8; ++j)
       zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
     for (j = 0; j < 4; ++j)
@@ -1184,8 +1175,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -1196,7 +1186,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
 
 void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
                              int stride) {
-  tran_low_t out[32 * 32] = {0};
+  tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
   tran_low_t temp_in[32], temp_out[32];
@@ -1211,8 +1201,7 @@ void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -1223,7 +1212,7 @@ void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
 
 void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
                             int stride) {
-  tran_low_t out[32 * 32] = {0};
+  tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
   tran_low_t temp_in[32], temp_out[32];
@@ -1238,8 +1227,7 @@ void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
 
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -1257,8 +1245,7 @@ void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   a1 = ROUND_POWER_OF_TWO(out, 6);
 
   for (j = 0; j < 32; ++j) {
-    for (i = 0; i < 32; ++i)
-      dest[i] = clip_pixel_add(dest[i], a1);
+    for (i = 0; i < 32; ++i) dest[i] = clip_pixel_add(dest[i], a1);
     dest += stride;
   }
 }
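
vpx_idct32x32_1_add_c above is the DC-only (eob == 1) shortcut: one coefficient determines a single offset a1 that is clip-added to every pixel. A worked example of the arithmetic, with an illustrative coefficient value:

    /* input[0] == 100, cospi_16_64 == 11585, DCT_CONST_BITS == 14:
     *   out = ROUND_POWER_OF_TWO(100 * 11585, 14) == 71
     *   out = ROUND_POWER_OF_TWO( 71 * 11585, 14) == 50
     *   a1  = ROUND_POWER_OF_TWO(50, 6)           ==  1
     * so all 32*32 pixels get dest[i] = clip_pixel_add(dest[i], 1). */
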
@@ -1326,7 +1313,7 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
   const tran_low_t *ip = in;
   tran_low_t *op = tmp;
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
-  (void) bd;
+  (void)bd;
 
   a1 = ip[0] >> UNIT_QUANT_SHIFT;
   e1 = a1 >> 1;
@@ -1338,14 +1325,14 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
   for (i = 0; i < 4; i++) {
     e1 = ip[0] >> 1;
     a1 = ip[0] - e1;
-    dest[dest_stride * 0] = highbd_clip_pixel_add(
-        dest[dest_stride * 0], a1, bd);
-    dest[dest_stride * 1] = highbd_clip_pixel_add(
-        dest[dest_stride * 1], e1, bd);
-    dest[dest_stride * 2] = highbd_clip_pixel_add(
-        dest[dest_stride * 2], e1, bd);
-    dest[dest_stride * 3] = highbd_clip_pixel_add(
-        dest[dest_stride * 3], e1, bd);
+    dest[dest_stride * 0] =
+        highbd_clip_pixel_add(dest[dest_stride * 0], a1, bd);
+    dest[dest_stride * 1] =
+        highbd_clip_pixel_add(dest[dest_stride * 1], e1, bd);
+    dest[dest_stride * 2] =
+        highbd_clip_pixel_add(dest[dest_stride * 2], e1, bd);
+    dest[dest_stride * 3] =
+        highbd_clip_pixel_add(dest[dest_stride * 3], e1, bd);
     ip++;
     dest++;
   }
@@ -1354,7 +1341,7 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
 void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
-  (void) bd;
+  (void)bd;
   // stage 1
   temp1 = (input[0] + input[2]) * cospi_16_64;
   temp2 = (input[0] - input[2]) * cospi_16_64;
@@ -1389,8 +1376,7 @@ void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Columns
   for (i = 0; i < 4; ++i) {
-    for (j = 0; j < 4; ++j)
-      temp_in[j] = out[j * 4 + i];
+    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
     vpx_highbd_idct4_c(temp_in, temp_out, bd);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1403,8 +1389,8 @@ void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                 int dest_stride, int bd) {
   int i;
   tran_high_t a1;
-  tran_low_t out = WRAPLOW(
-      highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+  tran_low_t out =
+      WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
 
   out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
@@ -1481,8 +1467,7 @@ void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     vpx_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1495,14 +1480,13 @@ void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                 int stride, int bd) {
   int i, j;
   tran_high_t a1;
-  tran_low_t out = WRAPLOW(
-      highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+  tran_low_t out =
+      WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
   out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
   a1 = ROUND_POWER_OF_TWO(out, 5);
   for (j = 0; j < 8; ++j) {
-    for (i = 0; i < 8; ++i)
-      dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+    for (i = 0; i < 8; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
     dest += stride;
   }
 }
@@ -1514,7 +1498,7 @@ void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t x1 = input[1];
   tran_low_t x2 = input[2];
   tran_low_t x3 = input[3];
-  (void) bd;
+  (void)bd;
 
   if (!(x0 | x1 | x2 | x3)) {
     memset(output, 0, 4 * sizeof(*output));
@@ -1556,7 +1540,7 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t x5 = input[4];
   tran_low_t x6 = input[1];
   tran_low_t x7 = input[6];
-  (void) bd;
+  (void)bd;
 
   if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
     memset(output, 0, 8 * sizeof(*output));
@@ -1564,14 +1548,14 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   }
 
   // stage 1
-  s0 = cospi_2_64  * x0 + cospi_30_64 * x1;
-  s1 = cospi_30_64 * x0 - cospi_2_64  * x1;
+  s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+  s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
   s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
   s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
   s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
   s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
-  s6 = cospi_26_64 * x6 + cospi_6_64  * x7;
-  s7 = cospi_6_64  * x6 - cospi_26_64 * x7;
+  s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+  s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
 
   x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s4, bd), bd);
   x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s5, bd), bd);
@@ -1587,10 +1571,10 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 =  cospi_8_64  * x4 + cospi_24_64 * x5;
-  s5 =  cospi_24_64 * x4 - cospi_8_64  * x5;
-  s6 = -cospi_24_64 * x6 + cospi_8_64  * x7;
-  s7 =  cospi_8_64  * x6 + cospi_24_64 * x7;
+  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
 
   x0 = WRAPLOW(s0 + s2, bd);
   x1 = WRAPLOW(s1 + s3, bd);
@@ -1639,8 +1623,7 @@ void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
   }
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
-    for (j = 0; j < 8; ++j)
-      temp_in[j] = out[j * 8 + i];
+    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
     vpx_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1652,25 +1635,25 @@ void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
 void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
-  (void) bd;
+  (void)bd;
 
   // stage 1
-  step1[0] = input[0/2];
-  step1[1] = input[16/2];
-  step1[2] = input[8/2];
-  step1[3] = input[24/2];
-  step1[4] = input[4/2];
-  step1[5] = input[20/2];
-  step1[6] = input[12/2];
-  step1[7] = input[28/2];
-  step1[8] = input[2/2];
-  step1[9] = input[18/2];
-  step1[10] = input[10/2];
-  step1[11] = input[26/2];
-  step1[12] = input[6/2];
-  step1[13] = input[22/2];
-  step1[14] = input[14/2];
-  step1[15] = input[30/2];
+  step1[0] = input[0 / 2];
+  step1[1] = input[16 / 2];
+  step1[2] = input[8 / 2];
+  step1[3] = input[24 / 2];
+  step1[4] = input[4 / 2];
+  step1[5] = input[20 / 2];
+  step1[6] = input[12 / 2];
+  step1[7] = input[28 / 2];
+  step1[8] = input[2 / 2];
+  step1[9] = input[18 / 2];
+  step1[10] = input[10 / 2];
+  step1[11] = input[26 / 2];
+  step1[12] = input[6 / 2];
+  step1[13] = input[22 / 2];
+  step1[14] = input[14 / 2];
+  step1[15] = input[30 / 2];
 
   // stage 2
   step2[0] = step1[0];
@@ -1832,8 +1815,7 @@ void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j * 16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     vpx_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1862,20 +1844,20 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t x13 = input[12];
   tran_low_t x14 = input[1];
   tran_low_t x15 = input[14];
-  (void) bd;
+  (void)bd;
 
-  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
-           | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
+  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+        x13 | x14 | x15)) {
     memset(output, 0, 16 * sizeof(*output));
     return;
   }
 
   // stage 1
-  s0 = x0 * cospi_1_64  + x1 * cospi_31_64;
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
   s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
-  s2 = x2 * cospi_5_64  + x3 * cospi_27_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
   s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
-  s4 = x4 * cospi_9_64  + x5 * cospi_23_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
   s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
   s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
   s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -1884,9 +1866,9 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
   s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
   s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
-  s13 = x12 * cospi_7_64  - x13 * cospi_25_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
   s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
-  s15 = x14 * cospi_3_64  - x15 * cospi_29_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
 
   x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s8, bd), bd);
   x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s9, bd), bd);
@@ -1896,8 +1878,8 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s13, bd), bd);
   x6 = WRAPLOW(highbd_dct_const_round_shift(s6 + s14, bd), bd);
   x7 = WRAPLOW(highbd_dct_const_round_shift(s7 + s15, bd), bd);
-  x8  = WRAPLOW(highbd_dct_const_round_shift(s0 - s8, bd), bd);
-  x9  = WRAPLOW(highbd_dct_const_round_shift(s1 - s9, bd), bd);
+  x8 = WRAPLOW(highbd_dct_const_round_shift(s0 - s8, bd), bd);
+  x9 = WRAPLOW(highbd_dct_const_round_shift(s1 - s9, bd), bd);
   x10 = WRAPLOW(highbd_dct_const_round_shift(s2 - s10, bd), bd);
   x11 = WRAPLOW(highbd_dct_const_round_shift(s3 - s11, bd), bd);
   x12 = WRAPLOW(highbd_dct_const_round_shift(s4 - s12, bd), bd);
@@ -1976,13 +1958,13 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   x15 = WRAPLOW(highbd_dct_const_round_shift(s13 - s15, bd), bd);
 
   // stage 4
-  s2 = (- cospi_16_64) * (x2 + x3);
+  s2 = (-cospi_16_64) * (x2 + x3);
   s3 = cospi_16_64 * (x2 - x3);
   s6 = cospi_16_64 * (x6 + x7);
   s7 = cospi_16_64 * (-x6 + x7);
   s10 = cospi_16_64 * (x10 + x11);
   s11 = cospi_16_64 * (-x10 + x11);
-  s14 = (- cospi_16_64) * (x14 + x15);
+  s14 = (-cospi_16_64) * (x14 + x15);
   s15 = cospi_16_64 * (x14 - x15);
 
   x2 = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd);
@@ -2030,8 +2012,7 @@ void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
-    for (j = 0; j < 16; ++j)
-      temp_in[j] = out[j*16 + i];
+    for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
     vpx_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2044,24 +2025,23 @@ void vpx_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int bd) {
   int i, j;
   tran_high_t a1;
-  tran_low_t out = WRAPLOW(
-      highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+  tran_low_t out =
+      WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
 
   out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
   a1 = ROUND_POWER_OF_TWO(out, 6);
   for (j = 0; j < 16; ++j) {
-    for (i = 0; i < 16; ++i)
-      dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+    for (i = 0; i < 16; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
     dest += stride;
   }
 }
 
-static void highbd_idct32_c(const tran_low_t *input,
-                            tran_low_t *output, int bd) {
+static void highbd_idct32_c(const tran_low_t *input, tran_low_t *output,
+                            int bd) {
   tran_low_t step1[32], step2[32];
   tran_high_t temp1, temp2;
-  (void) bd;
+  (void)bd;
 
   // stage 1
   step1[0] = input[0];
@@ -2437,8 +2417,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
   // Rows
   for (i = 0; i < 32; ++i) {
     tran_low_t zero_coeff[16];
-    for (j = 0; j < 16; ++j)
-      zero_coeff[j] = input[2 * j] | input[2 * j + 1];
+    for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1];
     for (j = 0; j < 8; ++j)
       zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
     for (j = 0; j < 4; ++j)
@@ -2456,8 +2435,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     highbd_idct32_c(temp_in, temp_out, bd);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2468,7 +2446,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
 
 void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int bd) {
-  tran_low_t out[32 * 32] = {0};
+  tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
   tran_low_t temp_in[32], temp_out[32];
@@ -2483,8 +2461,7 @@ void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
   }
   // Columns
   for (i = 0; i < 32; ++i) {
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = out[j * 32 + i];
+    for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
     highbd_idct32_c(temp_in, temp_out, bd);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2499,14 +2476,13 @@ void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
   int a1;
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
 
-  tran_low_t out = WRAPLOW(
-      highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+  tran_low_t out =
+      WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
   out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
   a1 = ROUND_POWER_OF_TWO(out, 6);
 
   for (j = 0; j < 32; ++j) {
-    for (i = 0; i < 32; ++i)
-      dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+    for (i = 0; i < 32; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
     dest += stride;
   }
 }
diff --git a/vpx_dsp/inv_txfm.h b/vpx_dsp/inv_txfm.h
index 3092afab13c14aaec385a4a8cbdffa3ea73368f5..b44df094e9356f9c47e36ac330a881740e1f0346 100644
--- a/vpx_dsp/inv_txfm.h
+++ b/vpx_dsp/inv_txfm.h
@@ -41,8 +41,7 @@ static INLINE tran_low_t dct_const_round_shift(tran_high_t input) {
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static INLINE tran_low_t highbd_check_range(tran_high_t input,
-                                            int bd) {
+static INLINE tran_low_t highbd_check_range(tran_high_t input, int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
   // For valid highbitdepth streams, intermediate stage coefficients will
   // stay within the ranges:
@@ -53,9 +52,9 @@ static INLINE tran_low_t highbd_check_range(tran_high_t input,
   const int32_t int_min = -int_max - 1;
   assert(int_min <= input);
   assert(input <= int_max);
-  (void) int_min;
+  (void)int_min;
 #endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
-  (void) bd;
+  (void)bd;
   return (tran_low_t)input;
 }
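
The highbd_check_range hunk keeps the `(void)int_min;` / `(void)bd;` statements: the usual idiom for a parameter or local that is only read inside asserts or under some build configs, referenced once so -Wunused warnings stay quiet. A reduced sketch of the pattern (the config gate and names are illustrative):

    #include <assert.h>

    static int check_sketch(int v, int bd) {
    #if defined(RANGE_CHECKS) /* hypothetical config gate */
      assert(v < (1 << (bd + 7)));
    #endif
      (void)bd; /* otherwise only used when RANGE_CHECKS is defined */
      return v;
    }
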
 
diff --git a/vpx_dsp/loopfilter.c b/vpx_dsp/loopfilter.c
index 7dcc7809d052564b37141d2c5afd26c575f09021..07fe51a3e6b44b51a955c599db98cf6a5fb2a729 100644
--- a/vpx_dsp/loopfilter.c
+++ b/vpx_dsp/loopfilter.c
@@ -21,23 +21,18 @@ static INLINE int8_t signed_char_clamp(int t) {
 #if CONFIG_VPX_HIGHBITDEPTH
 static INLINE int16_t signed_char_clamp_high(int t, int bd) {
   switch (bd) {
-    case 10:
-      return (int16_t)clamp(t, -128*4, 128*4-1);
-    case 12:
-      return (int16_t)clamp(t, -128*16, 128*16-1);
+    case 10: return (int16_t)clamp(t, -128 * 4, 128 * 4 - 1);
+    case 12: return (int16_t)clamp(t, -128 * 16, 128 * 16 - 1);
     case 8:
-    default:
-      return (int16_t)clamp(t, -128, 128-1);
+    default: return (int16_t)clamp(t, -128, 128 - 1);
   }
 }
 #endif
 
 // should we apply any filter at all: 11111111 yes, 00000000 no
-static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit,
-                                 uint8_t p3, uint8_t p2,
-                                 uint8_t p1, uint8_t p0,
-                                 uint8_t q0, uint8_t q1,
-                                 uint8_t q2, uint8_t q3) {
+static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit, uint8_t p3,
+                                 uint8_t p2, uint8_t p1, uint8_t p0, uint8_t q0,
+                                 uint8_t q1, uint8_t q2, uint8_t q3) {
   int8_t mask = 0;
   mask |= (abs(p3 - p2) > limit) * -1;
   mask |= (abs(p2 - p1) > limit) * -1;
@@ -45,14 +40,12 @@ static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit,
   mask |= (abs(q1 - q0) > limit) * -1;
   mask |= (abs(q2 - q1) > limit) * -1;
   mask |= (abs(q3 - q2) > limit) * -1;
-  mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > blimit) * -1;
+  mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
   return ~mask;
 }
 
-static INLINE int8_t flat_mask4(uint8_t thresh,
-                                uint8_t p3, uint8_t p2,
-                                uint8_t p1, uint8_t p0,
-                                uint8_t q0, uint8_t q1,
+static INLINE int8_t flat_mask4(uint8_t thresh, uint8_t p3, uint8_t p2,
+                                uint8_t p1, uint8_t p0, uint8_t q0, uint8_t q1,
                                 uint8_t q2, uint8_t q3) {
   int8_t mask = 0;
   mask |= (abs(p1 - p0) > thresh) * -1;
@@ -64,12 +57,10 @@ static INLINE int8_t flat_mask4(uint8_t thresh,
   return ~mask;
 }
 
-static INLINE int8_t flat_mask5(uint8_t thresh,
-                                uint8_t p4, uint8_t p3,
-                                uint8_t p2, uint8_t p1,
-                                uint8_t p0, uint8_t q0,
-                                uint8_t q1, uint8_t q2,
-                                uint8_t q3, uint8_t q4) {
+static INLINE int8_t flat_mask5(uint8_t thresh, uint8_t p4, uint8_t p3,
+                                uint8_t p2, uint8_t p1, uint8_t p0, uint8_t q0,
+                                uint8_t q1, uint8_t q2, uint8_t q3,
+                                uint8_t q4) {
   int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3);
   mask |= (abs(p4 - p0) > thresh) * -1;
   mask |= (abs(q4 - q0) > thresh) * -1;
@@ -80,8 +71,8 @@ static INLINE int8_t flat_mask5(uint8_t thresh,
 static INLINE int8_t hev_mask(uint8_t thresh, uint8_t p1, uint8_t p0,
                               uint8_t q0, uint8_t q1) {
   int8_t hev = 0;
-  hev  |= (abs(p1 - p0) > thresh) * -1;
-  hev  |= (abs(q1 - q0) > thresh) * -1;
+  hev |= (abs(p1 - p0) > thresh) * -1;
+  hev |= (abs(q1 - q0) > thresh) * -1;
   return hev;
 }
 
@@ -89,10 +80,10 @@ static INLINE void filter4(int8_t mask, uint8_t thresh, uint8_t *op1,
                            uint8_t *op0, uint8_t *oq0, uint8_t *oq1) {
   int8_t filter1, filter2;
 
-  const int8_t ps1 = (int8_t) *op1 ^ 0x80;
-  const int8_t ps0 = (int8_t) *op0 ^ 0x80;
-  const int8_t qs0 = (int8_t) *oq0 ^ 0x80;
-  const int8_t qs1 = (int8_t) *oq1 ^ 0x80;
+  const int8_t ps1 = (int8_t)*op1 ^ 0x80;
+  const int8_t ps0 = (int8_t)*op0 ^ 0x80;
+  const int8_t qs0 = (int8_t)*oq0 ^ 0x80;
+  const int8_t qs1 = (int8_t)*oq1 ^ 0x80;
   const uint8_t hev = hev_mask(thresh, *op1, *op0, *oq0, *oq1);
 
   // add outer taps if we have high edge variance
@@ -126,9 +117,9 @@ void vpx_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */,
   // of 8 bit simd instructions.
   for (i = 0; i < 8 * count; ++i) {
     const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
-    const uint8_t q0 = s[0 * p],  q1 = s[1 * p],  q2 = s[2 * p],  q3 = s[3 * p];
-    const int8_t mask = filter_mask(*limit, *blimit,
-                                    p3, p2, p1, p0, q0, q1, q2, q3);
+    const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+    const int8_t mask =
+        filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
     filter4(mask, *thresh, s - 2 * p, s - 1 * p, s, s + 1 * p);
     ++s;
   }
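
filter_mask (reflowed above) builds its verdict from `(comparison) * -1` terms: each exceeded limit ORs an all-ones byte into mask, and the final `~mask` is 0xFF only when every delta stays within bounds, matching the 0x00/0xFF byte masks the SIMD paths produce. A two-tap reduction of the same trick, as a sketch:

    #include <stdint.h>
    #include <stdlib.h>

    /* Returns 0xFF ("filter") when both deltas are within limit, else 0. */
    static int8_t mask_sketch(uint8_t limit, uint8_t a, uint8_t b, uint8_t c) {
      int8_t mask = 0;
      mask |= (abs(a - b) > limit) * -1; /* -1 == all bits set */
      mask |= (abs(b - c) > limit) * -1;
      return ~mask;
    }
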
@@ -151,9 +142,9 @@ void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
   // of 8 bit simd instructions.
   for (i = 0; i < 8 * count; ++i) {
     const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
-    const uint8_t q0 = s[0],  q1 = s[1],  q2 = s[2],  q3 = s[3];
-    const int8_t mask = filter_mask(*limit, *blimit,
-                                    p3, p2, p1, p0, q0, q1, q2, q3);
+    const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+    const int8_t mask =
+        filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
     filter4(mask, *thresh, s - 2, s - 1, s, s + 1);
     s += pitch;
   }
@@ -164,14 +155,12 @@ void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
                                const uint8_t *blimit1, const uint8_t *limit1,
                                const uint8_t *thresh1) {
   vpx_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1);
-  vpx_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1,
-                                  thresh1, 1);
+  vpx_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1);
 }
 
 static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat,
-                           uint8_t *op3, uint8_t *op2,
-                           uint8_t *op1, uint8_t *op0,
-                           uint8_t *oq0, uint8_t *oq1,
+                           uint8_t *op3, uint8_t *op2, uint8_t *op1,
+                           uint8_t *op0, uint8_t *oq0, uint8_t *oq1,
                            uint8_t *oq2, uint8_t *oq3) {
   if (flat && mask) {
     const uint8_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
@@ -185,7 +174,7 @@ static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat,
     *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3);
     *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3);
   } else {
-    filter4(mask, thresh, op1,  op0, oq0, oq1);
+    filter4(mask, thresh, op1, op0, oq0, oq1);
   }
 }
 
@@ -200,11 +189,11 @@ void vpx_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit,
     const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
     const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
 
-    const int8_t mask = filter_mask(*limit, *blimit,
-                                    p3, p2, p1, p0, q0, q1, q2, q3);
+    const int8_t mask =
+        filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
     const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
-    filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
-                                 s,         s + 1 * p, s + 2 * p, s + 3 * p);
+    filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s,
+            s + 1 * p, s + 2 * p, s + 3 * p);
     ++s;
   }
 }
@@ -225,11 +214,11 @@ void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
   for (i = 0; i < 8 * count; ++i) {
     const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
     const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
-    const int8_t mask = filter_mask(*limit, *blimit,
-                                    p3, p2, p1, p0, q0, q1, q2, q3);
+    const int8_t mask =
+        filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
     const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
-    filter8(mask, *thresh, flat, s - 4, s - 3, s - 2, s - 1,
-                                 s,     s + 1, s + 2, s + 3);
+    filter8(mask, *thresh, flat, s - 4, s - 3, s - 2, s - 1, s, s + 1, s + 2,
+            s + 3);
     s += pitch;
   }
 }
@@ -239,56 +228,58 @@ void vpx_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
                                const uint8_t *blimit1, const uint8_t *limit1,
                                const uint8_t *thresh1) {
   vpx_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1);
-  vpx_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1,
-                                    thresh1, 1);
-}
-
-static INLINE void filter16(int8_t mask, uint8_t thresh,
-                            uint8_t flat, uint8_t flat2,
-                            uint8_t *op7, uint8_t *op6,
-                            uint8_t *op5, uint8_t *op4,
-                            uint8_t *op3, uint8_t *op2,
-                            uint8_t *op1, uint8_t *op0,
-                            uint8_t *oq0, uint8_t *oq1,
-                            uint8_t *oq2, uint8_t *oq3,
-                            uint8_t *oq4, uint8_t *oq5,
+  vpx_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1);
+}
+
+static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat,
+                            uint8_t flat2, uint8_t *op7, uint8_t *op6,
+                            uint8_t *op5, uint8_t *op4, uint8_t *op3,
+                            uint8_t *op2, uint8_t *op1, uint8_t *op0,
+                            uint8_t *oq0, uint8_t *oq1, uint8_t *oq2,
+                            uint8_t *oq3, uint8_t *oq4, uint8_t *oq5,
                             uint8_t *oq6, uint8_t *oq7) {
   if (flat2 && flat && mask) {
-    const uint8_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4,
-                  p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
+    const uint8_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4, p3 = *op3,
+                  p2 = *op2, p1 = *op1, p0 = *op0;
 
-    const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3,
-                  q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7;
+    const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3, q4 = *oq4,
+                  q5 = *oq5, q6 = *oq6, q7 = *oq7;
 
     // 15-tap filter [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1]
-    *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 +
-                              q0, 4);
-    *op5 = ROUND_POWER_OF_TWO(p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 +
-                              q0 + q1, 4);
-    *op4 = ROUND_POWER_OF_TWO(p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 +
-                              q0 + q1 + q2, 4);
-    *op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 +
-                              q0 + q1 + q2 + q3, 4);
-    *op2 = ROUND_POWER_OF_TWO(p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 +
-                              q0 + q1 + q2 + q3 + q4, 4);
+    *op6 = ROUND_POWER_OF_TWO(
+        p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 + q0, 4);
+    *op5 = ROUND_POWER_OF_TWO(
+        p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 + q0 + q1, 4);
+    *op4 = ROUND_POWER_OF_TWO(
+        p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 + q0 + q1 + q2, 4);
+    *op3 = ROUND_POWER_OF_TWO(
+        p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 + q0 + q1 + q2 + q3, 4);
+    *op2 = ROUND_POWER_OF_TWO(
+        p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 + q0 + q1 + q2 + q3 + q4,
+        4);
     *op1 = ROUND_POWER_OF_TWO(p7 * 2 + p6 + p5 + p4 + p3 + p2 + p1 * 2 + p0 +
-                              q0 + q1 + q2 + q3 + q4 + q5, 4);
-    *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
-                              q0 + q1 + q2 + q3 + q4 + q5 + q6, 4);
-    *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 +
-                              q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4);
-    *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 +
-                              q0 + q1 * 2 + q2 + q3 + q4 + q5 + q6 + q7 * 2, 4);
-    *oq2 = ROUND_POWER_OF_TWO(p4 + p3 + p2 + p1 + p0 +
-                              q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, 4);
-    *oq3 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 +
-                              q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4);
-    *oq4 = ROUND_POWER_OF_TWO(p2 + p1 + p0 +
-                              q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4);
-    *oq5 = ROUND_POWER_OF_TWO(p1 + p0 +
-                              q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4);
-    *oq6 = ROUND_POWER_OF_TWO(p0 +
-                              q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4);
+                                  q0 + q1 + q2 + q3 + q4 + q5,
+                              4);
+    *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 + q0 +
+                                  q1 + q2 + q3 + q4 + q5 + q6,
+                              4);
+    *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 + q1 +
+                                  q2 + q3 + q4 + q5 + q6 + q7,
+                              4);
+    *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 + q2 +
+                                  q3 + q4 + q5 + q6 + q7 * 2,
+                              4);
+    *oq2 = ROUND_POWER_OF_TWO(
+        p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3,
+        4);
+    *oq3 = ROUND_POWER_OF_TWO(
+        p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4);
+    *oq4 = ROUND_POWER_OF_TWO(
+        p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4);
+    *oq5 = ROUND_POWER_OF_TWO(
+        p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4);
+    *oq6 = ROUND_POWER_OF_TWO(
+        p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4);
   } else {
     filter8(mask, thresh, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3);
   }
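
Every reflowed ROUND_POWER_OF_TWO(..., 4) call in filter16 has the same shape: the taps of the 15-tap kernel (weights summing to 16, with the outermost sample replicated where the window runs past the segment) are summed, then divided by 16 with rounding. Spelled out for *op6, as a restatement rather than new behavior:

    /* *op6: weights 7,2,1,1,1,1,1,1,1 over p7..q0 sum to 16, so
     *   sum  = 7 * p7 + 2 * p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0;
     *   *op6 = (sum + 8) >> 4;   // == ROUND_POWER_OF_TWO(sum, 4)  */
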
@@ -304,41 +295,38 @@ void vpx_lpf_horizontal_16_c(uint8_t *s, int p, const uint8_t *blimit,
   for (i = 0; i < 8 * count; ++i) {
     const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
     const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
-    const int8_t mask = filter_mask(*limit, *blimit,
-                                    p3, p2, p1, p0, q0, q1, q2, q3);
+    const int8_t mask =
+        filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
     const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
-    const int8_t flat2 = flat_mask5(1,
-                             s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0,
-                             q0, s[4 * p], s[5 * p], s[6 * p], s[7 * p]);
-
-    filter16(mask, *thresh, flat, flat2,
-             s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p,
-             s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
-             s,         s + 1 * p, s + 2 * p, s + 3 * p,
-             s + 4 * p, s + 5 * p, s + 6 * p, s + 7 * p);
+    const int8_t flat2 =
+        flat_mask5(1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, q0,
+                   s[4 * p], s[5 * p], s[6 * p], s[7 * p]);
+
+    filter16(mask, *thresh, flat, flat2, s - 8 * p, s - 7 * p, s - 6 * p,
+             s - 5 * p, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s,
+             s + 1 * p, s + 2 * p, s + 3 * p, s + 4 * p, s + 5 * p, s + 6 * p,
+             s + 7 * p);
     ++s;
   }
 }
 
-static void mb_lpf_vertical_edge_w(uint8_t *s, int p,
-                                   const uint8_t *blimit,
-                                   const uint8_t *limit,
-                                   const uint8_t *thresh,
+static void mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit,
+                                   const uint8_t *limit, const uint8_t *thresh,
                                    int count) {
   int i;
 
   for (i = 0; i < count; ++i) {
     const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
-    const uint8_t q0 = s[0], q1 = s[1],  q2 = s[2], q3 = s[3];
-    const int8_t mask = filter_mask(*limit, *blimit,
-                                    p3, p2, p1, p0, q0, q1, q2, q3);
+    const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+    const int8_t mask =
+        filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
     const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
-    const int8_t flat2 = flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0,
-                                    q0, s[4], s[5], s[6], s[7]);
+    const int8_t flat2 = flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0, q0, s[4],
+                                    s[5], s[6], s[7]);
 
-    filter16(mask, *thresh, flat, flat2,
-             s - 8, s - 7, s - 6, s - 5, s - 4, s - 3, s - 2, s - 1,
-             s,     s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, s + 7);
+    filter16(mask, *thresh, flat, flat2, s - 8, s - 7, s - 6, s - 5, s - 4,
+             s - 3, s - 2, s - 1, s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6,
+             s + 7);
     s += p;
   }
 }
@@ -356,9 +344,8 @@ void vpx_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
 #if CONFIG_VPX_HIGHBITDEPTH
 // Should we apply any filter at all: 11111111 yes, 00000000 no ?
 static INLINE int8_t highbd_filter_mask(uint8_t limit, uint8_t blimit,
-                                        uint16_t p3, uint16_t p2,
-                                        uint16_t p1, uint16_t p0,
-                                        uint16_t q0, uint16_t q1,
+                                        uint16_t p3, uint16_t p2, uint16_t p1,
+                                        uint16_t p0, uint16_t q0, uint16_t q1,
                                         uint16_t q2, uint16_t q3, int bd) {
   int8_t mask = 0;
   int16_t limit16 = (uint16_t)limit << (bd - 8);
@@ -369,15 +356,14 @@ static INLINE int8_t highbd_filter_mask(uint8_t limit, uint8_t blimit,
   mask |= (abs(q1 - q0) > limit16) * -1;
   mask |= (abs(q2 - q1) > limit16) * -1;
   mask |= (abs(q3 - q2) > limit16) * -1;
-  mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > blimit16) * -1;
+  mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit16) * -1;
   return ~mask;
 }
 
-static INLINE int8_t highbd_flat_mask4(uint8_t thresh,
-                                       uint16_t p3, uint16_t p2,
-                                       uint16_t p1, uint16_t p0,
-                                       uint16_t q0, uint16_t q1,
-                                       uint16_t q2, uint16_t q3, int bd) {
+static INLINE int8_t highbd_flat_mask4(uint8_t thresh, uint16_t p3, uint16_t p2,
+                                       uint16_t p1, uint16_t p0, uint16_t q0,
+                                       uint16_t q1, uint16_t q2, uint16_t q3,
+                                       int bd) {
   int8_t mask = 0;
   int16_t thresh16 = (uint16_t)thresh << (bd - 8);
   mask |= (abs(p1 - p0) > thresh16) * -1;
@@ -389,11 +375,9 @@ static INLINE int8_t highbd_flat_mask4(uint8_t thresh,
   return ~mask;
 }
 
-static INLINE int8_t highbd_flat_mask5(uint8_t thresh,
-                                       uint16_t p4, uint16_t p3,
-                                       uint16_t p2, uint16_t p1,
-                                       uint16_t p0, uint16_t q0,
-                                       uint16_t q1, uint16_t q2,
+static INLINE int8_t highbd_flat_mask5(uint8_t thresh, uint16_t p4, uint16_t p3,
+                                       uint16_t p2, uint16_t p1, uint16_t p0,
+                                       uint16_t q0, uint16_t q1, uint16_t q2,
                                        uint16_t q3, uint16_t q4, int bd) {
   int8_t mask = ~highbd_flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3, bd);
   int16_t thresh16 = (uint16_t)thresh << (bd - 8);
@@ -464,21 +448,17 @@ void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */,
     const uint16_t q1 = s[1 * p];
     const uint16_t q2 = s[2 * p];
     const uint16_t q3 = s[3 * p];
-    const int8_t mask = highbd_filter_mask(*limit, *blimit,
-                                           p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    const int8_t mask =
+        highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
     highbd_filter4(mask, *thresh, s - 2 * p, s - 1 * p, s, s + 1 * p, bd);
     ++s;
   }
 }
 
-void vpx_highbd_lpf_horizontal_4_dual_c(uint16_t *s, int p,
-                                        const uint8_t *blimit0,
-                                        const uint8_t *limit0,
-                                        const uint8_t *thresh0,
-                                        const uint8_t *blimit1,
-                                        const uint8_t *limit1,
-                                        const uint8_t *thresh1,
-                                        int bd) {
+void vpx_highbd_lpf_horizontal_4_dual_c(
+    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+    const uint8_t *thresh1, int bd) {
   vpx_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1, bd);
   vpx_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1, bd);
 }
@@ -492,31 +472,26 @@ void vpx_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit,
   // of 8 bit simd instructions.
   for (i = 0; i < 8 * count; ++i) {
     const uint16_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
-    const uint16_t q0 = s[0],  q1 = s[1],  q2 = s[2],  q3 = s[3];
-    const int8_t mask = highbd_filter_mask(*limit, *blimit,
-                                           p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    const uint16_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+    const int8_t mask =
+        highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
     highbd_filter4(mask, *thresh, s - 2, s - 1, s, s + 1, bd);
     s += pitch;
   }
 }
 
-void vpx_highbd_lpf_vertical_4_dual_c(uint16_t *s, int pitch,
-                                      const uint8_t *blimit0,
-                                      const uint8_t *limit0,
-                                      const uint8_t *thresh0,
-                                      const uint8_t *blimit1,
-                                      const uint8_t *limit1,
-                                      const uint8_t *thresh1,
-                                      int bd) {
+void vpx_highbd_lpf_vertical_4_dual_c(
+    uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+    const uint8_t *thresh1, int bd) {
   vpx_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1, bd);
-  vpx_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1,
-                              thresh1, 1, bd);
+  vpx_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1,
+                              bd);
 }
 
 static INLINE void highbd_filter8(int8_t mask, uint8_t thresh, uint8_t flat,
-                                  uint16_t *op3, uint16_t *op2,
-                                  uint16_t *op1, uint16_t *op0,
-                                  uint16_t *oq0, uint16_t *oq1,
+                                  uint16_t *op3, uint16_t *op2, uint16_t *op1,
+                                  uint16_t *op0, uint16_t *oq0, uint16_t *oq1,
                                   uint16_t *oq2, uint16_t *oq3, int bd) {
   if (flat && mask) {
     const uint16_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
@@ -530,7 +505,7 @@ static INLINE void highbd_filter8(int8_t mask, uint8_t thresh, uint8_t flat,
     *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3);
     *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3);
   } else {
-    highbd_filter4(mask, thresh, op1,  op0, oq0, oq1, bd);
+    highbd_filter4(mask, thresh, op1, op0, oq0, oq1, bd);
   }
 }
 
@@ -545,25 +520,20 @@ void vpx_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit,
     const uint16_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
     const uint16_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
 
-    const int8_t mask = highbd_filter_mask(*limit, *blimit,
-                                         p3, p2, p1, p0, q0, q1, q2, q3, bd);
-    const int8_t flat = highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3,
-                                          bd);
-    highbd_filter8(mask, *thresh, flat,
-                 s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
-                 s, s + 1 * p, s + 2 * p, s + 3 * p, bd);
+    const int8_t mask =
+        highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    const int8_t flat =
+        highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    highbd_filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p,
+                   s - 1 * p, s, s + 1 * p, s + 2 * p, s + 3 * p, bd);
     ++s;
   }
 }
 
-void vpx_highbd_lpf_horizontal_8_dual_c(uint16_t *s, int p,
-                                        const uint8_t *blimit0,
-                                        const uint8_t *limit0,
-                                        const uint8_t *thresh0,
-                                        const uint8_t *blimit1,
-                                        const uint8_t *limit1,
-                                        const uint8_t *thresh1,
-                                        int bd) {
+void vpx_highbd_lpf_horizontal_8_dual_c(
+    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+    const uint8_t *thresh1, int bd) {
   vpx_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1, bd);
   vpx_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1, bd);
 }
@@ -576,40 +546,31 @@ void vpx_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit,
   for (i = 0; i < 8 * count; ++i) {
     const uint16_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
     const uint16_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
-    const int8_t mask = highbd_filter_mask(*limit, *blimit,
-                                           p3, p2, p1, p0, q0, q1, q2, q3, bd);
-    const int8_t flat = highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3,
-                                          bd);
-    highbd_filter8(mask, *thresh, flat,
-                 s - 4, s - 3, s - 2, s - 1,
-                 s, s + 1, s + 2, s + 3,
-                 bd);
+    const int8_t mask =
+        highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    const int8_t flat =
+        highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    highbd_filter8(mask, *thresh, flat, s - 4, s - 3, s - 2, s - 1, s, s + 1,
+                   s + 2, s + 3, bd);
     s += pitch;
   }
 }
 
-void vpx_highbd_lpf_vertical_8_dual_c(uint16_t *s, int pitch,
-                                      const uint8_t *blimit0,
-                                      const uint8_t *limit0,
-                                      const uint8_t *thresh0,
-                                      const uint8_t *blimit1,
-                                      const uint8_t *limit1,
-                                      const uint8_t *thresh1,
-                                      int bd) {
+void vpx_highbd_lpf_vertical_8_dual_c(
+    uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+    const uint8_t *thresh1, int bd) {
   vpx_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1, bd);
-  vpx_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1,
-                              thresh1, 1, bd);
-}
-
-static INLINE void highbd_filter16(int8_t mask, uint8_t thresh,
-                                   uint8_t flat, uint8_t flat2,
-                                   uint16_t *op7, uint16_t *op6,
-                                   uint16_t *op5, uint16_t *op4,
-                                   uint16_t *op3, uint16_t *op2,
-                                   uint16_t *op1, uint16_t *op0,
-                                   uint16_t *oq0, uint16_t *oq1,
-                                   uint16_t *oq2, uint16_t *oq3,
-                                   uint16_t *oq4, uint16_t *oq5,
+  vpx_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1,
+                              bd);
+}
+
+static INLINE void highbd_filter16(int8_t mask, uint8_t thresh, uint8_t flat,
+                                   uint8_t flat2, uint16_t *op7, uint16_t *op6,
+                                   uint16_t *op5, uint16_t *op4, uint16_t *op3,
+                                   uint16_t *op2, uint16_t *op1, uint16_t *op0,
+                                   uint16_t *oq0, uint16_t *oq1, uint16_t *oq2,
+                                   uint16_t *oq3, uint16_t *oq4, uint16_t *oq5,
                                    uint16_t *oq6, uint16_t *oq7, int bd) {
   if (flat2 && flat && mask) {
     const uint16_t p7 = *op7;
@@ -630,34 +591,40 @@ static INLINE void highbd_filter16(int8_t mask, uint8_t thresh,
     const uint16_t q7 = *oq7;
 
     // 15-tap filter [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1]
-    *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 +
-                              q0, 4);
-    *op5 = ROUND_POWER_OF_TWO(p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 +
-                              q0 + q1, 4);
-    *op4 = ROUND_POWER_OF_TWO(p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 +
-                              q0 + q1 + q2, 4);
-    *op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 +
-                              q0 + q1 + q2 + q3, 4);
-    *op2 = ROUND_POWER_OF_TWO(p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 +
-                              q0 + q1 + q2 + q3 + q4, 4);
+    *op6 = ROUND_POWER_OF_TWO(
+        p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 + q0, 4);
+    *op5 = ROUND_POWER_OF_TWO(
+        p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 + q0 + q1, 4);
+    *op4 = ROUND_POWER_OF_TWO(
+        p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 + q0 + q1 + q2, 4);
+    *op3 = ROUND_POWER_OF_TWO(
+        p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 + q0 + q1 + q2 + q3, 4);
+    *op2 = ROUND_POWER_OF_TWO(
+        p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 + q0 + q1 + q2 + q3 + q4,
+        4);
     *op1 = ROUND_POWER_OF_TWO(p7 * 2 + p6 + p5 + p4 + p3 + p2 + p1 * 2 + p0 +
-                              q0 + q1 + q2 + q3 + q4 + q5, 4);
-    *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
-                              q0 + q1 + q2 + q3 + q4 + q5 + q6, 4);
-    *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 +
-                              q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4);
-    *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 +
-                              q0 + q1 * 2 + q2 + q3 + q4 + q5 + q6 + q7 * 2, 4);
-    *oq2 = ROUND_POWER_OF_TWO(p4 + p3 + p2 + p1 + p0 +
-                              q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, 4);
-    *oq3 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 +
-                              q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4);
-    *oq4 = ROUND_POWER_OF_TWO(p2 + p1 + p0 +
-                              q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4);
-    *oq5 = ROUND_POWER_OF_TWO(p1 + p0 +
-                              q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4);
-    *oq6 = ROUND_POWER_OF_TWO(p0 +
-                              q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4);
+                                  q0 + q1 + q2 + q3 + q4 + q5,
+                              4);
+    *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 + q0 +
+                                  q1 + q2 + q3 + q4 + q5 + q6,
+                              4);
+    *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 + q1 +
+                                  q2 + q3 + q4 + q5 + q6 + q7,
+                              4);
+    *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 + q2 +
+                                  q3 + q4 + q5 + q6 + q7 * 2,
+                              4);
+    *oq2 = ROUND_POWER_OF_TWO(
+        p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3,
+        4);
+    *oq3 = ROUND_POWER_OF_TWO(
+        p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4);
+    *oq4 = ROUND_POWER_OF_TWO(
+        p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4);
+    *oq5 = ROUND_POWER_OF_TWO(
+        p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4);
+    *oq6 = ROUND_POWER_OF_TWO(
+        p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4);
   } else {
     highbd_filter8(mask, thresh, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3,
                    bd);
@@ -680,20 +647,18 @@ void vpx_highbd_lpf_horizontal_16_c(uint16_t *s, int p, const uint8_t *blimit,
     const uint16_t q1 = s[1 * p];
     const uint16_t q2 = s[2 * p];
     const uint16_t q3 = s[3 * p];
-    const int8_t mask = highbd_filter_mask(*limit, *blimit,
-                                           p3, p2, p1, p0, q0, q1, q2, q3, bd);
-    const int8_t flat = highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3,
-                                          bd);
-    const int8_t flat2 = highbd_flat_mask5(
-        1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0,
-        q0, s[4 * p], s[5 * p], s[6 * p], s[7 * p], bd);
-
-    highbd_filter16(mask, *thresh, flat, flat2,
-                    s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p,
-                    s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
-                    s, s + 1 * p, s + 2 * p, s + 3 * p,
-                    s + 4 * p, s + 5 * p, s + 6 * p, s + 7 * p,
-                    bd);
+    const int8_t mask =
+        highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    const int8_t flat =
+        highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    const int8_t flat2 =
+        highbd_flat_mask5(1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, q0,
+                          s[4 * p], s[5 * p], s[6 * p], s[7 * p], bd);
+
+    highbd_filter16(mask, *thresh, flat, flat2, s - 8 * p, s - 7 * p, s - 6 * p,
+                    s - 5 * p, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s,
+                    s + 1 * p, s + 2 * p, s + 3 * p, s + 4 * p, s + 5 * p,
+                    s + 6 * p, s + 7 * p, bd);
     ++s;
   }
 }
@@ -701,8 +666,8 @@ void vpx_highbd_lpf_horizontal_16_c(uint16_t *s, int p, const uint8_t *blimit,
 static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p,
                                           const uint8_t *blimit,
                                           const uint8_t *limit,
-                                          const uint8_t *thresh,
-                                          int count, int bd) {
+                                          const uint8_t *thresh, int count,
+                                          int bd) {
   int i;
 
   for (i = 0; i < count; ++i) {
@@ -714,17 +679,16 @@ static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p,
     const uint16_t q1 = s[1];
     const uint16_t q2 = s[2];
     const uint16_t q3 = s[3];
-    const int8_t mask = highbd_filter_mask(*limit, *blimit,
-                                           p3, p2, p1, p0, q0, q1, q2, q3, bd);
-    const int8_t flat = highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3,
-                                          bd);
+    const int8_t mask =
+        highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
+    const int8_t flat =
+        highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd);
     const int8_t flat2 = highbd_flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0,
                                            q0, s[4], s[5], s[6], s[7], bd);
 
-    highbd_filter16(mask, *thresh, flat, flat2,
-                    s - 8, s - 7, s - 6, s - 5, s - 4, s - 3, s - 2, s - 1,
-                    s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, s + 7,
-                    bd);
+    highbd_filter16(mask, *thresh, flat, flat2, s - 8, s - 7, s - 6, s - 5,
+                    s - 4, s - 3, s - 2, s - 1, s, s + 1, s + 2, s + 3, s + 4,
+                    s + 5, s + 6, s + 7, bd);
     s += p;
   }
 }
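
Editorial note: the two hunks above reflow the same arithmetic for both edge orientations; the only real difference is addressing. vpx_highbd_lpf_horizontal_16_c samples a vertical column around the edge with stride p (s[-8 * p] .. s[7 * p]), while highbd_mb_lpf_vertical_edge_w samples a horizontal run with stride 1 (s[-8] .. s[7]). A minimal sketch of that shared access pattern (a hypothetical helper, not part of this patch):

    #include <stdint.h>

    /* One accessor covers both loop-filter orientations:
     * stride == pitch for a horizontal edge (pixels stacked vertically),
     * stride == 1 for a vertical edge (pixels side by side). */
    static uint16_t edge_sample(const uint16_t *s, int stride, int k) {
      return s[k * stride]; /* k ranges over [-8, 7] around the edge */
    }
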
@@ -738,8 +702,7 @@ void vpx_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit,
 void vpx_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p,
                                        const uint8_t *blimit,
                                        const uint8_t *limit,
-                                       const uint8_t *thresh,
-                                       int bd) {
+                                       const uint8_t *thresh, int bd) {
   highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16, bd);
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
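
For reference, every output of the 15-tap branch reformatted above is a weighted sum whose coefficients add up to 16, so ROUND_POWER_OF_TWO(sum, 4) is a rounded divide by 16. A small self-checking sketch, assuming only the standard vpx_dsp definition of the macro (the weight table is transcribed from the *op6 expression):

    #include <assert.h>
    #include <stdint.h>

    /* Same definition as vpx_dsp: add half the divisor, then shift. */
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    int main(void) {
      /* Weights for *op6: the border pixel p7 is replicated 7 times,
       * p6 carries the center weight 2, and p5..q0 contribute once each. */
      static const int w_op6[] = { 7, 2, 1, 1, 1, 1, 1, 1, 1 };
      int i, sum = 0;
      for (i = 0; i < (int)(sizeof(w_op6) / sizeof(w_op6[0])); ++i)
        sum += w_op6[i];
      assert(sum == 16); /* coefficients sum to 2^4, matching the shift of 4 */

      /* A flat high-bitdepth region passes through unchanged. */
      assert(ROUND_POWER_OF_TWO(512 * 16, 4) == 512);
      return 0;
    }
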
diff --git a/vpx_dsp/mips/common_dspr2.h b/vpx_dsp/mips/common_dspr2.h
index 7a10bf1c4050719bec76e0fef47d88d47ce86f01..0a42f5cec21f88c1564a228d45e9dd2ee2729d07 100644
--- a/vpx_dsp/mips/common_dspr2.h
+++ b/vpx_dsp/mips/common_dspr2.h
@@ -24,37 +24,21 @@ extern "C" {
 extern uint8_t *vpx_ff_cropTbl;  // From "vpx_dsp/mips/intrapred4_dspr2.c"
 
 static INLINE void prefetch_load(const unsigned char *src) {
-  __asm__ __volatile__ (
-      "pref   0,  0(%[src])   \n\t"
-      :
-      : [src] "r" (src)
-  );
+  __asm__ __volatile__("pref   0,  0(%[src])   \n\t" : : [src] "r"(src));
 }
 
 /* prefetch data for store */
 static INLINE void prefetch_store(unsigned char *dst) {
-  __asm__ __volatile__ (
-      "pref   1,  0(%[dst])   \n\t"
-      :
-      : [dst] "r" (dst)
-  );
+  __asm__ __volatile__("pref   1,  0(%[dst])   \n\t" : : [dst] "r"(dst));
 }
 
 static INLINE void prefetch_load_streamed(const unsigned char *src) {
-  __asm__ __volatile__ (
-      "pref   4,  0(%[src])   \n\t"
-      :
-      : [src] "r" (src)
-  );
+  __asm__ __volatile__("pref   4,  0(%[src])   \n\t" : : [src] "r"(src));
 }
 
 /* prefetch data for store, streamed */
 static INLINE void prefetch_store_streamed(unsigned char *dst) {
-  __asm__ __volatile__ (
-      "pref   5,  0(%[dst])   \n\t"
-      :
-      : [dst] "r" (dst)
-  );
+  __asm__ __volatile__("pref   5,  0(%[dst])   \n\t" : : [dst] "r"(dst));
 }
 #endif  // #if HAVE_DSPR2
 #ifdef __cplusplus
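
The four helpers above differ only in the hint code given to the MIPS pref instruction: 0 prefetches for load, 1 for store, and 4/5 are the streamed variants for data that will be touched once. On targets without DSPr2, a rough portable analogue (an assumption, not anything this patch introduces) is GCC's __builtin_prefetch, whose read/write flag and temporal-locality level map loosely onto the same hints:

    /* Portable sketch of the prefetch helpers via the GCC builtin.
     * Second arg: 0 = prefetch for read, 1 = prefetch for write.
     * Third arg: 3 = high temporal locality, 0 = streaming / no reuse. */
    static inline void prefetch_load_generic(const unsigned char *src) {
      __builtin_prefetch(src, 0, 3); /* ~ "pref 0" */
    }
    static inline void prefetch_store_generic(unsigned char *dst) {
      __builtin_prefetch(dst, 1, 3); /* ~ "pref 1" */
    }
    static inline void prefetch_load_streamed_generic(const unsigned char *src) {
      __builtin_prefetch(src, 0, 0); /* ~ "pref 4" */
    }
    static inline void prefetch_store_streamed_generic(unsigned char *dst) {
      __builtin_prefetch(dst, 1, 0); /* ~ "pref 5" */
    }
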
diff --git a/vpx_dsp/mips/convolve2_avg_dspr2.c b/vpx_dsp/mips/convolve2_avg_dspr2.c
index 3c767672fbd23afdae79668dc6f08c1c20ed34a9..ae88eddfd61dcf38c7be78e3c7fc85120b94bd45 100644
--- a/vpx_dsp/mips/convolve2_avg_dspr2.c
+++ b/vpx_dsp/mips/convolve2_avg_dspr2.c
@@ -18,25 +18,22 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src,
-                                         int32_t src_stride,
-                                         uint8_t *dst,
-                                         int32_t dst_stride,
-                                         const int16_t *filter_y,
-                                         int32_t w,
+static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
+                                         uint8_t *dst, int32_t dst_stride,
+                                         const int16_t *filter_y, int32_t w,
                                          int32_t h) {
-  int32_t       x, y;
+  int32_t x, y;
   const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2;
-  uint32_t      p1, p2;
-  uint32_t      scratch1, scratch2;
-  uint32_t      store1, store2;
-  int32_t       Temp1, Temp2;
+  uint8_t *dst_ptr;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  uint32_t load1, load2;
+  uint32_t p1, p2;
+  uint32_t scratch1, scratch2;
+  uint32_t store1, store2;
+  int32_t Temp1, Temp2;
   const int16_t *filter = &filter_y[3];
-  uint32_t      filter45;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -48,7 +45,7 @@ static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src,
       src_ptr = src + x;
       dst_ptr = dst + x;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[load1],     0(%[src_ptr])                   \n\t"
           "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
           "ulw              %[load2],     0(%[src_ptr])                   \n\t"
@@ -105,16 +102,13 @@ static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src,
           "sb               %[store1],    2(%[dst_ptr])                   \n\t"
           "sb               %[store2],    3(%[dst_ptr])                   \n\t"
 
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride), [cm] "r" (cm),
-            [dst_ptr] "r" (dst_ptr)
-      );
+          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
+            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1),
+            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
+            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
+          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
+            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
     }
 
     /* Next row... */
@@ -124,23 +118,21 @@ static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src,
 }
 
 static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
-                                          int32_t src_stride,
-                                          uint8_t *dst,
+                                          int32_t src_stride, uint8_t *dst,
                                           int32_t dst_stride,
-                                          const int16_t *filter_y,
-                                          int32_t h) {
-  int32_t       x, y;
+                                          const int16_t *filter_y, int32_t h) {
+  int32_t x, y;
   const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2;
-  uint32_t      p1, p2;
-  uint32_t      scratch1, scratch2;
-  uint32_t      store1, store2;
-  int32_t       Temp1, Temp2;
+  uint8_t *dst_ptr;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  uint32_t load1, load2;
+  uint32_t p1, p2;
+  uint32_t scratch1, scratch2;
+  uint32_t store1, store2;
+  int32_t Temp1, Temp2;
   const int16_t *filter = &filter_y[3];
-  uint32_t filter45;;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -153,7 +145,7 @@ static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
       src_ptr = src + x;
       dst_ptr = dst + x;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[load1],     0(%[src_ptr])                   \n\t"
           "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
           "ulw              %[load2],     0(%[src_ptr])                   \n\t"
@@ -210,16 +202,13 @@ static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
           "sb               %[store1],    2(%[dst_ptr])                   \n\t"
           "sb               %[store2],    3(%[dst_ptr])                   \n\t"
 
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride), [cm] "r" (cm),
-            [dst_ptr] "r" (dst_ptr)
-      );
+          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
+            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1),
+            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
+            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
+          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
+            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
     }
 
     /* Next row... */
@@ -231,18 +220,16 @@ static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
 void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4,
-                                  int w, int h) {
+                                  const int16_t *filter_y, int y_step_q4, int w,
+                                  int h) {
   uint32_t pos = 38;
 
   assert(y_step_q4 == 16);
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
   prefetch_store(dst);
 
@@ -251,22 +238,17 @@ void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
     case 8:
     case 16:
     case 32:
-      convolve_bi_avg_vert_4_dspr2(src, src_stride,
-                                   dst, dst_stride,
-                                   filter_y, w, h);
+      convolve_bi_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y,
+                                   w, h);
       break;
     case 64:
       prefetch_store(dst + 32);
-      convolve_bi_avg_vert_64_dspr2(src, src_stride,
-                                    dst, dst_stride,
-                                    filter_y, h);
+      convolve_bi_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y,
+                                    h);
       break;
     default:
-      vpx_convolve8_avg_vert_c(src, src_stride,
-                               dst, dst_stride,
-                               filter_x, x_step_q4,
-                               filter_y, y_step_q4,
-                               w, h);
+      vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+                               x_step_q4, filter_y, y_step_q4, w, h);
       break;
   }
 }
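
A note on the arithmetic these kernels reformat: filter45 = ((const int32_t *)filter)[0] packs the two center taps filter_y[3] and filter_y[4] into one 32-bit word so dpa.w.ph can multiply-accumulate both in a single instruction; vector4a = 64 is the rounding constant for the 7-bit filter; and addqh_r.w is the rounded halving add that averages the filtered result into the existing dst pixel. A scalar model of one output pixel (hypothetical helper names, a plain clamp standing in for the cm crop table):

    #include <stdint.h>

    #define FILTER_BITS 7

    static uint8_t clip_pixel(int v) {
      return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
    }

    /* One pixel of the 2-tap averaging vertical filter: convolve with the
     * two center taps, round (add 64) and shift by FILTER_BITS, clamp,
     * then average into dst with a rounded halving add (addqh_r.w). */
    static uint8_t bi_avg_vert_pixel(const uint8_t *src, int src_stride,
                                     uint8_t dst, const int16_t *filter_y) {
      const int sum = src[0] * filter_y[3] + src[src_stride] * filter_y[4];
      const uint8_t filtered = clip_pixel((sum + 64) >> FILTER_BITS);
      return (uint8_t)((dst + filtered + 1) >> 1);
    }
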
diff --git a/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c b/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c
index 932a73d39b17beda50b0ac1e59fafb62d5cf0a2b..e944207b6ee79761320e76e7360d7788a0696095 100644
--- a/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c
+++ b/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c
@@ -19,20 +19,18 @@
 
 #if HAVE_DSPR2
 static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src,
-                                          int32_t src_stride,
-                                          uint8_t *dst,
+                                          int32_t src_stride, uint8_t *dst,
                                           int32_t dst_stride,
-                                          const int16_t *filter_x0,
-                                          int32_t h) {
+                                          const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
-  int32_t  Temp1, Temp2, Temp3, Temp4;
+  int32_t Temp1, Temp2, Temp3, Temp4;
   uint32_t vector4a = 64;
   uint32_t tp1, tp2;
   uint32_t p1, p2, p3;
   uint32_t tn1, tn2;
   const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -42,7 +40,7 @@ static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride + 32);
     prefetch_store(dst + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],         0(%[src])                      \n\t"
         "ulw              %[tp2],         4(%[src])                      \n\t"
 
@@ -61,51 +59,49 @@ static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src,
         "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
         "extp             %[Temp3],       $ac2,           31             \n\t"
 
-        "lbu              %[p2],          3(%[dst])                      \n\t"  /* load odd 2 */
+        "lbu              %[p2],          3(%[dst])                      \n\t" /* load odd 2 */
 
         /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"  /* even 1 */
+        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t" /* even 1 */
         "mtlo             %[vector4a],    $ac3                           \n\t"
         "mthi             $zero,          $ac3                           \n\t"
-        "lbu              %[Temp1],       1(%[dst])                      \n\t"  /* load odd 1 */
+        "lbu              %[Temp1],       1(%[dst])                      \n\t" /* load odd 1 */
         "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
         "preceu.ph.qbl    %[p3],          %[tp2]                         \n\t"
         "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
         "extp             %[Temp2],       $ac3,           31             \n\t"
 
-        "lbu              %[tn2],         0(%[dst])                      \n\t"  /* load even 1 */
+        "lbu              %[tn2],         0(%[dst])                      \n\t" /* load even 1 */
 
         /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"  /* even 2 */
+        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t" /* even 2 */
         "mtlo             %[vector4a],    $ac2                           \n\t"
         "mthi             $zero,          $ac2                           \n\t"
-        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"  /* odd 1 */
-        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t"  /* average even 1 */
+        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t" /* odd 1 */
+        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t" /* average even 1 */
         "dpa.w.ph         $ac2,           %[p3],          %[filter45]    \n\t"
         "extp             %[Temp4],       $ac2,           31             \n\t"
 
-        "lbu              %[tp1],         2(%[dst])                      \n\t"  /* load even 2 */
-        "sb               %[tn2],         0(%[dst])                      \n\t"  /* store even 1 */
+        "lbu              %[tp1],         2(%[dst])                      \n\t" /* load even 2 */
+        "sb               %[tn2],         0(%[dst])                      \n\t" /* store even 1 */
 
         /* clamp */
-        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t"  /* average odd 1 */
-        "lbux             %[p3],          %[Temp4](%[cm])                \n\t"  /* odd 2 */
-        "sb               %[Temp1],       1(%[dst])                      \n\t"  /* store odd 1 */
+        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t" /* average odd 1 */
+        "lbux             %[p3],          %[Temp4](%[cm])                \n\t" /* odd 2 */
+        "sb               %[Temp1],       1(%[dst])                      \n\t" /* store odd 1 */
 
-        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t"  /* average even 2 */
-        "sb               %[tp1],         2(%[dst])                      \n\t"  /* store even 2 */
+        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t" /* average even 2 */
+        "sb               %[tp1],         2(%[dst])                      \n\t" /* store even 2 */
 
-        "addqh_r.w        %[p2],          %[p2],          %[p3]          \n\t"  /* average odd 2 */
-        "sb               %[p2],          3(%[dst])                      \n\t"  /* store odd 2 */
+        "addqh_r.w        %[p2],          %[p2],          %[p3]          \n\t" /* average odd 2 */
+        "sb               %[p2],          3(%[dst])                      \n\t" /* store odd 2 */
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
-        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+          [Temp4] "=&r"(Temp4)
+        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
+          [dst] "r"(dst), [src] "r"(src));
 
     /* Next row... */
     src += src_stride;
@@ -114,11 +110,9 @@ static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src,
 }
 
 static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
-                                         int32_t src_stride,
-                                         uint8_t *dst,
-                                         int32_t dst_stride,
-                                         const int16_t *filter_x0,
-                                         int32_t h) {
+                                          int32_t src_stride, uint8_t *dst,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
   uint32_t vector4a = 64;
@@ -127,7 +121,7 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
   uint32_t p1, p2, p3, p4, n1;
   uint32_t st0, st1;
   const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -137,7 +131,7 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride + 32);
     prefetch_store(dst + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],         0(%[src])                      \n\t"
         "ulw              %[tp2],         4(%[src])                      \n\t"
 
@@ -246,15 +240,12 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
         "sb               %[tp4],         5(%[dst])                      \n\t"
         "sb               %[tp1],         7(%[dst])                      \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-          [st0] "=&r" (st0), [st1] "=&r" (st1),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+          [tp4] "=&r"(tp4), [st0] "=&r"(st0), [st1] "=&r"(st1), [p1] "=&r"(p1),
+          [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [n1] "=&r"(n1),
+          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
+        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
+          [dst] "r"(dst), [src] "r"(src));
 
     /* Next row... */
     src += src_stride;
@@ -263,12 +254,10 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
 }
 
 static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
-                                          int32_t src_stride,
-                                          uint8_t *dst_ptr,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_x0,
-                                          int32_t h,
-                                          int32_t count) {
+                                           int32_t src_stride, uint8_t *dst_ptr,
+                                           int32_t dst_stride,
+                                           const int16_t *filter_x0, int32_t h,
+                                           int32_t count) {
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
@@ -279,7 +268,7 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
   uint32_t p1, p2, p3, p4, p5;
   uint32_t st1, st2, st3;
   const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -293,7 +282,7 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
     prefetch_store(dst_ptr + dst_stride);
 
     for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[qload1],    0(%[src])                    \n\t"
           "ulw              %[qload2],    4(%[src])                    \n\t"
 
@@ -493,14 +482,13 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
           "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
           "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
 
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
+            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
+            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
+            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+            [Temp3] "=&r"(Temp3)
+          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
+            [dst] "r"(dst), [src] "r"(src));
 
       src += 16;
       dst += 16;
@@ -513,11 +501,10 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
 }
 
 static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr,
-                                          int32_t src_stride,
-                                          uint8_t *dst_ptr,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_x0,
-                                          int32_t h) {
+                                           int32_t src_stride, uint8_t *dst_ptr,
+                                           int32_t dst_stride,
+                                           const int16_t *filter_x0,
+                                           int32_t h) {
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
@@ -528,7 +515,7 @@ static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr,
   uint32_t p1, p2, p3, p4, p5;
   uint32_t st1, st2, st3;
   const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -544,7 +531,7 @@ static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr,
     prefetch_store(dst_ptr + dst_stride + 32);
 
     for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[qload1],    0(%[src])                    \n\t"
           "ulw              %[qload2],    4(%[src])                    \n\t"
 
@@ -744,14 +731,13 @@ static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr,
           "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
           "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
 
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
+            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
+            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
+            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+            [Temp3] "=&r"(Temp3)
+          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
+            [dst] "r"(dst), [src] "r"(src));
 
       src += 16;
       dst += 16;
@@ -773,11 +759,9 @@ void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
   assert(x_step_q4 == 16);
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
   /* prefetch data to cache memory */
   prefetch_load(src);
@@ -786,39 +770,31 @@ void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
   switch (w) {
     case 4:
-      convolve_bi_avg_horiz_4_dspr2(src, src_stride,
-                                   dst, dst_stride,
-                                   filter_x, h);
+      convolve_bi_avg_horiz_4_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                    h);
       break;
     case 8:
-      convolve_bi_avg_horiz_8_dspr2(src, src_stride,
-                                   dst, dst_stride,
-                                   filter_x, h);
+      convolve_bi_avg_horiz_8_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                    h);
       break;
     case 16:
-      convolve_bi_avg_horiz_16_dspr2(src, src_stride,
-                                    dst, dst_stride,
-                                    filter_x, h, 1);
+      convolve_bi_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                     h, 1);
       break;
     case 32:
-      convolve_bi_avg_horiz_16_dspr2(src, src_stride,
-                                    dst, dst_stride,
-                                    filter_x, h, 2);
+      convolve_bi_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                     h, 2);
       break;
     case 64:
       prefetch_load(src + 64);
       prefetch_store(dst + 32);
 
-      convolve_bi_avg_horiz_64_dspr2(src, src_stride,
-                                    dst, dst_stride,
-                                    filter_x, h);
+      convolve_bi_avg_horiz_64_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                     h);
       break;
     default:
-      vpx_convolve8_avg_horiz_c(src, src_stride,
-                                dst, dst_stride,
-                                filter_x, x_step_q4,
-                                filter_y, y_step_q4,
-                                w, h);
+      vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+                                x_step_q4, filter_y, y_step_q4, w, h);
       break;
   }
 }
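
Structurally, the 16-wide horizontal kernel above does double duty for w == 32 via its count argument: each pass of the inner c loop emits 16 pixels (src += 16; dst += 16), so the wrapper passes count = 1 for 16-wide blocks and count = 2 for 32-wide ones. A skeleton of that loop shape, with the asm body elided and illustrative names:

    #include <stdint.h>

    /* Loop skeleton shared by the w=16 (count=1) and w=32 (count=2) cases. */
    static void horiz_16_skeleton(const uint8_t *src_ptr, int src_stride,
                                  uint8_t *dst_ptr, int dst_stride, int h,
                                  int count) {
      int y, c;
      for (y = h; y--;) {
        const uint8_t *src = src_ptr;
        uint8_t *dst = dst_ptr;
        for (c = 0; c < count; c++) {
          /* ... 16-pixel asm block would run here ... */
          src += 16;
          dst += 16;
        }
        src_ptr += src_stride;
        dst_ptr += dst_stride;
      }
    }
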
diff --git a/vpx_dsp/mips/convolve2_dspr2.c b/vpx_dsp/mips/convolve2_dspr2.c
index d111029d42a59858dccc7329d3298f2876826f57..e355ba3a06cb24a498bbed89a63e7ee75eb780d1 100644
--- a/vpx_dsp/mips/convolve2_dspr2.c
+++ b/vpx_dsp/mips/convolve2_dspr2.c
@@ -18,21 +18,18 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const int16_t *filter_x0,
-                                                 int32_t h) {
-  int32_t       y;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint8_t       *dst_ptr;
-  int32_t       Temp1, Temp2;
-  uint32_t      vector4a = 64;
-  uint32_t      tp1, tp2;
-  uint32_t      p1, p2;
+static void convolve_bi_horiz_4_transposed_dspr2(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    const int16_t *filter_x0, int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *dst_ptr;
+  int32_t Temp1, Temp2;
+  uint32_t vector4a = 64;
+  uint32_t tp1, tp2;
+  uint32_t p1, p2;
   const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -42,7 +39,7 @@ static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride);
     prefetch_load(src + src_stride + 32);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],         0(%[src])                      \n\t"
         "ulw              %[tp2],         4(%[src])                      \n\t"
 
@@ -94,13 +91,10 @@ static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src,
         "sb               %[p2],          0(%[dst_ptr])                  \n\t"
         "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [dst_ptr] "+r" (dst_ptr)
-        : [filter45] "r" (filter45),[vector4a] "r" (vector4a),
-          [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [p1] "=&r"(p1), [p2] "=&r"(p2),
+          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [dst_ptr] "+r"(dst_ptr)
+        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
+          [src] "r"(src), [dst_stride] "r"(dst_stride));
 
     /* Next row... */
     src += src_stride;
@@ -108,12 +102,9 @@ static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const int16_t *filter_x0,
-                                                 int32_t h) {
+static void convolve_bi_horiz_8_transposed_dspr2(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
   uint8_t *dst_ptr;
@@ -124,7 +115,7 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,
   uint8_t *odd_dst;
   uint32_t dst_pitch_2 = (dst_stride << 1);
   const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -136,7 +127,7 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,
     dst_ptr = dst;
     odd_dst = (dst_ptr + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],         0(%[src])                       \n\t"
         "ulw              %[tp2],         4(%[src])                       \n\t"
 
@@ -180,7 +171,8 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,
         "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"
         "extp             %[Temp3],       $ac2,           31              \n\t"
 
-        "lbux             %[Temp1],         %[p3](%[cm])                    \n\t"
+        "lbux             %[Temp1],         %[p3](%[cm])                    "
+        "\n\t"
 
         /* odd 1. pixel */
         "mtlo             %[vector4a],    $ac1                            \n\t"
@@ -231,13 +223,12 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,
 
         "sb               %[p1],          0(%[odd_dst])                   \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-          [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst)
-        : [filter45] "r" (filter45),[vector4a] "r" (vector4a), [cm] "r" (cm),
-          [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), [p1] "=&r"(p1),
+          [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [Temp1] "=&r"(Temp1),
+          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [dst_ptr] "+r"(dst_ptr),
+          [odd_dst] "+r"(odd_dst)
+        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
+          [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2));
 
     /* Next row... */
     src += src_stride;
@@ -245,26 +236,22 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst_ptr,
-                                                  int32_t dst_stride,
-                                                  const int16_t *filter_x0,
-                                                  int32_t h,
-                                                  int32_t count) {
-  int32_t       c, y;
+static void convolve_bi_horiz_16_transposed_dspr2(
+    const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
+    int32_t dst_stride, const int16_t *filter_x0, int32_t h, int32_t count) {
+  int32_t c, y;
   const uint8_t *src;
-  uint8_t       *dst;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector_64 = 64;
-  int32_t       Temp1, Temp2, Temp3;
-  uint32_t      qload1, qload2;
-  uint32_t      p1, p2, p3, p4, p5;
-  uint32_t      st1, st2, st3;
-  uint32_t      dst_pitch_2 = (dst_stride << 1);
-  uint8_t       *odd_dst;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  uint32_t dst_pitch_2 = (dst_stride << 1);
+  uint8_t *odd_dst;
   const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -279,193 +266,329 @@ static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
     odd_dst = (dst + dst_stride);
 
     for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],        0(%[src])                       \n\t"
-          "ulw              %[qload2],        4(%[src])                       \n\t"
+      __asm__ __volatile__(
+          "ulw              %[qload1],        0(%[src])                       "
+          "\n\t"
+          "ulw              %[qload2],        4(%[src])                       "
+          "\n\t"
 
           /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "ulw              %[qload1],        8(%[src])                       \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 1 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 2 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       "
+          "\n\t"
+          "ulw              %[qload1],        8(%[src])                       "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     "
+          "\n\t" /* even 1 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 1 */
 
           /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload1]                       \n\t"
-          "ulw              %[qload2],        12(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     \n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* even 3 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p5],            %[qload1]                       "
+          "\n\t"
+          "ulw              %[qload2],        12(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     "
+          "\n\t" /* even 1 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 1 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 1 */
 
           /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     \n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 4 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p2],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 1 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]   "
+          "          \n\t"
+          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     "
+          "\n\t" /* even 3 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* even 3 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 1 */
 
           /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     \n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 5 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p3],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 2 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     "
+          "\n\t" /* even 4 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 4 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* even 3 */
 
           /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     \n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* even 6 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "sb               %[st3],           0(%[dst])                       "
+          "\n\t" /* even 3 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     "
+          "\n\t" /* even 5 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 5 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 4 */
 
           /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        20(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     \n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 7 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 4 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload1],        20(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     "
+          "\n\t" /* even 6 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* even 6 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 5 */
 
           /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 8 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 5 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     "
+          "\n\t" /* even 7 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 7 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* even 6 */
 
           /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 1 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     "
+          "\n\t" /* even 8 */
+          "sb               %[st3],           0(%[dst])                       "
+          "\n\t" /* even 6 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 8 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 7 */
 
           /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       \n\t"
-          "ulw              %[qload2],        5(%[src])                       \n\t"
+          "ulw              %[qload1],        1(%[src])                       "
+          "\n\t"
+          "ulw              %[qload2],        5(%[src])                       "
+          "\n\t"
 
           /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        9(%[src])                       \n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     \n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 2 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 7 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload2],        9(%[src])                       "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     "
+          "\n\t" /* odd 1 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 1 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 8 */
 
           /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* odd 3 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 8 */
+          "ulw              %[qload1],        13(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     "
+          "\n\t" /* odd 2 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 2 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 1 */
 
           /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 4 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 1 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     "
+          "\n\t" /* odd 3 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* odd 3 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 2 */
 
           /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     \n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 5 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 2 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     "
+          "\n\t" /* odd 4 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 4 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* odd 3 */
 
           /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* odd 6 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "sb               %[st2],           0(%[odd_dst])                   "
+          "\n\t" /* odd 3 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     "
+          "\n\t" /* odd 5 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 5 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 4 */
 
           /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        21(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     \n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 7 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 4 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload1],        21(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     "
+          "\n\t" /* odd 6 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* odd 6 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 5 */
 
           /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     \n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 8 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 5 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     "
+          "\n\t" /* odd 7 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 7 */
 
           /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     \n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm),
-            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-      );
+          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     "
+          "\n\t" /* odd 8 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 8 */
+
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* odd 6 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 7 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 8 */
+
+          "sb               %[st2],           0(%[odd_dst])                   "
+          "\n\t" /* odd 6 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 7 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 8 */
+
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
+            [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
+            [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
+            [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+            [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
+          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
+            [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2));
 
       src += 16;
       dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
@@ -478,25 +601,22 @@ static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
   }
 }
 
-static void convolve_bi_horiz_64_transposed_dspr2(const uint8_t *src_ptr,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst_ptr,
-                                                  int32_t dst_stride,
-                                                  const int16_t *filter_x0,
-                                                  int32_t h) {
-  int32_t       c, y;
+static void convolve_bi_horiz_64_transposed_dspr2(
+    const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
+    int32_t dst_stride, const int16_t *filter_x0, int32_t h) {
+  int32_t c, y;
   const uint8_t *src;
-  uint8_t       *dst;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector_64 = 64;
-  int32_t       Temp1, Temp2, Temp3;
-  uint32_t      qload1, qload2;
-  uint32_t      p1, p2, p3, p4, p5;
-  uint32_t      st1, st2, st3;
-  uint32_t      dst_pitch_2 = (dst_stride << 1);
-  uint8_t       *odd_dst;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  uint32_t dst_pitch_2 = (dst_stride << 1);
+  uint8_t *odd_dst;
   const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
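+  /* filter45 now holds the two non-zero taps of the bilinear filter,
+     filter_x0[3] and filter_x0[4], packed into one word so that dpa.w.ph
+     can apply both taps in a single dual multiply-accumulate. */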
 
@@ -512,193 +632,329 @@ static void convolve_bi_horiz_64_transposed_dspr2(const uint8_t *src_ptr,
     odd_dst = (dst + dst_stride);
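+    /* Even-indexed outputs are stored through dst and odd-indexed outputs
+       through odd_dst; both pointers advance by dst_pitch_2 = 2 * dst_stride,
+       so a horizontal run of source pixels lands in a destination column,
+       i.e. the block is written out transposed. */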
 
     for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],        0(%[src])                       \n\t"
-          "ulw              %[qload2],        4(%[src])                       \n\t"
+      __asm__ __volatile__(
+          "ulw              %[qload1],        0(%[src])                       "
+          "\n\t"
+          "ulw              %[qload2],        4(%[src])                       "
+          "\n\t"
 
           /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "ulw              %[qload1],        8(%[src])                       \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 1 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 2 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       "
+          "\n\t"
+          "ulw              %[qload1],        8(%[src])                       "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     "
+          "\n\t" /* even 1 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 1 */
 
           /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload1]                       \n\t"
-          "ulw              %[qload2],        12(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     \n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* even 3 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p5],            %[qload1]                       "
+          "\n\t"
+          "ulw              %[qload2],        12(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     "
+          "\n\t" /* even 1 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 1 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 1 */
 
           /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     \n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 4 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p2],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 1 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]   "
+          "          \n\t"
+          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     "
+          "\n\t" /* even 3 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* even 3 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 1 */
 
           /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     \n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 5 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p3],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 2 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     "
+          "\n\t" /* even 4 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 4 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* even 3 */
 
           /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     \n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* even 6 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "sb               %[st3],           0(%[dst])                       "
+          "\n\t" /* even 3 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     "
+          "\n\t" /* even 5 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 5 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 4 */
 
           /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        20(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     \n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 7 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 4 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload1],        20(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     "
+          "\n\t" /* even 6 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* even 6 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 5 */
 
           /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 8 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 5 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     "
+          "\n\t" /* even 7 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 7 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* even 6 */
 
           /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 1 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     "
+          "\n\t" /* even 8 */
+          "sb               %[st3],           0(%[dst])                       "
+          "\n\t" /* even 6 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 8 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 7 */
 
           /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       \n\t"
-          "ulw              %[qload2],        5(%[src])                       \n\t"
+          "ulw              %[qload1],        1(%[src])                       "
+          "\n\t"
+          "ulw              %[qload2],        5(%[src])                       "
+          "\n\t"
 
           /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        9(%[src])                       \n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     \n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 2 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 7 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload2],        9(%[src])                       "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     "
+          "\n\t" /* odd 1 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 1 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 8 */
 
           /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* odd 3 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 8 */
+          "ulw              %[qload1],        13(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     "
+          "\n\t" /* odd 2 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 2 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 1 */
 
           /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 4 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 1 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     "
+          "\n\t" /* odd 3 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* odd 3 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 2 */
 
           /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     \n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 5 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 2 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     "
+          "\n\t" /* odd 4 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 4 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* odd 3 */
 
           /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* odd 6 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "sb               %[st2],           0(%[odd_dst])                   "
+          "\n\t" /* odd 3 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     "
+          "\n\t" /* odd 5 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 5 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 4 */
 
           /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        21(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     \n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 7 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 4 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload1],        21(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     "
+          "\n\t" /* odd 6 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* odd 6 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 5 */
 
           /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     \n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 8 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 5 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     "
+          "\n\t" /* odd 7 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 7 */
 
           /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     \n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm),
-            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-      );
+          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     "
+          "\n\t" /* odd 8 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 8 */
+
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* odd 6 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 7 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 8 */
+
+          "sb               %[st2],           0(%[odd_dst])                   "
+          "\n\t" /* odd 6 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 7 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 8 */
+
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
+            [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
+            [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
+            [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+            [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
+          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
+            [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2));
 
       src += 16;
       dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
@@ -731,18 +987,15 @@ void convolve_bi_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
   }
 }
 
-void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter,
-                         int w, int h) {
+void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                         ptrdiff_t dst_stride, const int16_t *filter, int w,
+                         int h) {
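+  /* Note: this path writes its output transposed (it dispatches to the
+     convolve_bi_horiz_*_transposed_dspr2 kernels above), unlike
+     vpx_convolve2_horiz_dspr2 in convolve2_horiz_dspr2.c. */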
   uint32_t pos = 38;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
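+  /* pos = 38 makes the kernels' "extp ..., $acN, 31" extract bits 38..7 of
+     the accumulator, i.e. the filtered sum shifted right by FILTER_BITS (7);
+     the "mtlo %[vector_64]" seeding each accumulator with 64 = 1 << 6
+     supplies the rounding constant. */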
 
   /* prefetch data to cache memory */
   prefetch_load(src);
@@ -750,32 +1003,26 @@ void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
   switch (w) {
     case 4:
-      convolve_bi_horiz_4_transposed_dspr2(src, src_stride,
-                                           dst, dst_stride,
+      convolve_bi_horiz_4_transposed_dspr2(src, src_stride, dst, dst_stride,
                                            filter, h);
       break;
     case 8:
-      convolve_bi_horiz_8_transposed_dspr2(src, src_stride,
-                                           dst, dst_stride,
+      convolve_bi_horiz_8_transposed_dspr2(src, src_stride, dst, dst_stride,
                                            filter, h);
       break;
     case 16:
     case 32:
-      convolve_bi_horiz_16_transposed_dspr2(src, src_stride,
-                                            dst, dst_stride,
-                                            filter, h,
-                                            (w/16));
+      convolve_bi_horiz_16_transposed_dspr2(src, src_stride, dst, dst_stride,
+                                            filter, h, (w / 16));
       break;
     case 64:
       prefetch_load(src + 32);
-      convolve_bi_horiz_64_transposed_dspr2(src, src_stride,
-                                            dst, dst_stride,
+      convolve_bi_horiz_64_transposed_dspr2(src, src_stride, dst, dst_stride,
                                             filter, h);
       break;
     default:
-      convolve_bi_horiz_transposed(src, src_stride,
-                                   dst, dst_stride,
-                                   filter, w, h);
+      convolve_bi_horiz_transposed(src, src_stride, dst, dst_stride, filter, w,
+                                   h);
       break;
   }
 }
diff --git a/vpx_dsp/mips/convolve2_horiz_dspr2.c b/vpx_dsp/mips/convolve2_horiz_dspr2.c
index 9fe1a3454b7e43fbdb0bf6175767eecd9b60e0c7..5cc06b5f26007da6b0d5ade8e11c1b4c8e0083fd 100644
--- a/vpx_dsp/mips/convolve2_horiz_dspr2.c
+++ b/vpx_dsp/mips/convolve2_horiz_dspr2.c
@@ -18,12 +18,9 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-static void convolve_bi_horiz_4_dspr2(const uint8_t *src,
-                                      int32_t src_stride,
-                                      uint8_t *dst,
-                                      int32_t dst_stride,
-                                      const int16_t *filter_x0,
-                                      int32_t h) {
+static void convolve_bi_horiz_4_dspr2(const uint8_t *src, int32_t src_stride,
+                                      uint8_t *dst, int32_t dst_stride,
+                                      const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
   int32_t Temp1, Temp2, Temp3, Temp4;
@@ -31,7 +28,7 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src,
   uint32_t tp1, tp2;
   uint32_t p1, p2;
   const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -41,7 +38,7 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride + 32);
     prefetch_store(dst + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],      0(%[src])                      \n\t"
         "ulw              %[tp2],      4(%[src])                      \n\t"
 
@@ -86,13 +83,11 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src,
         "sb               %[tp2],      2(%[dst])                      \n\t"
         "sb               %[p2],       3(%[dst])                      \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
-        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [p1] "=&r"(p1), [p2] "=&r"(p2),
+          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+          [Temp4] "=&r"(Temp4)
+        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
+          [dst] "r"(dst), [src] "r"(src));
 
     /* Next row... */
     src += src_stride;
@@ -100,12 +95,9 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_bi_horiz_8_dspr2(const uint8_t *src,
-                                      int32_t src_stride,
-                                      uint8_t *dst,
-                                      int32_t dst_stride,
-                                      const int16_t *filter_x0,
-                                      int32_t h) {
+static void convolve_bi_horiz_8_dspr2(const uint8_t *src, int32_t src_stride,
+                                      uint8_t *dst, int32_t dst_stride,
+                                      const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
   uint32_t vector4a = 64;
@@ -114,7 +106,7 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src,
   uint32_t p1, p2, p3, p4;
   uint32_t st0, st1;
   const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -124,7 +116,7 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride + 32);
     prefetch_store(dst + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],      0(%[src])                      \n\t"
         "ulw              %[tp2],      4(%[src])                      \n\t"
 
@@ -210,13 +202,12 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src,
         "sb               %[p2],       5(%[dst])                      \n\t"
         "sb               %[p1],       7(%[dst])                      \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
-          [st0] "=&r" (st0), [st1] "=&r" (st1),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+          [st0] "=&r"(st0), [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2),
+          [p3] "=&r"(p3), [p4] "=&r"(p4), [Temp1] "=&r"(Temp1),
+          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
+        : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm),
+          [dst] "r"(dst), [src] "r"(src));
 
     /* Next row... */
     src += src_stride;
@@ -225,11 +216,9 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src,
 }
 
 static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
-                                       int32_t src_stride,
-                                       uint8_t *dst_ptr,
+                                       int32_t src_stride, uint8_t *dst_ptr,
                                        int32_t dst_stride,
-                                       const int16_t *filter_x0,
-                                       int32_t h,
+                                       const int16_t *filter_x0, int32_t h,
                                        int32_t count) {
   int32_t y, c;
   const uint8_t *src;
@@ -241,7 +230,7 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
   uint32_t p1, p2, p3, p4, p5;
   uint32_t st1, st2, st3;
   const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -255,7 +244,7 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
     prefetch_store(dst_ptr + dst_stride);
 
     for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[qload1],    0(%[src])                    \n\t"
           "ulw              %[qload2],    4(%[src])                    \n\t"
 
@@ -413,14 +402,13 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
           "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
           "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
 
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
+            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
+            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
+          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
+            [dst] "r"(dst), [src] "r"(src));
 
       src += 16;
       dst += 16;
@@ -433,11 +421,9 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
 }
 
 static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
-                                       int32_t src_stride,
-                                       uint8_t *dst_ptr,
+                                       int32_t src_stride, uint8_t *dst_ptr,
                                        int32_t dst_stride,
-                                       const int16_t *filter_x0,
-                                       int32_t h) {
+                                       const int16_t *filter_x0, int32_t h) {
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
@@ -448,7 +434,7 @@ static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
   uint32_t p1, p2, p3, p4, p5;
   uint32_t st1, st2, st3;
   const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -464,7 +450,7 @@ static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
     prefetch_store(dst_ptr + dst_stride + 32);
 
     for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[qload1],    0(%[src])                    \n\t"
           "ulw              %[qload2],    4(%[src])                    \n\t"
 
@@ -622,14 +608,13 @@ static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
           "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
           "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
 
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
+            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
+            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
+          : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm),
+            [dst] "r"(dst), [src] "r"(src));
 
       src += 16;
       dst += 16;
@@ -644,8 +629,8 @@ static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
 void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4,
-                               int w, int h) {
+                               const int16_t *filter_y, int y_step_q4, int w,
+                               int h) {
   uint32_t pos = 38;
 
   assert(x_step_q4 == 16);
@@ -653,11 +638,9 @@ void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
   prefetch_load((const uint8_t *)filter_x);
 
  /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
   /* prefetch data to cache memory */
   prefetch_load(src);
@@ -666,39 +649,31 @@ void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
   switch (w) {
     case 4:
-      convolve_bi_horiz_4_dspr2(src, (int32_t)src_stride,
-                                dst, (int32_t)dst_stride,
-                                filter_x, (int32_t)h);
+      convolve_bi_horiz_4_dspr2(src, (int32_t)src_stride, dst,
+                                (int32_t)dst_stride, filter_x, (int32_t)h);
       break;
     case 8:
-      convolve_bi_horiz_8_dspr2(src, (int32_t)src_stride,
-                                dst, (int32_t)dst_stride,
-                                filter_x, (int32_t)h);
+      convolve_bi_horiz_8_dspr2(src, (int32_t)src_stride, dst,
+                                (int32_t)dst_stride, filter_x, (int32_t)h);
       break;
     case 16:
-      convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 filter_x, (int32_t)h, 1);
+      convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride, dst,
+                                 (int32_t)dst_stride, filter_x, (int32_t)h, 1);
       break;
     case 32:
-      convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 filter_x, (int32_t)h, 2);
+      convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride, dst,
+                                 (int32_t)dst_stride, filter_x, (int32_t)h, 2);
       break;
     case 64:
       prefetch_load(src + 64);
       prefetch_store(dst + 32);
 
-      convolve_bi_horiz_64_dspr2(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 filter_x, (int32_t)h);
+      convolve_bi_horiz_64_dspr2(src, (int32_t)src_stride, dst,
+                                 (int32_t)dst_stride, filter_x, (int32_t)h);
       break;
     default:
-      vpx_convolve8_horiz_c(src, src_stride,
-                            dst, dst_stride,
-                            filter_x, x_step_q4,
-                            filter_y, y_step_q4,
-                            w, h);
+      vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+                            x_step_q4, filter_y, y_step_q4, w, h);
       break;
   }
 }
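
A note on the filter45 idiom used throughout these bilinear kernels: with
filter = &filter_x0[3], the load filter45 = ((const int32_t *)filter)[0]
packs the two center taps into one word (filter_x0[3] in the low halfword,
filter_x0[4] in the high halfword on little-endian MIPS), which is the
operand shape dpa.w.ph consumes. A hedged scalar sketch of one output
pixel, assuming the usual vpx_dsp rounding of (sum + 64) >> 7 and a
[0, 255] clamp; this is an illustration, not the shipped code:

#include <stdint.h>

/* Sketch: one pixel of the bilinear horizontal path in plain C. */
static uint8_t bi_horiz_pixel(const uint8_t *src, const int16_t *filter_x0) {
  const int16_t f3 = filter_x0[3];              /* low halfword of filter45 */
  const int16_t f4 = filter_x0[4];              /* high halfword of filter45 */
  int32_t sum = 64 + src[0] * f3 + src[1] * f4; /* vector_64 seeds rounding */
  sum >>= 7;                /* what extp extracts after wrdsp sets pos = 38 */
  if (sum < 0) sum = 0;     /* the lbux lookup through vpx_ff_cropTbl */
  if (sum > 255) sum = 255; /* clamps exactly like this */
  return (uint8_t)sum;
}
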
diff --git a/vpx_dsp/mips/convolve2_vert_dspr2.c b/vpx_dsp/mips/convolve2_vert_dspr2.c
index dde6ffd54f8182bad34f3128079da4d94f87a0b4..eb1975e4475ac4883ae0b73391595361b35000d7 100644
--- a/vpx_dsp/mips/convolve2_vert_dspr2.c
+++ b/vpx_dsp/mips/convolve2_vert_dspr2.c
@@ -18,25 +18,22 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-static void convolve_bi_vert_4_dspr2(const uint8_t *src,
-                                     int32_t src_stride,
-                                     uint8_t *dst,
-                                     int32_t dst_stride,
-                                     const int16_t *filter_y,
-                                     int32_t w,
+static void convolve_bi_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
+                                     uint8_t *dst, int32_t dst_stride,
+                                     const int16_t *filter_y, int32_t w,
                                      int32_t h) {
-  int32_t       x, y;
+  int32_t x, y;
   const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2;
-  uint32_t      p1, p2;
-  uint32_t      scratch1;
-  uint32_t      store1, store2;
-  int32_t       Temp1, Temp2;
+  uint8_t *dst_ptr;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  uint32_t load1, load2;
+  uint32_t p1, p2;
+  uint32_t scratch1;
+  uint32_t store1, store2;
+  int32_t Temp1, Temp2;
   const int16_t *filter = &filter_y[3];
-  uint32_t      filter45;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -48,7 +45,7 @@ static void convolve_bi_vert_4_dspr2(const uint8_t *src,
       src_ptr = src + x;
       dst_ptr = dst + x;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[load1],     0(%[src_ptr])                   \n\t"
           "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
           "ulw              %[load2],     0(%[src_ptr])                   \n\t"
@@ -98,16 +95,12 @@ static void convolve_bi_vert_4_dspr2(const uint8_t *src,
           "sb               %[store1],    2(%[dst_ptr])                   \n\t"
           "sb               %[store2],    3(%[dst_ptr])                   \n\t"
 
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [scratch1] "=&r" (scratch1),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [filter45] "r" (filter45),[vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride),
-            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
+          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
+            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
+            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
+          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
+            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
     }
 
     /* Next row... */
@@ -116,24 +109,21 @@ static void convolve_bi_vert_4_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_bi_vert_64_dspr2(const uint8_t *src,
-                                      int32_t src_stride,
-                                      uint8_t *dst,
-                                      int32_t dst_stride,
-                                      const int16_t *filter_y,
-                                      int32_t h) {
-  int32_t       x, y;
+static void convolve_bi_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
+                                      uint8_t *dst, int32_t dst_stride,
+                                      const int16_t *filter_y, int32_t h) {
+  int32_t x, y;
   const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2;
-  uint32_t      p1, p2;
-  uint32_t      scratch1;
-  uint32_t      store1, store2;
-  int32_t       Temp1, Temp2;
+  uint8_t *dst_ptr;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  uint32_t load1, load2;
+  uint32_t p1, p2;
+  uint32_t scratch1;
+  uint32_t store1, store2;
+  int32_t Temp1, Temp2;
   const int16_t *filter = &filter_y[3];
-  uint32_t      filter45;
+  uint32_t filter45;
 
   filter45 = ((const int32_t *)filter)[0];
 
@@ -145,7 +135,7 @@ static void convolve_bi_vert_64_dspr2(const uint8_t *src,
       src_ptr = src + x;
       dst_ptr = dst + x;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[load1],     0(%[src_ptr])                   \n\t"
           "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
           "ulw              %[load2],     0(%[src_ptr])                   \n\t"
@@ -195,16 +185,12 @@ static void convolve_bi_vert_64_dspr2(const uint8_t *src,
           "sb               %[store1],    2(%[dst_ptr])                   \n\t"
           "sb               %[store2],    3(%[dst_ptr])                   \n\t"
 
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [scratch1] "=&r" (scratch1),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [filter45] "r" (filter45),[vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride),
-            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
+          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
+            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
+            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
+          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
+            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
     }
 
     /* Next row... */
@@ -216,42 +202,34 @@ static void convolve_bi_vert_64_dspr2(const uint8_t *src,
 void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4,
-                              int w, int h) {
+                              const int16_t *filter_y, int y_step_q4, int w,
+                              int h) {
   uint32_t pos = 38;
 
   assert(y_step_q4 == 16);
 
  /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
   prefetch_store(dst);
 
   switch (w) {
-    case 4 :
-    case 8 :
-    case 16 :
-    case 32 :
-      convolve_bi_vert_4_dspr2(src, src_stride,
-                               dst, dst_stride,
-                               filter_y, w, h);
+    case 4:
+    case 8:
+    case 16:
+    case 32:
+      convolve_bi_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w,
+                               h);
       break;
-    case 64 :
+    case 64:
       prefetch_store(dst + 32);
-      convolve_bi_vert_64_dspr2(src, src_stride,
-                                dst, dst_stride,
-                                filter_y, h);
+      convolve_bi_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
       break;
     default:
-      vpx_convolve8_vert_c(src, src_stride,
-                           dst, dst_stride,
-                           filter_x, x_step_q4,
-                           filter_y, y_step_q4,
-                           w, h);
+      vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+                           x_step_q4, filter_y, y_step_q4, w, h);
       break;
   }
 }
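
The wrdsp/extp pairing that opens each of these functions merits a gloss:
wrdsp %[pos], 1 writes pos = 38 into the DSPControl position field (the
mask operand 1 selects that field), and each later extp ..., 31 extracts a
32-bit field whose most significant bit sits at accumulator bit 38. Read
that way, extp behaves as a right shift by 7, matching the 7-bit filter
precision, with the accumulator preloaded with 64 for round-to-nearest. A
hedged scalar sketch under that reading (illustration only):

#include <stdint.h>

/* Sketch: what extp %[TempN], $acX, 31 returns once pos == 38. */
static int32_t extract_acc(int64_t acc) { return (int32_t)(acc >> 7); }

/* Sketch: one pixel of the bilinear vertical path, rounding included. */
static uint8_t bi_vert_pixel(const uint8_t *src, int32_t src_stride,
                             const int16_t *filter_y) {
  int64_t acc = 64; /* vector4a, preloaded into the accumulator via mtlo */
  acc += src[0] * filter_y[3] + src[src_stride] * filter_y[4];
  const int32_t v = extract_acc(acc);
  return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); /* the cm[] clamp */
}
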
diff --git a/vpx_dsp/mips/convolve8_avg_dspr2.c b/vpx_dsp/mips/convolve8_avg_dspr2.c
index 43da9e54fb2f7b88578d38a476c3660e6de7d884..31812299c34e1d17d909ed5b550f6aacf64ef97b 100644
--- a/vpx_dsp/mips/convolve8_avg_dspr2.c
+++ b/vpx_dsp/mips/convolve8_avg_dspr2.c
@@ -18,25 +18,22 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-static void convolve_avg_vert_4_dspr2(const uint8_t *src,
-                                      int32_t src_stride,
-                                      uint8_t *dst,
-                                      int32_t dst_stride,
-                                      const int16_t *filter_y,
-                                      int32_t w,
+static void convolve_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
+                                      uint8_t *dst, int32_t dst_stride,
+                                      const int16_t *filter_y, int32_t w,
                                       int32_t h) {
-  int32_t       x, y;
+  int32_t x, y;
   const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2, load3, load4;
-  uint32_t      p1, p2;
-  uint32_t      n1, n2;
-  uint32_t      scratch1, scratch2;
-  uint32_t      store1, store2;
-  int32_t       vector1b, vector2b, vector3b, vector4b;
-  int32_t       Temp1, Temp2;
+  uint8_t *dst_ptr;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  uint32_t load1, load2, load3, load4;
+  uint32_t p1, p2;
+  uint32_t n1, n2;
+  uint32_t scratch1, scratch2;
+  uint32_t store1, store2;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2;
 
   vector1b = ((const int32_t *)filter_y)[0];
   vector2b = ((const int32_t *)filter_y)[1];
@@ -53,7 +50,7 @@ static void convolve_avg_vert_4_dspr2(const uint8_t *src,
       src_ptr = src + x;
       dst_ptr = dst + x;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[load1],     0(%[src_ptr])                   \n\t"
           "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
           "ulw              %[load2],     0(%[src_ptr])                   \n\t"
@@ -160,18 +157,16 @@ static void convolve_avg_vert_4_dspr2(const uint8_t *src,
           "sb               %[store1],    2(%[dst_ptr])                   \n\t"
           "sb               %[store2],    3(%[dst_ptr])                   \n\t"
 
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [load3] "=&r" (load3), [load4] "=&r" (load4),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [n1] "=&r" (n1), [n2] "=&r" (n2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-            [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
+          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
+            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
+            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
+            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
+          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
+            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
     }
 
     /* Next row... */
@@ -180,24 +175,21 @@ static void convolve_avg_vert_4_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_avg_vert_64_dspr2(const uint8_t *src,
-                                       int32_t src_stride,
-                                       uint8_t *dst,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_y,
-                                       int32_t h) {
-  int32_t       x, y;
+static void convolve_avg_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
+                                       uint8_t *dst, int32_t dst_stride,
+                                       const int16_t *filter_y, int32_t h) {
+  int32_t x, y;
   const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2, load3, load4;
-  uint32_t      p1, p2;
-  uint32_t      n1, n2;
-  uint32_t      scratch1, scratch2;
-  uint32_t      store1, store2;
-  int32_t       vector1b, vector2b, vector3b, vector4b;
-  int32_t       Temp1, Temp2;
+  uint8_t *dst_ptr;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  uint32_t load1, load2, load3, load4;
+  uint32_t p1, p2;
+  uint32_t n1, n2;
+  uint32_t scratch1, scratch2;
+  uint32_t store1, store2;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2;
 
   vector1b = ((const int32_t *)filter_y)[0];
   vector2b = ((const int32_t *)filter_y)[1];
@@ -215,7 +207,7 @@ static void convolve_avg_vert_64_dspr2(const uint8_t *src,
       src_ptr = src + x;
       dst_ptr = dst + x;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[load1],     0(%[src_ptr])                   \n\t"
           "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
           "ulw              %[load2],     0(%[src_ptr])                   \n\t"
@@ -322,18 +314,16 @@ static void convolve_avg_vert_64_dspr2(const uint8_t *src,
           "sb               %[store1],    2(%[dst_ptr])                   \n\t"
           "sb               %[store2],    3(%[dst_ptr])                   \n\t"
 
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [load3] "=&r" (load3), [load4] "=&r" (load4),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [n1] "=&r" (n1), [n2] "=&r" (n2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-            [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
+          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
+            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
+            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
+            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
+          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
+            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
     }
 
     /* Next row... */
@@ -345,26 +335,21 @@ static void convolve_avg_vert_64_dspr2(const uint8_t *src,
 void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4,
-                                  int w, int h) {
+                                  const int16_t *filter_y, int y_step_q4, int w,
+                                  int h) {
   assert(y_step_q4 == 16);
   assert(((const int32_t *)filter_y)[1] != 0x800000);
 
   if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_avg_vert_dspr2(src, src_stride,
-                                 dst, dst_stride,
-                                 filter_x, x_step_q4,
-                                 filter_y, y_step_q4,
-                                 w, h);
+    vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                 x_step_q4, filter_y, y_step_q4, w, h);
   } else {
     uint32_t pos = 38;
 
    /* bit position for extract from acc */
-    __asm__ __volatile__ (
-      "wrdsp      %[pos],     1           \n\t"
-      :
-      : [pos] "r" (pos)
-    );
+    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                         :
+                         : [pos] "r"(pos));
 
     prefetch_store(dst);
 
@@ -373,22 +358,17 @@ void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
       case 8:
       case 16:
       case 32:
-        convolve_avg_vert_4_dspr2(src, src_stride,
-                                  dst, dst_stride,
-                                  filter_y, w, h);
+        convolve_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w,
+                                  h);
         break;
       case 64:
         prefetch_store(dst + 32);
-        convolve_avg_vert_64_dspr2(src, src_stride,
-                                   dst, dst_stride,
-                                   filter_y, h);
+        convolve_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y,
+                                   h);
         break;
       default:
-        vpx_convolve8_avg_vert_c(src, src_stride,
-                                 dst, dst_stride,
-                                 filter_x, x_step_q4,
-                                 filter_y, y_step_q4,
-                                 w, h);
+        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+                                 x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
@@ -397,8 +377,8 @@ void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
-                             const int16_t *filter_y, int y_step_q4,
-                             int w, int h) {
+                             const int16_t *filter_y, int y_step_q4, int w,
+                             int h) {
   /* Fixed size intermediate buffer places limits on parameters. */
   DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
   int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
@@ -408,27 +388,20 @@ void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
   assert(x_step_q4 == 16);
   assert(y_step_q4 == 16);
 
-  if (intermediate_height < h)
-    intermediate_height = h;
+  if (intermediate_height < h) intermediate_height = h;
 
-  vpx_convolve8_horiz(src - (src_stride * 3), src_stride,
-                      temp, 64,
-                      filter_x, x_step_q4,
-                      filter_y, y_step_q4,
-                      w, intermediate_height);
+  vpx_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x,
+                      x_step_q4, filter_y, y_step_q4, w, intermediate_height);
 
-  vpx_convolve8_avg_vert(temp + 64 * 3, 64,
-                         dst, dst_stride,
-                         filter_x, x_step_q4,
-                         filter_y, y_step_q4,
-                         w, h);
+  vpx_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+                         x_step_q4, filter_y, y_step_q4, w, h);
 }
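
The two-pass structure above is easy to misread, so a sketch of the row
arithmetic may help: the 8-tap vertical filter needs 3 rows above and 4
rows below each output row, which is why the horizontal pass starts at
src - src_stride * 3, renders into a 64-byte-stride scratch buffer, and
the vertical pass reads from temp + 64 * 3. A hedged restatement of the
intermediate_height computation (names here are illustrative):

/* Sketch: rows the first pass must produce for the second pass to read. */
enum { TEMP_STRIDE = 64, VERT_TAPS = 8 };

static int required_rows(int h, int y_step_q4) {
  int rows = ((h * y_step_q4) >> 4) + VERT_TAPS - 1; /* h + 7 at step 16 */
  return rows < h ? h : rows; /* the clamp applied to intermediate_height */
}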
 
 void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int filter_x_stride,
-                            const int16_t *filter_y, int filter_y_stride,
-                            int w, int h) {
+                            const int16_t *filter_y, int filter_y_stride, int w,
+                            int h) {
   int x, y;
   uint32_t tp1, tp2, tn1;
   uint32_t tp3, tp4, tn2;
@@ -441,21 +414,19 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
   switch (w) {
     case 4:
       /* 1 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_store(dst + dst_stride);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         0(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
+            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
 
-            : [tn1] "=&r" (tn1), [tp1] "=&r" (tp1),
-              [tp2] "=&r" (tp2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            : [tn1] "=&r"(tn1), [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
@@ -463,26 +434,24 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
       break;
     case 8:
       /* 2 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_store(dst + dst_stride);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         0(%[dst])      \n\t"
             "ulw              %[tp3],         4(%[src])      \n\t"
             "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
+            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         4(%[dst])      \n\t" /* store */
 
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
@@ -490,34 +459,32 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
       break;
     case 16:
       /* 4 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_store(dst + dst_stride);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         0(%[dst])      \n\t"
             "ulw              %[tp3],         4(%[src])      \n\t"
             "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         8(%[src])      \n\t"
             "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
+            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         4(%[dst])      \n\t" /* store */
             "ulw              %[tp3],         12(%[src])     \n\t"
             "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
+            "sw               %[tn1],         8(%[dst])      \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         12(%[dst])     \n\t" /* store */
 
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
@@ -525,50 +492,48 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
       break;
     case 32:
       /* 8 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_store(dst + dst_stride);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         0(%[dst])      \n\t"
             "ulw              %[tp3],         4(%[src])      \n\t"
             "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         8(%[src])      \n\t"
             "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
+            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         4(%[dst])      \n\t" /* store */
             "ulw              %[tp3],         12(%[src])     \n\t"
             "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         16(%[src])     \n\t"
             "ulw              %[tp2],         16(%[dst])     \n\t"
-            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
+            "sw               %[tn1],         8(%[dst])      \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         12(%[dst])     \n\t" /* store */
             "ulw              %[tp3],         20(%[src])     \n\t"
             "ulw              %[tp4],         20(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         24(%[src])     \n\t"
             "ulw              %[tp2],         24(%[dst])     \n\t"
-            "sw               %[tn1],         16(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         20(%[dst])     \n\t"  /* store */
+            "sw               %[tn1],         16(%[dst])     \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         20(%[dst])     \n\t" /* store */
             "ulw              %[tp3],         28(%[src])     \n\t"
             "ulw              %[tp4],         28(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         24(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         28(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
+            "sw               %[tn1],         24(%[dst])     \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         28(%[dst])     \n\t" /* store */
 
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
@@ -579,84 +544,82 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
       prefetch_store(dst + 32);
 
       /* 16 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_load(src + src_stride + 64);
         prefetch_store(dst + dst_stride);
         prefetch_store(dst + dst_stride + 32);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         0(%[dst])      \n\t"
             "ulw              %[tp3],         4(%[src])      \n\t"
             "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         8(%[src])      \n\t"
             "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
+            "sw               %[tn1],         0(%[dst])      \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         4(%[dst])      \n\t" /* store */
             "ulw              %[tp3],         12(%[src])     \n\t"
             "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         16(%[src])     \n\t"
             "ulw              %[tp2],         16(%[dst])     \n\t"
-            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
+            "sw               %[tn1],         8(%[dst])      \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         12(%[dst])     \n\t" /* store */
             "ulw              %[tp3],         20(%[src])     \n\t"
             "ulw              %[tp4],         20(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         24(%[src])     \n\t"
             "ulw              %[tp2],         24(%[dst])     \n\t"
-            "sw               %[tn1],         16(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         20(%[dst])     \n\t"  /* store */
+            "sw               %[tn1],         16(%[dst])     \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         20(%[dst])     \n\t" /* store */
             "ulw              %[tp3],         28(%[src])     \n\t"
             "ulw              %[tp4],         28(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         32(%[src])     \n\t"
             "ulw              %[tp2],         32(%[dst])     \n\t"
-            "sw               %[tn1],         24(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         28(%[dst])     \n\t"  /* store */
+            "sw               %[tn1],         24(%[dst])     \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         28(%[dst])     \n\t" /* store */
             "ulw              %[tp3],         36(%[src])     \n\t"
             "ulw              %[tp4],         36(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         40(%[src])     \n\t"
             "ulw              %[tp2],         40(%[dst])     \n\t"
-            "sw               %[tn1],         32(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         36(%[dst])     \n\t"  /* store */
+            "sw               %[tn1],         32(%[dst])     \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         36(%[dst])     \n\t" /* store */
             "ulw              %[tp3],         44(%[src])     \n\t"
             "ulw              %[tp4],         44(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         48(%[src])     \n\t"
             "ulw              %[tp2],         48(%[dst])     \n\t"
-            "sw               %[tn1],         40(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         44(%[dst])     \n\t"  /* store */
+            "sw               %[tn1],         40(%[dst])     \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         44(%[dst])     \n\t" /* store */
             "ulw              %[tp3],         52(%[src])     \n\t"
             "ulw              %[tp4],         52(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
             "ulw              %[tp1],         56(%[src])     \n\t"
             "ulw              %[tp2],         56(%[dst])     \n\t"
-            "sw               %[tn1],         48(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         52(%[dst])     \n\t"  /* store */
+            "sw               %[tn1],         48(%[dst])     \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         52(%[dst])     \n\t" /* store */
             "ulw              %[tp3],         60(%[src])     \n\t"
             "ulw              %[tp4],         60(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         56(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         60(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t" /* average */
+            "sw               %[tn1],         56(%[dst])     \n\t" /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t" /* average */
+            "sw               %[tn2],         60(%[dst])     \n\t" /* store */
+
+            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
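
Every width case in vpx_convolve_avg_dspr2 reduces to one primitive:
adduh_r.qb, a per-byte rounded halving add on a packed word, unrolled 1, 2,
4, 8, and 16 times for widths 4 through 64. A hedged scalar sketch of that
instruction as these loops use it (illustration only):

#include <stdint.h>

/* Sketch: rounded average of the four byte lanes of two packed words,
 * i.e. one load-src/load-dst/adduh_r.qb/store step of the loops above. */
static uint32_t adduh_r_qb(uint32_t a, uint32_t b) {
  uint32_t out = 0;
  for (int i = 0; i < 32; i += 8) {
    const uint32_t pa = (a >> i) & 0xff;
    const uint32_t pb = (b >> i) & 0xff;
    out |= ((pa + pb + 1) >> 1) << i; /* round-to-nearest halving add */
  }
  return out;
}
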
diff --git a/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c b/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c
index db0c2a4da5ad6c317ffe17acdef0e357df73e9a6..9a9bab25a59a79a087a121e3e4095d5f395aaa2b 100644
--- a/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c
+++ b/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c
@@ -18,16 +18,13 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-static void convolve_avg_horiz_4_dspr2(const uint8_t *src,
-                                       int32_t src_stride,
-                                       uint8_t *dst,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_x0,
-                                       int32_t h) {
+static void convolve_avg_horiz_4_dspr2(const uint8_t *src, int32_t src_stride,
+                                       uint8_t *dst, int32_t dst_stride,
+                                       const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
-  int32_t  vector1b, vector2b, vector3b, vector4b;
-  int32_t  Temp1, Temp2, Temp3, Temp4;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2, Temp3, Temp4;
   uint32_t vector4a = 64;
   uint32_t tp1, tp2;
   uint32_t p1, p2, p3, p4;
@@ -45,7 +42,7 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride + 32);
     prefetch_store(dst + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],         0(%[src])                      \n\t"
         "ulw              %[tp2],         4(%[src])                      \n\t"
 
@@ -76,13 +73,13 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src,
         "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
         "extp             %[Temp3],       $ac2,           31             \n\t"
 
-        "lbu              %[p2],          3(%[dst])                      \n\t"  /* load odd 2 */
+        "lbu              %[p2],          3(%[dst])                      \n\t" /* load odd 2 */
 
         /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"  /* even 1 */
+        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t" /* even 1 */
         "mtlo             %[vector4a],    $ac3                           \n\t"
         "mthi             $zero,          $ac3                           \n\t"
-        "lbu              %[Temp1],       1(%[dst])                      \n\t"  /* load odd 1 */
+        "lbu              %[Temp1],       1(%[dst])                      \n\t" /* load odd 1 */
         "preceu.ph.qbr    %[n1],          %[tp2]                         \n\t"
         "preceu.ph.qbl    %[n2],          %[tp2]                         \n\t"
         "preceu.ph.qbr    %[n3],          %[tn2]                         \n\t"
@@ -93,46 +90,44 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src,
         "dpa.w.ph         $ac3,           %[n4],          %[vector4b]    \n\t"
         "extp             %[Temp2],       $ac3,           31             \n\t"
 
-        "lbu              %[tn2],         0(%[dst])                      \n\t"  /* load even 1 */
+        "lbu              %[tn2],         0(%[dst])                      \n\t" /* load even 1 */
 
         /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"  /* even 2 */
+        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t" /* even 2 */
         "mtlo             %[vector4a],    $ac2                           \n\t"
         "mthi             $zero,          $ac2                           \n\t"
         "preceu.ph.qbr    %[n1],          %[tn1]                         \n\t"
-        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"  /* odd 1 */
-        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t"  /* average even 1 */
+        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t" /* odd 1 */
+        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t" /* average even 1 */
         "dpa.w.ph         $ac2,           %[n2],          %[vector1b]    \n\t"
         "dpa.w.ph         $ac2,           %[n3],          %[vector2b]    \n\t"
         "dpa.w.ph         $ac2,           %[n4],          %[vector3b]    \n\t"
         "dpa.w.ph         $ac2,           %[n1],          %[vector4b]    \n\t"
         "extp             %[Temp4],       $ac2,           31             \n\t"
 
-        "lbu              %[tp1],         2(%[dst])                      \n\t"  /* load even 2 */
-        "sb               %[tn2],         0(%[dst])                      \n\t"  /* store even 1 */
+        "lbu              %[tp1],         2(%[dst])                      \n\t" /* load even 2 */
+        "sb               %[tn2],         0(%[dst])                      \n\t" /* store even 1 */
 
         /* clamp */
-        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t"  /* average odd 1 */
-        "lbux             %[n2],          %[Temp4](%[cm])                \n\t"  /* odd 2 */
-        "sb               %[Temp1],       1(%[dst])                      \n\t"  /* store odd 1 */
-
-        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t"  /* average even 2 */
-        "sb               %[tp1],         2(%[dst])                      \n\t"  /* store even 2 */
-
-        "addqh_r.w        %[p2],          %[p2],          %[n2]          \n\t"  /* average odd 2 */
-        "sb               %[p2],          3(%[dst])                      \n\t"  /* store odd 2 */
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
+        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t" /* average odd 1 */
+        "lbux             %[n2],          %[Temp4](%[cm])                \n\t" /* odd 2 */
+        "sb               %[Temp1],       1(%[dst])                      \n\t" /* store odd 1 */
+
+        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t" /* average even 2 */
+        "sb               %[tp1],         2(%[dst])                      \n\t" /* store even 2 */
+
+        "addqh_r.w        %[p2],          %[p2],          %[n2]          \n\t" /* average odd 2 */
+        "sb               %[p2],          3(%[dst])                      \n\t" /* store odd 2 */
+
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+          [p4] "=&r"(p4), [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3),
+          [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
+        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
+          [src] "r"(src));
 
     /* Next row... */
     src += src_stride;
@@ -140,12 +135,9 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_avg_horiz_8_dspr2(const uint8_t *src,
-                                       int32_t src_stride,
-                                       uint8_t *dst,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_x0,
-                                       int32_t h) {
+static void convolve_avg_horiz_8_dspr2(const uint8_t *src, int32_t src_stride,
+                                       uint8_t *dst, int32_t dst_stride,
+                                       const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
   uint32_t vector4a = 64;
@@ -167,7 +159,7 @@ static void convolve_avg_horiz_8_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride + 32);
     prefetch_store(dst + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],         0(%[src])                      \n\t"
         "ulw              %[tp2],         4(%[src])                      \n\t"
 
@@ -309,17 +301,15 @@ static void convolve_avg_horiz_8_dspr2(const uint8_t *src,
         "sb               %[tn3],         5(%[dst])                      \n\t"
         "sb               %[tn1],         7(%[dst])                      \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), [tn3] "=&r" (tn3),
-          [st0] "=&r" (st0), [st1] "=&r" (st1),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+          [tn2] "=&r"(tn2), [tn3] "=&r"(tn3), [st0] "=&r"(st0),
+          [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+          [p4] "=&r"(p4), [n1] "=&r"(n1), [Temp1] "=&r"(Temp1),
+          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
+        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
+          [src] "r"(src));
 
     /* Next row... */
     src += src_stride;
@@ -328,11 +318,9 @@ static void convolve_avg_horiz_8_dspr2(const uint8_t *src,
 }
 
 static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr,
-                                        int32_t src_stride,
-                                        uint8_t *dst_ptr,
+                                        int32_t src_stride, uint8_t *dst_ptr,
                                         int32_t dst_stride,
-                                        const int16_t *filter_x0,
-                                        int32_t h,
+                                        const int16_t *filter_x0, int32_t h,
                                         int32_t count) {
   int32_t y, c;
   const uint8_t *src;
@@ -360,7 +348,7 @@ static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr,
     prefetch_store(dst_ptr + dst_stride);
 
     for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[qload1],    0(%[src])                    \n\t"
           "ulw              %[qload2],    4(%[src])                    \n\t"
 
@@ -618,16 +606,15 @@ static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr,
           "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
           "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
 
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
+            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
+            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
+            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+            [Temp3] "=&r"(Temp3)
+          : [filter12] "r"(filter12), [filter34] "r"(filter34),
+            [filter56] "r"(filter56), [filter78] "r"(filter78),
+            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
+            [src] "r"(src));
 
       src += 16;
       dst += 16;
@@ -640,11 +627,9 @@ static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr,
 }
 
 static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr,
-                                        int32_t src_stride,
-                                        uint8_t *dst_ptr,
+                                        int32_t src_stride, uint8_t *dst_ptr,
                                         int32_t dst_stride,
-                                        const int16_t *filter_x0,
-                                        int32_t h) {
+                                        const int16_t *filter_x0, int32_t h) {
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
@@ -673,7 +658,7 @@ static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr,
     prefetch_store(dst_ptr + dst_stride + 32);
 
     for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[qload1],    0(%[src])                    \n\t"
           "ulw              %[qload2],    4(%[src])                    \n\t"
 
@@ -931,16 +916,15 @@ static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr,
           "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
           "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
 
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
+            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
+            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
+            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+            [Temp3] "=&r"(Temp3)
+          : [filter12] "r"(filter12), [filter34] "r"(filter34),
+            [filter56] "r"(filter56), [filter78] "r"(filter78),
+            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
+            [src] "r"(src));
 
       src += 16;
       dst += 16;
@@ -961,22 +945,17 @@ void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
   assert(((const int32_t *)filter_x)[1] != 0x800000);
 
   if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_avg_horiz_dspr2(src, src_stride,
-                                  dst, dst_stride,
-                                  filter_x, x_step_q4,
-                                  filter_y, y_step_q4,
-                                  w, h);
+    vpx_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                  x_step_q4, filter_y, y_step_q4, w, h);
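+    /* Editor's note: treating the eight int16 taps as int32 pairs, a zero
+       first pair is the cheap test for the 2-tap bilinear case, so the
+       convolve2 kernel, which applies only the middle taps, can be used
+       instead. */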
   } else {
     uint32_t pos = 38;
 
     src -= 3;
 
     /* bit position for extract from acc */
-    __asm__ __volatile__ (
-      "wrdsp      %[pos],     1           \n\t"
-      :
-      : [pos] "r" (pos)
-    );
+    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                         :
+                         : [pos] "r"(pos));
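+    /* Editor's note: per the MIPS DSP ASE, this writes only the pos field
+       of DSPControl; the "extp ..., 31" instructions in the kernels then
+       extract the 32 bits at positions 38..7 of the accumulator, so each
+       result is effectively (acc + 64) >> 7, with 64 preloaded via mtlo as
+       the Q7 rounding constant. */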
 
     /* prefetch data to cache memory */
     prefetch_load(src);
@@ -985,39 +964,32 @@ void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
     switch (w) {
       case 4:
-        convolve_avg_horiz_4_dspr2(src, src_stride,
-                                   dst, dst_stride,
-                                   filter_x, h);
+        convolve_avg_horiz_4_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                   h);
         break;
       case 8:
-        convolve_avg_horiz_8_dspr2(src, src_stride,
-                                   dst, dst_stride,
-                                   filter_x, h);
+        convolve_avg_horiz_8_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                   h);
         break;
       case 16:
-        convolve_avg_horiz_16_dspr2(src, src_stride,
-                                    dst, dst_stride,
-                                    filter_x, h, 1);
+        convolve_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                    h, 1);
         break;
       case 32:
-        convolve_avg_horiz_16_dspr2(src, src_stride,
-                                    dst, dst_stride,
-                                    filter_x, h, 2);
+        convolve_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                    h, 2);
         break;
       case 64:
         prefetch_load(src + 64);
         prefetch_store(dst + 32);
 
-        convolve_avg_horiz_64_dspr2(src, src_stride,
-                                    dst, dst_stride,
-                                    filter_x, h);
+        convolve_avg_horiz_64_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                                    h);
         break;
       default:
-        vpx_convolve8_avg_horiz_c(src + 3, src_stride,
-                                  dst, dst_stride,
-                                  filter_x, x_step_q4,
-                                  filter_y, y_step_q4,
-                                  w, h);
+        vpx_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride,
+                                  filter_x, x_step_q4, filter_y, y_step_q4, w,
+                                  h);
         break;
     }
   }
diff --git a/vpx_dsp/mips/convolve8_dspr2.c b/vpx_dsp/mips/convolve8_dspr2.c
index ddad186922835c8933fa1c377e7e966b43dc4925..789ec8d53d8313ae7672b041a1a858d53ad85745 100644
--- a/vpx_dsp/mips/convolve8_dspr2.c
+++ b/vpx_dsp/mips/convolve8_dspr2.c
@@ -19,8 +19,7 @@
 
 #if HAVE_DSPR2
 static void convolve_horiz_4_transposed_dspr2(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               const int16_t *filter_x0,
                                               int32_t h) {
@@ -45,7 +44,7 @@ static void convolve_horiz_4_transposed_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride);
     prefetch_load(src + src_stride + 32);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],         0(%[src])                      \n\t"
         "ulw              %[tp2],         4(%[src])                      \n\t"
 
@@ -118,15 +117,14 @@ static void convolve_horiz_4_transposed_dspr2(const uint8_t *src,
         "sb               %[p2],          0(%[dst_ptr])                  \n\t"
         "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4),
-          [dst_ptr] "+r" (dst_ptr)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+          [p4] "=&r"(p4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4), [dst_ptr] "+r"(dst_ptr)
+        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+          [vector4a] "r"(vector4a), [cm] "r"(cm), [src] "r"(src),
+          [dst_stride] "r"(dst_stride));
 
     /* Next row... */
     src += src_stride;
@@ -135,8 +133,7 @@ static void convolve_horiz_4_transposed_dspr2(const uint8_t *src,
 }
 
 static void convolve_horiz_8_transposed_dspr2(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               const int16_t *filter_x0,
                                               int32_t h) {
@@ -164,7 +161,7 @@ static void convolve_horiz_8_transposed_dspr2(const uint8_t *src,
     dst_ptr = dst;
     odd_dst = (dst_ptr + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp2],         0(%[src])                       \n\t"
         "ulw              %[tp1],         4(%[src])                       \n\t"
 
@@ -293,16 +290,14 @@ static void convolve_horiz_8_transposed_dspr2(const uint8_t *src,
 
         "sb               %[n1],          0(%[odd_dst])                   \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-          [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a), [cm] "r" (cm),
-          [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), [p1] "=&r"(p1),
+          [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [n1] "=&r"(n1),
+          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+          [dst_ptr] "+r"(dst_ptr), [odd_dst] "+r"(odd_dst)
+        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+          [vector4a] "r"(vector4a), [cm] "r"(cm), [src] "r"(src),
+          [dst_pitch_2] "r"(dst_pitch_2));
 
     /* Next row... */
     src += src_stride;
@@ -310,25 +305,21 @@ static void convolve_horiz_8_transposed_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
-                                               int32_t src_stride,
-                                               uint8_t *dst_ptr,
-                                               int32_t dst_stride,
-                                               const int16_t *filter_x0,
-                                               int32_t h,
-                                               int32_t count) {
+static void convolve_horiz_16_transposed_dspr2(
+    const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
+    int32_t dst_stride, const int16_t *filter_x0, int32_t h, int32_t count) {
   int32_t c, y;
   const uint8_t *src;
   uint8_t *dst;
   uint8_t *cm = vpx_ff_cropTbl;
   uint32_t vector_64 = 64;
-  int32_t  filter12, filter34, filter56, filter78;
-  int32_t  Temp1, Temp2, Temp3;
+  int32_t filter12, filter34, filter56, filter78;
+  int32_t Temp1, Temp2, Temp3;
   uint32_t qload1, qload2;
   uint32_t p1, p2, p3, p4, p5;
   uint32_t st1, st2, st3;
   uint32_t dst_pitch_2 = (dst_stride << 1);
-  uint8_t  *odd_dst;
+  uint8_t *odd_dst;
 
   filter12 = ((const int32_t *)filter_x0)[0];
   filter34 = ((const int32_t *)filter_x0)[1];
@@ -346,248 +337,439 @@ static void convolve_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
     odd_dst = (dst + dst_stride);
 
     for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],        0(%[src])                       \n\t"
-          "ulw              %[qload2],        4(%[src])                       \n\t"
+      __asm__ __volatile__(
+          "ulw              %[qload1],        0(%[src])                       "
+          "\n\t"
+          "ulw              %[qload2],        4(%[src])                       "
+          "\n\t"
 
           /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "ulw              %[qload2],        8(%[src])                       \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter34]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter56]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter78]     \n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 1 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 2 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       "
+          "\n\t"
+          "ulw              %[qload2],        8(%[src])                       "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter34]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter56]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter78]     "
+          "\n\t" /* even 1 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 1 */
 
           /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "ulw              %[qload1],        12(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter12]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter34]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter56]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p1],          %[filter78]     \n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* even 3 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       "
+          "\n\t"
+          "ulw              %[qload1],        12(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p2],          %[filter12]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter34]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter56]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p1],          %[filter78]     "
+          "\n\t" /* even 1 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 1 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 1 */
 
           /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter12]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter34]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter56]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p5],          %[filter78]     \n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 4 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 1 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]   "
+          "          \n\t"
+          "dpa.w.ph         $ac3,             %[p3],          %[filter12]     "
+          "\n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter34]     "
+          "\n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter56]     "
+          "\n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p5],          %[filter78]     "
+          "\n\t" /* even 3 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* even 3 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 1 */
 
           /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        16(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter12]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter34]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter56]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter78]     \n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 5 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 2 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload2],        16(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p4],          %[filter12]     "
+          "\n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter34]     "
+          "\n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter56]     "
+          "\n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter78]     "
+          "\n\t" /* even 4 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 4 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* even 3 */
 
           /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter12]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter34]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p2],          %[filter56]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter78]     \n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* even 6 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p4],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st3],           0(%[dst])                       "
+          "\n\t" /* even 3 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter12]     "
+          "\n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter34]     "
+          "\n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p2],          %[filter56]     "
+          "\n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter78]     "
+          "\n\t" /* even 5 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 5 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 4 */
 
           /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        20(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter12]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 7 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p1],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 4 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload1],        20(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p5],          %[filter12]     "
+          "\n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     "
+          "\n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     "
+          "\n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     "
+          "\n\t" /* even 6 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* even 6 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 5 */
 
           /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 8 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 5 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     "
+          "\n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     "
+          "\n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     "
+          "\n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     "
+          "\n\t" /* even 7 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 7 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* even 6 */
 
           /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* even 8 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 1 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     "
+          "\n\t" /* even 8 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     "
+          "\n\t" /* even 8 */
+          "sb               %[st3],           0(%[dst])                       "
+          "\n\t" /* even 6 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     "
+          "\n\t" /* even 8 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     "
+          "\n\t" /* even 8 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 8 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 7 */
 
           /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       \n\t"
-          "ulw              %[qload2],        5(%[src])                       \n\t"
+          "ulw              %[qload1],        1(%[src])                       "
+          "\n\t"
+          "ulw              %[qload2],        5(%[src])                       "
+          "\n\t"
 
           /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        9(%[src])                       \n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter12]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 2 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 7 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload2],        9(%[src])                       "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p1],          %[filter12]     "
+          "\n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     "
+          "\n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     "
+          "\n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     "
+          "\n\t" /* odd 1 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 1 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 8 */
 
           /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* odd 3 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 8 */
+          "ulw              %[qload1],        13(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     "
+          "\n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     "
+          "\n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     "
+          "\n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     "
+          "\n\t" /* odd 2 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 2 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 1 */
 
           /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 4 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 1 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     "
+          "\n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     "
+          "\n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     "
+          "\n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     "
+          "\n\t" /* odd 3 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* odd 3 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 2 */
 
           /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        17(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter12]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter34]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p5],          %[filter56]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter78]     \n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 5 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 2 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload2],        17(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p4],          %[filter12]     "
+          "\n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter34]     "
+          "\n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p5],          %[filter56]     "
+          "\n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter78]     "
+          "\n\t" /* odd 4 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 4 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* odd 3 */
 
           /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter34]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter56]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter78]     \n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* odd 6 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p4],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[odd_dst])                   "
+          "\n\t" /* odd 3 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     "
+          "\n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter34]     "
+          "\n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter56]     "
+          "\n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter78]     "
+          "\n\t" /* odd 5 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 5 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 4 */
 
           /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        21(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter12]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p2],          %[filter34]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter56]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter78]     \n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 7 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p1],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 4 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload1],        21(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p5],          %[filter12]     "
+          "\n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p2],          %[filter34]     "
+          "\n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter56]     "
+          "\n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter78]     "
+          "\n\t" /* odd 6 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* odd 6 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 5 */
 
           /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter12]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter34]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter56]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter78]     \n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 8 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 5 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p2],          %[filter12]     "
+          "\n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter34]     "
+          "\n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter56]     "
+          "\n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter78]     "
+          "\n\t" /* odd 7 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 7 */
 
           /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter12]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter34]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter56]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter78]     \n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64), [cm] "r" (cm),
-            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-      );
+          "dpa.w.ph         $ac1,             %[p3],          %[filter12]     "
+          "\n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter34]     "
+          "\n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter56]     "
+          "\n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter78]     "
+          "\n\t" /* odd 8 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 8 */
+
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* odd 6 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 7 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 8 */
+
+          "sb               %[st2],           0(%[odd_dst])                   "
+          "\n\t" /* odd 6 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 7 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 8 */
+
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
+            [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
+            [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
+            [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+            [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
+          : [filter12] "r"(filter12), [filter34] "r"(filter34),
+            [filter56] "r"(filter56), [filter78] "r"(filter78),
+            [vector_64] "r"(vector_64), [cm] "r"(cm), [src] "r"(src),
+            [dst_pitch_2] "r"(dst_pitch_2));
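+      /* Editor's note: dst advances by dst_pitch_2 (2 * dst_stride) and the
+         odd results go through odd_dst, so this kernel writes its output
+         down a column rather than along a row; presumably the block lands
+         transposed so that a second horizontal pass can stand in for the
+         vertical filter. */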
 
       src += 16;
       dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
@@ -601,24 +783,21 @@ static void convolve_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
   }
 }
 
-static void convolve_horiz_64_transposed_dspr2(const uint8_t *src_ptr,
-                                               int32_t src_stride,
-                                               uint8_t *dst_ptr,
-                                               int32_t dst_stride,
-                                               const int16_t *filter_x0,
-                                               int32_t h) {
+static void convolve_horiz_64_transposed_dspr2(
+    const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
+    int32_t dst_stride, const int16_t *filter_x0, int32_t h) {
   int32_t c, y;
   const uint8_t *src;
   uint8_t *dst;
   uint8_t *cm = vpx_ff_cropTbl;
   uint32_t vector_64 = 64;
-  int32_t  filter12, filter34, filter56, filter78;
-  int32_t  Temp1, Temp2, Temp3;
+  int32_t filter12, filter34, filter56, filter78;
+  int32_t Temp1, Temp2, Temp3;
   uint32_t qload1, qload2;
   uint32_t p1, p2, p3, p4, p5;
   uint32_t st1, st2, st3;
   uint32_t dst_pitch_2 = (dst_stride << 1);
-  uint8_t  *odd_dst;
+  uint8_t *odd_dst;
 
   filter12 = ((const int32_t *)filter_x0)[0];
   filter34 = ((const int32_t *)filter_x0)[1];
@@ -637,248 +816,439 @@ static void convolve_horiz_64_transposed_dspr2(const uint8_t *src_ptr,
     odd_dst = (dst + dst_stride);
 
     for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],        0(%[src])                       \n\t"
-          "ulw              %[qload2],        4(%[src])                       \n\t"
+      __asm__ __volatile__(
+          "ulw              %[qload1],        0(%[src])                       "
+          "\n\t"
+          "ulw              %[qload2],        4(%[src])                       "
+          "\n\t"
 
           /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "ulw              %[qload2],        8(%[src])                       \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter34]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter56]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter78]     \n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 1 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 2 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       "
+          "\n\t"
+          "ulw              %[qload2],        8(%[src])                       "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter34]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter56]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter78]     "
+          "\n\t" /* even 1 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 1 */
 
           /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "ulw              %[qload1],        12(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter12]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter34]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter56]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p1],          %[filter78]     \n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* even 3 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       "
+          "\n\t"
+          "ulw              %[qload1],        12(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p2],          %[filter12]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter34]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter56]     "
+          "\n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p1],          %[filter78]     "
+          "\n\t" /* even 1 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 1 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 1 */
 
           /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter12]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter34]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter56]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p5],          %[filter78]     \n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 4 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 1 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]   "
+          "          \n\t"
+          "dpa.w.ph         $ac3,             %[p3],          %[filter12]     "
+          "\n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter34]     "
+          "\n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter56]     "
+          "\n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p5],          %[filter78]     "
+          "\n\t" /* even 3 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* even 3 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 1 */
 
           /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        16(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter12]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter34]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter56]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter78]     \n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 5 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 2 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload2],        16(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p4],          %[filter12]     "
+          "\n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter34]     "
+          "\n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter56]     "
+          "\n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter78]     "
+          "\n\t" /* even 4 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 4 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* even 3 */
 
           /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter12]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter34]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p2],          %[filter56]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter78]     \n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* even 6 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p4],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st3],           0(%[dst])                       "
+          "\n\t" /* even 3 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter12]     "
+          "\n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter34]     "
+          "\n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p2],          %[filter56]     "
+          "\n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter78]     "
+          "\n\t" /* even 5 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 5 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 4 */
 
           /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        20(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter12]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* even 7 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p1],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 4 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload1],        20(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p5],          %[filter12]     "
+          "\n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     "
+          "\n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     "
+          "\n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     "
+          "\n\t" /* even 6 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* even 6 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 5 */
 
           /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* even 8 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 5 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     "
+          "\n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     "
+          "\n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     "
+          "\n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     "
+          "\n\t" /* even 7 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* even 7 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* even 6 */
 
           /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* even 8 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 1 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     "
+          "\n\t" /* even 8 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     "
+          "\n\t" /* even 8 */
+          "sb               %[st3],           0(%[dst])                       "
+          "\n\t" /* even 6 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     "
+          "\n\t" /* even 8 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     "
+          "\n\t" /* even 8 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* even 8 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* even 7 */
 
           /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       \n\t"
-          "ulw              %[qload2],        5(%[src])                       \n\t"
+          "ulw              %[qload1],        1(%[src])                       "
+          "\n\t"
+          "ulw              %[qload2],        5(%[src])                       "
+          "\n\t"
 
           /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        9(%[src])                       \n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter12]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 2 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       "
+          "\n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[dst])                       "
+          "\n\t" /* even 7 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload2],        9(%[src])                       "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p1],          %[filter12]     "
+          "\n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     "
+          "\n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     "
+          "\n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     "
+          "\n\t" /* odd 1 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 1 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* even 8 */
 
           /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* odd 3 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       "
+          "\n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[dst])                       "
+          "\n\t" /* even 8 */
+          "ulw              %[qload1],        13(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     "
+          "\n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     "
+          "\n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     "
+          "\n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     "
+          "\n\t" /* odd 2 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 2 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 1 */
 
           /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 4 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 1 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     "
+          "\n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     "
+          "\n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     "
+          "\n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     "
+          "\n\t" /* odd 3 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* odd 3 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 2 */
 
           /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        17(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter12]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter34]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p5],          %[filter56]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter78]     \n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 5 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 2 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload2],        17(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p4],          %[filter12]     "
+          "\n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter34]     "
+          "\n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p5],          %[filter56]     "
+          "\n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter78]     "
+          "\n\t" /* odd 4 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 4 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* odd 3 */
 
           /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter34]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter56]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter78]     \n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
+          "mtlo             %[vector_64],     $ac2                            "
+          "\n\t" /* odd 6 */
+          "mthi             $zero,            $ac2                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p4],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st2],           0(%[odd_dst])                   "
+          "\n\t" /* odd 3 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     "
+          "\n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter34]     "
+          "\n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter56]     "
+          "\n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter78]     "
+          "\n\t" /* odd 5 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 5 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 4 */
 
           /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        21(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter12]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p2],          %[filter34]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter56]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter78]     \n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
+          "mtlo             %[vector_64],     $ac3                            "
+          "\n\t" /* odd 7 */
+          "mthi             $zero,            $ac3                            "
+          "\n\t"
+          "preceu.ph.qbl    %[p1],            %[qload2]                       "
+          "\n\t"
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 4 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "ulw              %[qload1],        21(%[src])                      "
+          "\n\t"
+          "dpa.w.ph         $ac2,             %[p5],          %[filter12]     "
+          "\n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p2],          %[filter34]     "
+          "\n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter56]     "
+          "\n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter78]     "
+          "\n\t" /* odd 6 */
+          "extp             %[Temp2],         $ac2,           31              "
+          "\n\t" /* odd 6 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 5 */
 
           /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter12]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter34]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter56]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter78]     \n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
+          "mtlo             %[vector_64],     $ac1                            "
+          "\n\t" /* odd 8 */
+          "mthi             $zero,            $ac1                            "
+          "\n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       "
+          "\n\t"
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 5 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+          "dpa.w.ph         $ac3,             %[p2],          %[filter12]     "
+          "\n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter34]     "
+          "\n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter56]     "
+          "\n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter78]     "
+          "\n\t" /* odd 7 */
+          "extp             %[Temp3],         $ac3,           31              "
+          "\n\t" /* odd 7 */
 
           /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter12]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter34]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter56]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter78]     \n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64), [cm] "r" (cm),
-            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-      );
+          "dpa.w.ph         $ac1,             %[p3],          %[filter12]     "
+          "\n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter34]     "
+          "\n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter56]     "
+          "\n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter78]     "
+          "\n\t" /* odd 8 */
+          "extp             %[Temp1],         $ac1,           31              "
+          "\n\t" /* odd 8 */
+
+          "lbux             %[st2],           %[Temp2](%[cm])                 "
+          "\n\t" /* odd 6 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 "
+          "\n\t" /* odd 7 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 "
+          "\n\t" /* odd 8 */
+
+          "sb               %[st2],           0(%[odd_dst])                   "
+          "\n\t" /* odd 6 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+
+          "sb               %[st3],           0(%[odd_dst])                   "
+          "\n\t" /* odd 7 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  "
+          "\n\t"
+
+          "sb               %[st1],           0(%[odd_dst])                   "
+          "\n\t" /* odd 8 */
+
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
+            [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
+            [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
+            [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+            [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
+          : [filter12] "r"(filter12), [filter34] "r"(filter34),
+            [filter56] "r"(filter56), [filter78] "r"(filter78),
+            [vector_64] "r"(vector_64), [cm] "r"(cm), [src] "r"(src),
+            [dst_pitch_2] "r"(dst_pitch_2));
 
       src += 16;
       dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
@@ -901,8 +1271,7 @@ void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
     for (x = 0; x < w; ++x) {
       int sum = 0;
 
-      for (k = 0; k < 8; ++k)
-        sum += src[x + k] * filter[k];
+      for (k = 0; k < 8; ++k) sum += src[x + k] * filter[k];
 
       dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
     }
@@ -913,8 +1282,7 @@ void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
 }
 
 void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
-                           uint8_t *dst, ptrdiff_t dst_stride,
-                           int w, int h) {
+                           uint8_t *dst, ptrdiff_t dst_stride, int w, int h) {
   int x, y;
 
   for (y = 0; y < h; ++y) {
@@ -927,10 +1295,9 @@ void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
   }
 }
 
-void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter_x, int x_step_q4,
-                         const int16_t *filter_y, int y_step_q4,
+void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                         ptrdiff_t dst_stride, const int16_t *filter_x,
+                         int x_step_q4, const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
   DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
   int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
@@ -941,27 +1308,20 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
   assert(((const int32_t *)filter_x)[1] != 0x800000);
   assert(((const int32_t *)filter_y)[1] != 0x800000);
 
-
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
-  if (intermediate_height < h)
-    intermediate_height = h;
+  if (intermediate_height < h) intermediate_height = h;
 
   /* copy the src to dst */
   if (filter_x[3] == 0x80) {
-    copy_horiz_transposed(src - src_stride * 3, src_stride,
-                          temp, intermediate_height,
-                          w, intermediate_height);
+    copy_horiz_transposed(src - src_stride * 3, src_stride, temp,
+                          intermediate_height, w, intermediate_height);
   } else if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_dspr2(src - src_stride * 3, src_stride,
-                        temp, intermediate_height,
-                        filter_x,
-                        w, intermediate_height);
+    vpx_convolve2_dspr2(src - src_stride * 3, src_stride, temp,
+                        intermediate_height, filter_x, w, intermediate_height);
   } else {
     src -= (src_stride * 3 + 3);
 
@@ -971,31 +1331,29 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
     switch (w) {
       case 4:
-        convolve_horiz_4_transposed_dspr2(src, src_stride,
-                                          temp, intermediate_height,
-                                          filter_x, intermediate_height);
+        convolve_horiz_4_transposed_dspr2(src, src_stride, temp,
+                                          intermediate_height, filter_x,
+                                          intermediate_height);
         break;
       case 8:
-        convolve_horiz_8_transposed_dspr2(src, src_stride,
-                                          temp, intermediate_height,
-                                          filter_x, intermediate_height);
+        convolve_horiz_8_transposed_dspr2(src, src_stride, temp,
+                                          intermediate_height, filter_x,
+                                          intermediate_height);
         break;
       case 16:
       case 32:
-        convolve_horiz_16_transposed_dspr2(src, src_stride,
-                                           temp, intermediate_height,
-                                           filter_x, intermediate_height,
-                                           (w/16));
+        convolve_horiz_16_transposed_dspr2(src, src_stride, temp,
+                                           intermediate_height, filter_x,
+                                           intermediate_height, (w / 16));
         break;
       case 64:
         prefetch_load(src + 32);
-        convolve_horiz_64_transposed_dspr2(src, src_stride,
-                                           temp, intermediate_height,
-                                           filter_x, intermediate_height);
+        convolve_horiz_64_transposed_dspr2(src, src_stride, temp,
+                                           intermediate_height, filter_x,
+                                           intermediate_height);
         break;
       default:
-        convolve_horiz_transposed(src, src_stride,
-                                  temp, intermediate_height,
+        convolve_horiz_transposed(src, src_stride, temp, intermediate_height,
                                   filter_x, w, intermediate_height);
         break;
     }
@@ -1003,40 +1361,31 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
   /* copy the src to dst */
   if (filter_y[3] == 0x80) {
-    copy_horiz_transposed(temp + 3, intermediate_height,
-                          dst, dst_stride,
-                          h, w);
+    copy_horiz_transposed(temp + 3, intermediate_height, dst, dst_stride, h, w);
   } else if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_dspr2(temp + 3, intermediate_height,
-                        dst, dst_stride,
-                        filter_y,
-                        h, w);
+    vpx_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride,
+                        filter_y, h, w);
   } else {
     switch (h) {
       case 4:
-        convolve_horiz_4_transposed_dspr2(temp, intermediate_height,
-                                          dst, dst_stride,
-                                          filter_y, w);
+        convolve_horiz_4_transposed_dspr2(temp, intermediate_height, dst,
+                                          dst_stride, filter_y, w);
         break;
       case 8:
-        convolve_horiz_8_transposed_dspr2(temp, intermediate_height,
-                                          dst, dst_stride,
-                                          filter_y, w);
+        convolve_horiz_8_transposed_dspr2(temp, intermediate_height, dst,
+                                          dst_stride, filter_y, w);
         break;
       case 16:
       case 32:
-        convolve_horiz_16_transposed_dspr2(temp, intermediate_height,
-                                           dst, dst_stride,
-                                           filter_y, w, (h/16));
+        convolve_horiz_16_transposed_dspr2(temp, intermediate_height, dst,
+                                           dst_stride, filter_y, w, (h / 16));
         break;
       case 64:
-        convolve_horiz_64_transposed_dspr2(temp, intermediate_height,
-                                           dst, dst_stride,
-                                           filter_y, w);
+        convolve_horiz_64_transposed_dspr2(temp, intermediate_height, dst,
+                                           dst_stride, filter_y, w);
         break;
       default:
-        convolve_horiz_transposed(temp, intermediate_height,
-                                  dst, dst_stride,
+        convolve_horiz_transposed(temp, intermediate_height, dst, dst_stride,
                                   filter_y, h, w);
         break;
     }
@@ -1056,97 +1405,87 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
   prefetch_store(dst);
 
   switch (w) {
-    case 4:
-      {
+    case 4: {
       uint32_t tp1;
 
       /* 1 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_store(dst + dst_stride);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         (%[src])      \n\t"
-            "sw               %[tp1],         (%[dst])      \n\t"  /* store */
+            "sw               %[tp1],         (%[dst])      \n\t" /* store */
 
-            : [tp1] "=&r" (tp1)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            : [tp1] "=&r"(tp1)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
       }
-      }
-      break;
-    case 8:
-      {
+    } break;
+    case 8: {
       uint32_t tp1, tp2;
 
       /* 2 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_store(dst + dst_stride);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         4(%[src])      \n\t"
-            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
+            "sw               %[tp1],         0(%[dst])      \n\t" /* store */
+            "sw               %[tp2],         4(%[dst])      \n\t" /* store */
 
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
       }
-      }
-      break;
-    case 16:
-      {
+    } break;
+    case 16: {
       uint32_t tp1, tp2, tp3, tp4;
 
       /* 4 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_store(dst + dst_stride);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         4(%[src])      \n\t"
             "ulw              %[tp3],         8(%[src])      \n\t"
             "ulw              %[tp4],         12(%[src])     \n\t"
 
-            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
+            "sw               %[tp1],         0(%[dst])      \n\t" /* store */
+            "sw               %[tp2],         4(%[dst])      \n\t" /* store */
+            "sw               %[tp3],         8(%[dst])      \n\t" /* store */
+            "sw               %[tp4],         12(%[dst])     \n\t" /* store */
 
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+              [tp4] "=&r"(tp4)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
       }
-      }
-      break;
-    case 32:
-      {
+    } break;
+    case 32: {
       uint32_t tp1, tp2, tp3, tp4;
       uint32_t tp5, tp6, tp7, tp8;
 
       /* 8 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_store(dst + dst_stride);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         4(%[src])      \n\t"
             "ulw              %[tp3],         8(%[src])      \n\t"
@@ -1156,29 +1495,25 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
             "ulw              %[tp7],         24(%[src])     \n\t"
             "ulw              %[tp8],         28(%[src])     \n\t"
 
-            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
-            "sw               %[tp5],         16(%[dst])     \n\t"  /* store */
-            "sw               %[tp6],         20(%[dst])     \n\t"  /* store */
-            "sw               %[tp7],         24(%[dst])     \n\t"  /* store */
-            "sw               %[tp8],         28(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tp5] "=&r" (tp5), [tp6] "=&r" (tp6),
-              [tp7] "=&r" (tp7), [tp8] "=&r" (tp8)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            "sw               %[tp1],         0(%[dst])      \n\t" /* store */
+            "sw               %[tp2],         4(%[dst])      \n\t" /* store */
+            "sw               %[tp3],         8(%[dst])      \n\t" /* store */
+            "sw               %[tp4],         12(%[dst])     \n\t" /* store */
+            "sw               %[tp5],         16(%[dst])     \n\t" /* store */
+            "sw               %[tp6],         20(%[dst])     \n\t" /* store */
+            "sw               %[tp7],         24(%[dst])     \n\t" /* store */
+            "sw               %[tp8],         28(%[dst])     \n\t" /* store */
+
+            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+              [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6),
+              [tp7] "=&r"(tp7), [tp8] "=&r"(tp8)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
       }
-      }
-      break;
-    case 64:
-      {
+    } break;
+    case 64: {
       uint32_t tp1, tp2, tp3, tp4;
       uint32_t tp5, tp6, tp7, tp8;
 
@@ -1186,14 +1521,14 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
       prefetch_store(dst + 32);
 
       /* 16 word storage */
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         prefetch_load(src + src_stride);
         prefetch_load(src + src_stride + 32);
         prefetch_load(src + src_stride + 64);
         prefetch_store(dst + dst_stride);
         prefetch_store(dst + dst_stride + 32);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "ulw              %[tp1],         0(%[src])      \n\t"
             "ulw              %[tp2],         4(%[src])      \n\t"
             "ulw              %[tp3],         8(%[src])      \n\t"
@@ -1203,14 +1538,14 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
             "ulw              %[tp7],         24(%[src])     \n\t"
             "ulw              %[tp8],         28(%[src])     \n\t"
 
-            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
-            "sw               %[tp5],         16(%[dst])     \n\t"  /* store */
-            "sw               %[tp6],         20(%[dst])     \n\t"  /* store */
-            "sw               %[tp7],         24(%[dst])     \n\t"  /* store */
-            "sw               %[tp8],         28(%[dst])     \n\t"  /* store */
+            "sw               %[tp1],         0(%[dst])      \n\t" /* store */
+            "sw               %[tp2],         4(%[dst])      \n\t" /* store */
+            "sw               %[tp3],         8(%[dst])      \n\t" /* store */
+            "sw               %[tp4],         12(%[dst])     \n\t" /* store */
+            "sw               %[tp5],         16(%[dst])     \n\t" /* store */
+            "sw               %[tp6],         20(%[dst])     \n\t" /* store */
+            "sw               %[tp7],         24(%[dst])     \n\t" /* store */
+            "sw               %[tp8],         28(%[dst])     \n\t" /* store */
 
             "ulw              %[tp1],         32(%[src])     \n\t"
             "ulw              %[tp2],         36(%[src])     \n\t"
@@ -1221,29 +1556,26 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
             "ulw              %[tp7],         56(%[src])     \n\t"
             "ulw              %[tp8],         60(%[src])     \n\t"
 
-            "sw               %[tp1],         32(%[dst])     \n\t"  /* store */
-            "sw               %[tp2],         36(%[dst])     \n\t"  /* store */
-            "sw               %[tp3],         40(%[dst])     \n\t"  /* store */
-            "sw               %[tp4],         44(%[dst])     \n\t"  /* store */
-            "sw               %[tp5],         48(%[dst])     \n\t"  /* store */
-            "sw               %[tp6],         52(%[dst])     \n\t"  /* store */
-            "sw               %[tp7],         56(%[dst])     \n\t"  /* store */
-            "sw               %[tp8],         60(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tp5] "=&r" (tp5), [tp6] "=&r" (tp6),
-              [tp7] "=&r" (tp7), [tp8] "=&r" (tp8)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
+            "sw               %[tp1],         32(%[dst])     \n\t" /* store */
+            "sw               %[tp2],         36(%[dst])     \n\t" /* store */
+            "sw               %[tp3],         40(%[dst])     \n\t" /* store */
+            "sw               %[tp4],         44(%[dst])     \n\t" /* store */
+            "sw               %[tp5],         48(%[dst])     \n\t" /* store */
+            "sw               %[tp6],         52(%[dst])     \n\t" /* store */
+            "sw               %[tp7],         56(%[dst])     \n\t" /* store */
+            "sw               %[tp8],         60(%[dst])     \n\t" /* store */
+
+            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+              [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6),
+              [tp7] "=&r"(tp7), [tp8] "=&r"(tp8)
+            : [src] "r"(src), [dst] "r"(dst));
 
         src += src_stride;
         dst += dst_stride;
       }
-      }
-      break;
+    } break;
     default:
-      for (y = h; y--; ) {
+      for (y = h; y--;) {
         for (x = 0; x < w; ++x) {
           dst[x] = src[x];
         }
diff --git a/vpx_dsp/mips/convolve8_horiz_dspr2.c b/vpx_dsp/mips/convolve8_horiz_dspr2.c
index ae78bab8924832ca60ec8a1452f51484f9a7a260..196a0a2f0be98384895dd6b067700b17ceabcb90 100644
--- a/vpx_dsp/mips/convolve8_horiz_dspr2.c
+++ b/vpx_dsp/mips/convolve8_horiz_dspr2.c
@@ -18,12 +18,9 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-static void convolve_horiz_4_dspr2(const uint8_t *src,
-                                   int32_t src_stride,
-                                   uint8_t *dst,
-                                   int32_t dst_stride,
-                                   const int16_t *filter_x0,
-                                   int32_t h) {
+static void convolve_horiz_4_dspr2(const uint8_t *src, int32_t src_stride,
+                                   uint8_t *dst, int32_t dst_stride,
+                                   const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
   int32_t vector1b, vector2b, vector3b, vector4b;
@@ -45,7 +42,7 @@ static void convolve_horiz_4_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride + 32);
     prefetch_store(dst + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],      0(%[src])                      \n\t"
         "ulw              %[tp2],      4(%[src])                      \n\t"
 
@@ -111,17 +108,15 @@ static void convolve_horiz_4_dspr2(const uint8_t *src,
         "sb               %[tp2],      2(%[dst])                      \n\t"
         "sb               %[n2],       3(%[dst])                      \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+          [p4] "=&r"(p4), [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3),
+          [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
+        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
+          [src] "r"(src));
 
     /* Next row... */
     src += src_stride;
@@ -129,12 +124,9 @@ static void convolve_horiz_4_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_horiz_8_dspr2(const uint8_t *src,
-                                   int32_t src_stride,
-                                   uint8_t *dst,
-                                   int32_t dst_stride,
-                                   const int16_t *filter_x0,
-                                   int32_t h) {
+static void convolve_horiz_8_dspr2(const uint8_t *src, int32_t src_stride,
+                                   uint8_t *dst, int32_t dst_stride,
+                                   const int16_t *filter_x0, int32_t h) {
   int32_t y;
   uint8_t *cm = vpx_ff_cropTbl;
   uint32_t vector4a = 64;
@@ -156,7 +148,7 @@ static void convolve_horiz_8_dspr2(const uint8_t *src,
     prefetch_load(src + src_stride + 32);
     prefetch_store(dst + dst_stride);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "ulw              %[tp1],      0(%[src])                      \n\t"
         "ulw              %[tp2],      4(%[src])                      \n\t"
 
@@ -275,17 +267,15 @@ static void convolve_horiz_8_dspr2(const uint8_t *src,
         "sb               %[p2],       5(%[dst])                      \n\t"
         "sb               %[n1],       7(%[dst])                      \n\t"
 
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), [tn3] "=&r" (tn3),
-          [st0] "=&r" (st0), [st1] "=&r" (st1),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
+        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+          [tn2] "=&r"(tn2), [tn3] "=&r"(tn3), [st0] "=&r"(st0),
+          [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+          [p4] "=&r"(p4), [n1] "=&r"(n1), [Temp1] "=&r"(Temp1),
+          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
+        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
+          [src] "r"(src));
 
     /* Next row... */
     src += src_stride;
@@ -293,12 +283,9 @@ static void convolve_horiz_8_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_horiz_16_dspr2(const uint8_t *src_ptr,
-                                    int32_t src_stride,
-                                    uint8_t *dst_ptr,
-                                    int32_t dst_stride,
-                                    const int16_t *filter_x0,
-                                    int32_t h,
+static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, int32_t src_stride,
+                                    uint8_t *dst_ptr, int32_t dst_stride,
+                                    const int16_t *filter_x0, int32_t h,
                                     int32_t count) {
   int32_t y, c;
   const uint8_t *src;
@@ -326,7 +313,7 @@ static void convolve_horiz_16_dspr2(const uint8_t *src_ptr,
     prefetch_store(dst_ptr + dst_stride);
 
     for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[qload1],    0(%[src])                    \n\t"
           "ulw              %[qload2],    4(%[src])                    \n\t"
 
@@ -542,17 +529,15 @@ static void convolve_horiz_16_dspr2(const uint8_t *src_ptr,
           "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
           "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
 
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst),
-            [src] "r" (src)
-      );
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
+            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
+            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
+          : [filter12] "r"(filter12), [filter34] "r"(filter34),
+            [filter56] "r"(filter56), [filter78] "r"(filter78),
+            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
+            [src] "r"(src));
 
       src += 16;
       dst += 16;
@@ -564,12 +549,9 @@ static void convolve_horiz_16_dspr2(const uint8_t *src_ptr,
   }
 }
 
-static void convolve_horiz_64_dspr2(const uint8_t *src_ptr,
-                                    int32_t src_stride,
-                                    uint8_t *dst_ptr,
-                                    int32_t dst_stride,
-                                    const int16_t *filter_x0,
-                                    int32_t h) {
+static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, int32_t src_stride,
+                                    uint8_t *dst_ptr, int32_t dst_stride,
+                                    const int16_t *filter_x0, int32_t h) {
   int32_t y, c;
   const uint8_t *src;
   uint8_t *dst;
@@ -598,7 +580,7 @@ static void convolve_horiz_64_dspr2(const uint8_t *src_ptr,
     prefetch_store(dst_ptr + dst_stride + 32);
 
     for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[qload1],    0(%[src])                    \n\t"
           "ulw              %[qload2],    4(%[src])                    \n\t"
 
@@ -814,17 +796,15 @@ static void convolve_horiz_64_dspr2(const uint8_t *src_ptr,
           "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
           "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
 
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst),
-            [src] "r" (src)
-      );
+          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
+            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
+            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
+          : [filter12] "r"(filter12), [filter34] "r"(filter34),
+            [filter56] "r"(filter56), [filter78] "r"(filter78),
+            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
+            [src] "r"(src));
 
       src += 16;
       dst += 16;
@@ -839,17 +819,14 @@ static void convolve_horiz_64_dspr2(const uint8_t *src_ptr,
 void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4,
-                               int w, int h) {
+                               const int16_t *filter_y, int y_step_q4, int w,
+                               int h) {
   assert(x_step_q4 == 16);
   assert(((const int32_t *)filter_x)[1] != 0x800000);
 
   if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_horiz_dspr2(src, src_stride,
-                              dst, dst_stride,
-                              filter_x, x_step_q4,
-                              filter_y, y_step_q4,
-                              w, h);
+    vpx_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                              x_step_q4, filter_y, y_step_q4, w, h);
   } else {
     uint32_t pos = 38;
 
@@ -857,11 +834,9 @@ void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
     src -= 3;
 
     /* bit position for extract from acc */
-    __asm__ __volatile__ (
-      "wrdsp      %[pos],     1           \n\t"
-      :
-      : [pos] "r" (pos)
-    );
+    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                         :
+                         : [pos] "r"(pos));
 
     /* prefetch data to cache memory */
     prefetch_load(src);
@@ -870,39 +845,31 @@ void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
     switch (w) {
       case 4:
-        convolve_horiz_4_dspr2(src, (int32_t)src_stride,
-                               dst, (int32_t)dst_stride,
-                               filter_x, (int32_t)h);
+        convolve_horiz_4_dspr2(src, (int32_t)src_stride, dst,
+                               (int32_t)dst_stride, filter_x, (int32_t)h);
         break;
       case 8:
-        convolve_horiz_8_dspr2(src, (int32_t)src_stride,
-                               dst, (int32_t)dst_stride,
-                               filter_x, (int32_t)h);
+        convolve_horiz_8_dspr2(src, (int32_t)src_stride, dst,
+                               (int32_t)dst_stride, filter_x, (int32_t)h);
         break;
       case 16:
-        convolve_horiz_16_dspr2(src, (int32_t)src_stride,
-                                dst, (int32_t)dst_stride,
-                                filter_x, (int32_t)h, 1);
+        convolve_horiz_16_dspr2(src, (int32_t)src_stride, dst,
+                                (int32_t)dst_stride, filter_x, (int32_t)h, 1);
         break;
       case 32:
-        convolve_horiz_16_dspr2(src, (int32_t)src_stride,
-                                dst, (int32_t)dst_stride,
-                                filter_x, (int32_t)h, 2);
+        convolve_horiz_16_dspr2(src, (int32_t)src_stride, dst,
+                                (int32_t)dst_stride, filter_x, (int32_t)h, 2);
         break;
       case 64:
         prefetch_load(src + 64);
         prefetch_store(dst + 32);
 
-        convolve_horiz_64_dspr2(src, (int32_t)src_stride,
-                                dst, (int32_t)dst_stride,
-                                filter_x, (int32_t)h);
+        convolve_horiz_64_dspr2(src, (int32_t)src_stride, dst,
+                                (int32_t)dst_stride, filter_x, (int32_t)h);
         break;
       default:
-        vpx_convolve8_horiz_c(src + 3, src_stride,
-                              dst, dst_stride,
-                              filter_x, x_step_q4,
-                              filter_y, y_step_q4,
-                              w, h);
+        vpx_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x,
+                              x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
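
/*
 * Note on the operand lists reflowed above: the register contracts live in
 * the GCC extended-asm constraint strings. "=&r" is a write-only,
 * early-clobber output (the compiler must not assign it a register still
 * holding a live input), "+r" is a read-write operand (used for [src_ptr]
 * in the vertical kernels below), and "r" is a plain input. A minimal MIPS
 * sketch of the pattern, an illustration rather than libvpx code:
 */
static inline unsigned int add_words_sketch(unsigned int a, unsigned int b) {
  unsigned int sum;

  /* "=&r"(sum) is declared early-clobber, as the kernels above do, so the
   * compiler keeps it in a register distinct from the "r"(a), "r"(b)
   * inputs. */
  __asm__ __volatile__("addu    %[sum],    %[a],    %[b]    \n\t"
                       : [sum] "=&r"(sum)
                       : [a] "r"(a), [b] "r"(b));
  return sum;
}
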
diff --git a/vpx_dsp/mips/convolve8_vert_dspr2.c b/vpx_dsp/mips/convolve8_vert_dspr2.c
index d553828c59a2b4a51f3cb65f0ba93281aeeff217..ad107d5c47309d8b9540ac0d43fd9464e7052482 100644
--- a/vpx_dsp/mips/convolve8_vert_dspr2.c
+++ b/vpx_dsp/mips/convolve8_vert_dspr2.c
@@ -18,12 +18,9 @@
 #include "vpx_ports/mem.h"
 
 #if HAVE_DSPR2
-static void convolve_vert_4_dspr2(const uint8_t *src,
-                                  int32_t src_stride,
-                                  uint8_t *dst,
-                                  int32_t dst_stride,
-                                  const int16_t *filter_y,
-                                  int32_t w,
+static void convolve_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
+                                  uint8_t *dst, int32_t dst_stride,
+                                  const int16_t *filter_y, int32_t w,
                                   int32_t h) {
   int32_t x, y;
   const uint8_t *src_ptr;
@@ -53,7 +50,7 @@ static void convolve_vert_4_dspr2(const uint8_t *src,
       src_ptr = src + x;
       dst_ptr = dst + x;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[load1],     0(%[src_ptr])                   \n\t"
           "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
           "ulw              %[load2],     0(%[src_ptr])                   \n\t"
@@ -152,19 +149,16 @@ static void convolve_vert_4_dspr2(const uint8_t *src,
           "sb               %[store1],    2(%[dst_ptr])                   \n\t"
           "sb               %[store2],    3(%[dst_ptr])                   \n\t"
 
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [load3] "=&r" (load3), [load4] "=&r" (load4),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [n1] "=&r" (n1), [n2] "=&r" (n2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-            [vector4a] "r" (vector4a), [src_stride] "r" (src_stride),
-            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
+          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
+            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
+            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
+            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
+          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
+            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
     }
 
     /* Next row... */
@@ -173,12 +167,9 @@ static void convolve_vert_4_dspr2(const uint8_t *src,
   }
 }
 
-static void convolve_vert_64_dspr2(const uint8_t *src,
-                                   int32_t src_stride,
-                                   uint8_t *dst,
-                                   int32_t dst_stride,
-                                   const int16_t *filter_y,
-                                   int32_t h) {
+static void convolve_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
+                                   uint8_t *dst, int32_t dst_stride,
+                                   const int16_t *filter_y, int32_t h) {
   int32_t x, y;
   const uint8_t *src_ptr;
   uint8_t *dst_ptr;
@@ -208,7 +199,7 @@ static void convolve_vert_64_dspr2(const uint8_t *src,
       src_ptr = src + x;
       dst_ptr = dst + x;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "ulw              %[load1],     0(%[src_ptr])                   \n\t"
           "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
           "ulw              %[load2],     0(%[src_ptr])                   \n\t"
@@ -307,19 +298,16 @@ static void convolve_vert_64_dspr2(const uint8_t *src,
           "sb               %[store1],    2(%[dst_ptr])                   \n\t"
           "sb               %[store2],    3(%[dst_ptr])                   \n\t"
 
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [load3] "=&r" (load3), [load4] "=&r" (load4),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [n1] "=&r" (n1), [n2] "=&r" (n2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-            [vector4a] "r" (vector4a), [src_stride] "r" (src_stride),
-            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
+          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
+            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
+            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
+            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
+            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
+          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
+            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
+            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
     }
 
     /* Next row... */
@@ -331,50 +319,38 @@ static void convolve_vert_64_dspr2(const uint8_t *src,
 void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4,
-                              int w, int h) {
+                              const int16_t *filter_y, int y_step_q4, int w,
+                              int h) {
   assert(y_step_q4 == 16);
   assert(((const int32_t *)filter_y)[1] != 0x800000);
 
   if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_vert_dspr2(src, src_stride,
-                             dst, dst_stride,
-                             filter_x, x_step_q4,
-                             filter_y, y_step_q4,
-                             w, h);
+    vpx_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
+                             x_step_q4, filter_y, y_step_q4, w, h);
   } else {
     uint32_t pos = 38;
 
     /* bit position for extract from acc */
-    __asm__ __volatile__ (
-      "wrdsp      %[pos],     1           \n\t"
-      :
-      : [pos] "r" (pos)
-    );
+    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                         :
+                         : [pos] "r"(pos));
 
     prefetch_store(dst);
 
     switch (w) {
-      case 4 :
-      case 8 :
-      case 16 :
-      case 32 :
-        convolve_vert_4_dspr2(src, src_stride,
-                              dst, dst_stride,
-                              filter_y, w, h);
+      case 4:
+      case 8:
+      case 16:
+      case 32:
+        convolve_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w, h);
         break;
-      case 64 :
+      case 64:
         prefetch_store(dst + 32);
-        convolve_vert_64_dspr2(src, src_stride,
-                               dst, dst_stride,
-                               filter_y, h);
+        convolve_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
         break;
       default:
-        vpx_convolve8_vert_c(src, src_stride,
-                             dst, dst_stride,
-                             filter_x, x_step_q4,
-                             filter_y, y_step_q4,
-                             w, h);
+        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+                             x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
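
/*
 * Both entry points above program the DSP control register identically
 * before dispatching. For readers unfamiliar with the instruction: "wrdsp
 * %[pos], 1" writes DSPControl with mask 1, which selects only the pos
 * field, so subsequent extp/extpdp instructions extract from the 64-bit
 * accumulator at bit position pos (38 here). The field layout is stated
 * from the MIPS DSP ASE documentation, not from this file. Isolated sketch:
 */
static inline void set_accumulator_extract_pos(unsigned int pos) {
  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
                       :
                       : [pos] "r"(pos)); /* mask 1: write pos field only */
}
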
diff --git a/vpx_dsp/mips/convolve_common_dspr2.h b/vpx_dsp/mips/convolve_common_dspr2.h
index 66d77a28544b9619432f646f530943899f320968..4eee3bd5e1580f54648c49ea2b078f7777947d4f 100644
--- a/vpx_dsp/mips/convolve_common_dspr2.h
+++ b/vpx_dsp/mips/convolve_common_dspr2.h
@@ -25,8 +25,8 @@ extern "C" {
 void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4,
-                               int w, int h);
+                               const int16_t *filter_y, int y_step_q4, int w,
+                               int h);
 
 void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                    uint8_t *dst, ptrdiff_t dst_stride,
@@ -37,19 +37,18 @@ void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4,
-                                  int w, int h);
+                                  const int16_t *filter_y, int y_step_q4, int w,
+                                  int h);
 
-void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter,
-                         int w, int h);
+void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                         ptrdiff_t dst_stride, const int16_t *filter, int w,
+                         int h);
 
 void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4,
-                              int w, int h);
+                              const int16_t *filter_y, int y_step_q4, int w,
+                              int h);
 
 #endif  // #if HAVE_DSPR2
 #ifdef __cplusplus
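
/*
 * Every DSPR2 kernel above primes the caches one row ahead with
 * prefetch_load()/prefetch_store() before entering its asm block; those
 * helpers are assumed to wrap the MIPS `pref` instruction. A portable
 * approximation of the same pattern with GCC's __builtin_prefetch, for
 * illustration only:
 */
static inline void prefetch_load_sketch(const unsigned char *src) {
  __builtin_prefetch(src, 0, 3); /* 0 = expect reads, 3 = high locality */
}

static inline void prefetch_store_sketch(unsigned char *dst) {
  __builtin_prefetch(dst, 1, 3); /* 1 = expect writes */
}
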
diff --git a/vpx_dsp/mips/fwd_dct32x32_msa.c b/vpx_dsp/mips/fwd_dct32x32_msa.c
index 2115a348c2e3a41120952cb222636856ed73b407..e06a3dc6baf8b5edd9a0962850c40b2fa9260783 100644
--- a/vpx_dsp/mips/fwd_dct32x32_msa.c
+++ b/vpx_dsp/mips/fwd_dct32x32_msa.c
@@ -27,10 +27,10 @@ static void fdct8x32_1d_column_load_butterfly(const int16_t *input,
   SLLI_4V(in4, in5, in6, in7, 2);
   SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2);
   SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2);
-  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,
-              step0, step1, step2, step3, in4, in5, in6, in7);
-  BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-              step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
+  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2,
+              step3, in4, in5, in6, in7);
+  BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, step0_1,
+              step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
   ST_SH4(step0, step1, step2, step3, temp_buff, 8);
   ST_SH4(in4, in5, in6, in7, temp_buff + (28 * 8), 8);
   ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (4 * 8), 8);
@@ -45,10 +45,10 @@ static void fdct8x32_1d_column_load_butterfly(const int16_t *input,
   SLLI_4V(in4, in5, in6, in7, 2);
   SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2);
   SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2);
-  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,
-              step0, step1, step2, step3, in4, in5, in6, in7);
-  BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-              step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
+  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2,
+              step3, in4, in5, in6, in7);
+  BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, step0_1,
+              step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
   ST_SH4(step0, step1, step2, step3, temp_buff + (8 * 8), 8);
   ST_SH4(in4, in5, in6, in7, temp_buff + (20 * 8), 8);
   ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (12 * 8), 8);
@@ -64,12 +64,12 @@ static void fdct8x32_1d_column_even_store(int16_t *input, int16_t *temp) {
   /* fdct even */
   LD_SH4(input, 8, in0, in1, in2, in3);
   LD_SH4(input + 96, 8, in12, in13, in14, in15);
-  BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15,
-              vec0, vec1, vec2, vec3, in12, in13, in14, in15);
+  BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15, vec0, vec1, vec2,
+              vec3, in12, in13, in14, in15);
   LD_SH4(input + 32, 8, in4, in5, in6, in7);
   LD_SH4(input + 64, 8, in8, in9, in10, in11);
-  BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11,
-              vec4, vec5, vec6, vec7, in8, in9, in10, in11);
+  BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6, vec7,
+              in8, in9, in10, in11);
 
   /* Stage 3 */
   ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
@@ -258,28 +258,26 @@ static void fdct8x32_1d_row_load_butterfly(int16_t *temp_buff,
 
   LD_SH8(temp_buff, 32, in0, in1, in2, in3, in4, in5, in6, in7);
   LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
-                     in8, in9, in10, in11, in12, in13, in14, in15);
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               step0, step1, step2, step3, step4, step5, step6, step7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+                     in10, in11, in12, in13, in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+               in12, in13, in14, in15, step0, step1, step2, step3, step4, step5,
+               step6, step7, in8, in9, in10, in11, in12, in13, in14, in15);
   ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7, output, 8);
   ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 24 * 8), 8);
 
   /* 2nd set */
   LD_SH8(temp_buff + 8, 32, in0, in1, in2, in3, in4, in5, in6, in7);
   LD_SH8(temp_buff + 16, 32, in8, in9, in10, in11, in12, in13, in14, in15);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
-                     in8, in9, in10, in11, in12, in13, in14, in15);
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               step0, step1, step2, step3, step4, step5, step6, step7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+                     in10, in11, in12, in13, in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+               in12, in13, in14, in15, step0, step1, step2, step3, step4, step5,
+               step6, step7, in8, in9, in10, in11, in12, in13, in14, in15);
   ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7,
          (output + 8 * 8), 8);
   ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 16 * 8), 8);
@@ -299,10 +297,9 @@ static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr,
   LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
   LD_SH8(input + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
 
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+               in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6,
+               vec7, in8, in9, in10, in11, in12, in13, in14, in15);
   ST_SH8(vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, interm_ptr, 8);
   ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, interm_ptr + 64, 8);
 
@@ -315,19 +312,19 @@ static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr,
   UNPCK_SH_SW(vec5, vec5_l, vec5_r);
   UNPCK_SH_SW(vec6, vec6_l, vec6_r);
   UNPCK_SH_SW(vec7, vec7_l, vec7_r);
-  ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r,
-       tmp0_w, tmp1_w, tmp2_w, tmp3_w);
+  ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r, tmp0_w,
+       tmp1_w, tmp2_w, tmp3_w);
   BUTTERFLY_4(tmp0_w, tmp1_w, tmp2_w, tmp3_w, vec4_r, vec6_r, vec7_r, vec5_r);
-  ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l,
-       vec0_r, vec1_r, vec2_r, vec3_r);
+  ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l, vec0_r,
+       vec1_r, vec2_r, vec3_r);
 
   tmp3_w = vec0_r + vec3_r;
   vec0_r = vec0_r - vec3_r;
   vec3_r = vec1_r + vec2_r;
   vec1_r = vec1_r - vec2_r;
 
-  DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64,
-                    cospi_16_64, vec4_r, tmp3_w, vec6_r, vec3_r);
+  DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64, cospi_16_64,
+                    vec4_r, tmp3_w, vec6_r, vec3_r);
   FDCT32_POSTPROC_NEG_W(vec4_r);
   FDCT32_POSTPROC_NEG_W(tmp3_w);
   FDCT32_POSTPROC_NEG_W(vec6_r);
@@ -335,8 +332,8 @@ static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr,
   PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
   ST_SH2(vec5, vec4, out, 8);
 
-  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64,
-                    cospi_8_64, vec4_r, tmp3_w, vec6_r, vec3_r);
+  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64,
+                    vec4_r, tmp3_w, vec6_r, vec3_r);
   FDCT32_POSTPROC_NEG_W(vec4_r);
   FDCT32_POSTPROC_NEG_W(tmp3_w);
   FDCT32_POSTPROC_NEG_W(vec6_r);
@@ -401,10 +398,9 @@ static void fdct8x32_1d_row_even(int16_t *temp, int16_t *out) {
   LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);
   LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
 
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+               in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6,
+               vec7, in8, in9, in10, in11, in12, in13, in14, in15);
 
   /* Stage 3 */
   ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
@@ -610,8 +606,8 @@ static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) {
   in3 = LD_SH(temp + 192);
   in5 = LD_SH(temp + 216);
 
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
 
   /* 2nd set */
   in0_1 = LD_SH(temp + 16);
@@ -637,10 +633,10 @@ static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) {
   in6 = LD_SH(temp + 104);
   in7 = LD_SH(temp + 144);
 
-  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-         output + 8, 32);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, output + 8,
+         32);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
   ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output + 16, 32);
 
   /* 4th set */
@@ -655,12 +651,11 @@ static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) {
 
   TRANSPOSE8x8_SH_SH(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
                      in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);
-  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-         output + 24, 32);
+  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, output + 24,
+         32);
 }
 
-static void fdct32x8_1d_row(int16_t *temp, int16_t *temp_buf,
-                            int16_t *output) {
+static void fdct32x8_1d_row(int16_t *temp, int16_t *temp_buf, int16_t *output) {
   fdct8x32_1d_row_load_butterfly(temp, temp_buf);
   fdct8x32_1d_row_even(temp_buf, temp_buf);
   fdct8x32_1d_row_odd(temp_buf + 128, temp, temp_buf + 128);
@@ -706,10 +701,9 @@ static void fdct8x32_1d_row_even_rd(int16_t *temp, int16_t *out) {
   LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);
   LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
 
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+               in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6,
+               vec7, in8, in9, in10, in11, in12, in13, in14, in15);
   FDCT_POSTPROC_2V_NEG_H(vec0, vec1);
   FDCT_POSTPROC_2V_NEG_H(vec2, vec3);
   FDCT_POSTPROC_2V_NEG_H(vec4, vec5);
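
/*
 * The BUTTERFLY_8/BUTTERFLY_16 calls reflowed above are the classic first
 * DCT stage: sums of mirrored input pairs land in the low half of the
 * outputs, differences in the high half, computed per 8-lane MSA vector.
 * A scalar model of the 8-input case, assuming the usual definition of the
 * macro (its body lives elsewhere in the tree):
 */
static void butterfly8_sketch(const short in[8], short out[8]) {
  int i;

  for (i = 0; i < 4; ++i) {
    out[i] = in[i] + in[7 - i];     /* out0..out3: in0+in7 ... in3+in4 */
    out[7 - i] = in[i] - in[7 - i]; /* out7..out4: in0-in7 ... in3-in4 */
  }
}
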
diff --git a/vpx_dsp/mips/fwd_txfm_msa.c b/vpx_dsp/mips/fwd_txfm_msa.c
index 5df40f9fba1510616dbc7f6909c034f6440eb99d..c0ca061c4d6c8ae5a7f78b46c00212fae700b399 100644
--- a/vpx_dsp/mips/fwd_txfm_msa.c
+++ b/vpx_dsp/mips/fwd_txfm_msa.c
@@ -18,24 +18,24 @@ void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr,
   v8i16 stp21, stp22, stp23, stp24, stp25, stp26, stp30;
   v8i16 stp31, stp32, stp33, stp34, stp35, stp36, stp37;
   v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5;
-  v8i16 coeff = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64,
-                 -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 };
-  v8i16 coeff1 = { cospi_2_64, cospi_30_64, cospi_14_64, cospi_18_64,
-                   cospi_10_64, cospi_22_64, cospi_6_64, cospi_26_64 };
-  v8i16 coeff2 = { -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64,
-                   0, 0, 0, 0 };
-
-  LD_SH16(input, src_stride,
-          in0, in1, in2, in3, in4, in5, in6, in7,
-          in8, in9, in10, in11, in12, in13, in14, in15);
+  v8i16 coeff = { cospi_16_64, -cospi_16_64, cospi_8_64,  cospi_24_64,
+                  -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 };
+  v8i16 coeff1 = { cospi_2_64,  cospi_30_64, cospi_14_64, cospi_18_64,
+                   cospi_10_64, cospi_22_64, cospi_6_64,  cospi_26_64 };
+  v8i16 coeff2 = {
+    -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64, 0, 0, 0, 0
+  };
+
+  LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,
+          in10, in11, in12, in13, in14, in15);
   SLLI_4V(in0, in1, in2, in3, 2);
   SLLI_4V(in4, in5, in6, in7, 2);
   SLLI_4V(in8, in9, in10, in11, 2);
   SLLI_4V(in12, in13, in14, in15, 2);
   ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);
   ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);
-  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
-                tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
+                tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
   ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32);
   SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12);
   SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8);
@@ -137,10 +137,10 @@ void fdct16x8_1d_row(int16_t *input, int16_t *output) {
 
   LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7);
   LD_SH8((input + 8), 16, in8, in9, in10, in11, in12, in13, in14, in15);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
-                     in8, in9, in10, in11, in12, in13, in14, in15);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+                     in10, in11, in12, in13, in14, in15);
   ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
   ADD4(in4, 1, in5, 1, in6, 1, in7, 1, in4, in5, in6, in7);
   ADD4(in8, 1, in9, 1, in10, 1, in11, 1, in8, in9, in10, in11);
@@ -150,19 +150,19 @@ void fdct16x8_1d_row(int16_t *input, int16_t *output) {
   SRA_4V(in8, in9, in10, in11, 2);
   SRA_4V(in12, in13, in14, in15, 2);
   BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
-               in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5,
-               tmp6, tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
+               in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
+               tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
   ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, input, 16);
-  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
-                tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
+                tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
   LD_SH8(input, 16, in8, in9, in10, in11, in12, in13, in14, in15);
-  FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15,
-                   in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3,
-                     tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3);
+  FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3,
+               in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0,
+                     tmp1, in1, tmp2, in2, tmp3, in3);
   ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, output, 16);
-  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7,
-                     tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7);
+  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4,
+                     tmp5, in5, tmp6, in6, tmp7, in7);
   ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, output + 8, 16);
 }
 
@@ -203,14 +203,14 @@ void vpx_fdct8x8_msa(const int16_t *input, int16_t *output,
   LD_SH8(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7);
   SLLI_4V(in0, in1, in2, in3, 2);
   SLLI_4V(in4, in5, in6, in7, 2);
-  VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-            in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-            in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+  VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+            in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
+  VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+            in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
   SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
   ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
 }
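
/*
 * vpx_fdct8x8_msa above shows the separable-transform shape used throughout
 * these files: scale up, 1-D pass, transpose, second 1-D pass, transpose
 * back, round, store. A self-contained scalar model of that structure,
 * reusing the sum/difference stage sketched earlier as a stand-in for the
 * real 8-point transform (illustration only):
 */
static void onedim_columns_sketch(short m[8][8]) {
  int r, c;

  for (c = 0; c < 8; ++c) {
    for (r = 0; r < 4; ++r) {
      short a = m[r][c], b = m[7 - r][c];

      m[r][c] = a + b; /* stand-in 1-D transform, applied down each column */
      m[7 - r][c] = a - b;
    }
  }
}

static void transpose8x8_sketch(short m[8][8]) {
  int r, c;

  for (r = 0; r < 8; ++r) {
    for (c = r + 1; c < 8; ++c) {
      short t = m[r][c];

      m[r][c] = m[c][r];
      m[c][r] = t;
    }
  }
}

static void separable_8x8_sketch(short m[8][8]) {
  onedim_columns_sketch(m); /* first pass: columns */
  transpose8x8_sketch(m);   /* rows become columns */
  onedim_columns_sketch(m); /* second pass: former rows */
  transpose8x8_sketch(m);   /* restore row-major orientation */
}
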
diff --git a/vpx_dsp/mips/fwd_txfm_msa.h b/vpx_dsp/mips/fwd_txfm_msa.h
index d7bb316d5be812eb01b8441b22f5ffde6968678e..6458dec6d20f3ebc11e78675b41d463ca0e8a5c2 100644
--- a/vpx_dsp/mips/fwd_txfm_msa.h
+++ b/vpx_dsp/mips/fwd_txfm_msa.h
@@ -14,358 +14,365 @@
 #include "vpx_dsp/mips/txfm_macros_msa.h"
 #include "vpx_dsp/txfm_common.h"
 
-#define LD_HADD(psrc, stride) ({                                      \
-  v8i16 in0_m, in1_m, in2_m, in3_m, in4_m, in5_m, in6_m, in7_m;       \
-  v4i32 vec_w_m;                                                      \
-                                                                      \
-  LD_SH4((psrc), stride, in0_m, in1_m, in2_m, in3_m);                 \
-  ADD2(in0_m, in1_m, in2_m, in3_m, in0_m, in2_m);                     \
-  LD_SH4(((psrc) + 4 * stride), stride, in4_m, in5_m, in6_m, in7_m);  \
-  ADD4(in4_m, in5_m, in6_m, in7_m, in0_m, in2_m, in4_m, in6_m,        \
-       in4_m, in6_m, in0_m, in4_m);                                   \
-  in0_m += in4_m;                                                     \
-                                                                      \
-  vec_w_m = __msa_hadd_s_w(in0_m, in0_m);                             \
-  HADD_SW_S32(vec_w_m);                                               \
-})
+#define LD_HADD(psrc, stride)                                                  \
+  ({                                                                           \
+    v8i16 in0_m, in1_m, in2_m, in3_m, in4_m, in5_m, in6_m, in7_m;              \
+    v4i32 vec_w_m;                                                             \
+                                                                               \
+    LD_SH4((psrc), stride, in0_m, in1_m, in2_m, in3_m);                        \
+    ADD2(in0_m, in1_m, in2_m, in3_m, in0_m, in2_m);                            \
+    LD_SH4(((psrc) + 4 * stride), stride, in4_m, in5_m, in6_m, in7_m);         \
+    ADD4(in4_m, in5_m, in6_m, in7_m, in0_m, in2_m, in4_m, in6_m, in4_m, in6_m, \
+         in0_m, in4_m);                                                        \
+    in0_m += in4_m;                                                            \
+                                                                               \
+    vec_w_m = __msa_hadd_s_w(in0_m, in0_m);                                    \
+    HADD_SW_S32(vec_w_m);                                                      \
+  })
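
/*
 * LD_HADD above, unlike the block macros that follow, is a GNU statement
 * expression: ({ ... }) forms a block whose final expression becomes the
 * macro's value, so it can declare locals yet be used where a value is
 * expected. A minimal example of the construct, unrelated to the macro's
 * actual arithmetic:
 */
#define SUM3_SKETCH(a, b, c) \
  ({                         \
    int t_m = (a) + (b);     \
                             \
    t_m + (c);               \
  })

/* usage: int s = SUM3_SKETCH(1, 2, 3); evaluates to 6, t_m stays local */
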
 
-#define VPX_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) {     \
-  v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m;                         \
-  v8i16 vec0_m, vec1_m, vec2_m, vec3_m;                             \
-  v4i32 vec4_m, vec5_m, vec6_m, vec7_m;                             \
-  v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64,          \
-                    cospi_24_64, -cospi_8_64, 0, 0, 0 };            \
-                                                                    \
-  BUTTERFLY_4(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m);  \
-  ILVR_H2_SH(vec1_m, vec0_m, vec3_m, vec2_m, vec0_m, vec2_m);       \
-  SPLATI_H2_SH(coeff_m, 0, 1, cnst0_m, cnst1_m);                    \
-  cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                        \
-  vec5_m = __msa_dotp_s_w(vec0_m, cnst1_m);                         \
-                                                                    \
-  SPLATI_H2_SH(coeff_m, 4, 3, cnst2_m, cnst3_m);                    \
-  cnst2_m = __msa_ilvev_h(cnst3_m, cnst2_m);                        \
-  vec7_m = __msa_dotp_s_w(vec2_m, cnst2_m);                         \
-                                                                    \
-  vec4_m = __msa_dotp_s_w(vec0_m, cnst0_m);                         \
-  cnst2_m = __msa_splati_h(coeff_m, 2);                             \
-  cnst2_m = __msa_ilvev_h(cnst2_m, cnst3_m);                        \
-  vec6_m = __msa_dotp_s_w(vec2_m, cnst2_m);                         \
-                                                                    \
-  SRARI_W4_SW(vec4_m, vec5_m, vec6_m, vec7_m, DCT_CONST_BITS);      \
-  PCKEV_H4_SH(vec4_m, vec4_m, vec5_m, vec5_m, vec6_m, vec6_m,       \
-              vec7_m, vec7_m, out0, out2, out1, out3);              \
-}
+#define VPX_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3)                  \
+  {                                                                            \
+    v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m;                                  \
+    v8i16 vec0_m, vec1_m, vec2_m, vec3_m;                                      \
+    v4i32 vec4_m, vec5_m, vec6_m, vec7_m;                                      \
+    v8i16 coeff_m = {                                                          \
+      cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, -cospi_8_64, 0, 0, 0 \
+    };                                                                         \
+                                                                               \
+    BUTTERFLY_4(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m);           \
+    ILVR_H2_SH(vec1_m, vec0_m, vec3_m, vec2_m, vec0_m, vec2_m);                \
+    SPLATI_H2_SH(coeff_m, 0, 1, cnst0_m, cnst1_m);                             \
+    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                 \
+    vec5_m = __msa_dotp_s_w(vec0_m, cnst1_m);                                  \
+                                                                               \
+    SPLATI_H2_SH(coeff_m, 4, 3, cnst2_m, cnst3_m);                             \
+    cnst2_m = __msa_ilvev_h(cnst3_m, cnst2_m);                                 \
+    vec7_m = __msa_dotp_s_w(vec2_m, cnst2_m);                                  \
+                                                                               \
+    vec4_m = __msa_dotp_s_w(vec0_m, cnst0_m);                                  \
+    cnst2_m = __msa_splati_h(coeff_m, 2);                                      \
+    cnst2_m = __msa_ilvev_h(cnst2_m, cnst3_m);                                 \
+    vec6_m = __msa_dotp_s_w(vec2_m, cnst2_m);                                  \
+                                                                               \
+    SRARI_W4_SW(vec4_m, vec5_m, vec6_m, vec7_m, DCT_CONST_BITS);               \
+    PCKEV_H4_SH(vec4_m, vec4_m, vec5_m, vec5_m, vec6_m, vec6_m, vec7_m,        \
+                vec7_m, out0, out2, out1, out3);                               \
+  }
 
-#define SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) {        \
-  v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;  \
-                                                                         \
-  SRLI_H4_SH(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m, 15);    \
-  SRLI_H4_SH(in4, in5, in6, in7, vec4_m, vec5_m, vec6_m, vec7_m, 15);    \
-  AVE_SH4_SH(vec0_m, in0, vec1_m, in1, vec2_m, in2, vec3_m, in3,         \
-             in0, in1, in2, in3);                                        \
-  AVE_SH4_SH(vec4_m, in4, vec5_m, in5, vec6_m, in6, vec7_m, in7,         \
-             in4, in5, in6, in7);                                        \
-}
+#define SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7)              \
+  {                                                                          \
+    v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;    \
+                                                                             \
+    SRLI_H4_SH(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m, 15);      \
+    SRLI_H4_SH(in4, in5, in6, in7, vec4_m, vec5_m, vec6_m, vec7_m, 15);      \
+    AVE_SH4_SH(vec0_m, in0, vec1_m, in1, vec2_m, in2, vec3_m, in3, in0, in1, \
+               in2, in3);                                                    \
+    AVE_SH4_SH(vec4_m, in4, vec5_m, in5, vec6_m, in6, vec7_m, in7, in4, in5, \
+               in6, in7);                                                    \
+  }
 
-#define VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,            \
-                  out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m;                    \
-  v8i16 s7_m, x0_m, x1_m, x2_m, x3_m;                                \
-  v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64,           \
-                    cospi_24_64, cospi_4_64, cospi_28_64,            \
-                    cospi_12_64, cospi_20_64 };                      \
-                                                                     \
-  /* FDCT stage1 */                                                  \
-  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,                \
-              s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m);       \
-  BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m);       \
-  ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m);                    \
-  ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m);                    \
-  SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m);                           \
-  x1_m = __msa_ilvev_h(x1_m, x0_m);                                  \
-  out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m);                    \
-                                                                     \
-  SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m);                           \
-  x2_m = -x2_m;                                                      \
-  x2_m = __msa_ilvev_h(x3_m, x2_m);                                  \
-  out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m);                    \
-                                                                     \
-  out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m);                    \
-  x2_m = __msa_splati_h(coeff_m, 2);                                 \
-  x2_m = __msa_ilvev_h(x2_m, x3_m);                                  \
-  out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m);                    \
-                                                                     \
-  /* stage2 */                                                       \
-  ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m);                               \
-                                                                     \
-  s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m);                    \
-  s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m);                    \
-                                                                     \
-  /* stage3 */                                                       \
-  BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m);       \
-                                                                     \
-  /* stage4 */                                                       \
-  ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m);                    \
-  ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m);                    \
-                                                                     \
-  SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m);                           \
-  x1_m = __msa_ilvev_h(x0_m, x1_m);                                  \
-  out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m);                    \
-                                                                     \
-  SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m);                           \
-  x2_m = __msa_ilvev_h(x3_m, x2_m);                                  \
-  out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m);                    \
-                                                                     \
-  x1_m = __msa_splati_h(coeff_m, 5);                                 \
-  x0_m = -x0_m;                                                      \
-  x0_m = __msa_ilvev_h(x1_m, x0_m);                                  \
-  out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m);                    \
-                                                                     \
-  x2_m = __msa_splati_h(coeff_m, 6);                                 \
-  x3_m = -x3_m;                                                      \
-  x2_m = __msa_ilvev_h(x2_m, x3_m);                                  \
-  out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m);                    \
-}
+#define VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
+                  out3, out4, out5, out6, out7)                              \
+  {                                                                          \
+    v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m;                          \
+    v8i16 s7_m, x0_m, x1_m, x2_m, x3_m;                                      \
+    v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64,  cospi_24_64,   \
+                      cospi_4_64,  cospi_28_64,  cospi_12_64, cospi_20_64 }; \
+                                                                             \
+    /* FDCT stage1 */                                                        \
+    BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, s0_m, s1_m, s2_m,    \
+                s3_m, s4_m, s5_m, s6_m, s7_m);                               \
+    BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m);             \
+    ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m);                          \
+    ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m);                          \
+    SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m);                                 \
+    x1_m = __msa_ilvev_h(x1_m, x0_m);                                        \
+    out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m);                          \
+                                                                             \
+    SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m);                                 \
+    x2_m = -x2_m;                                                            \
+    x2_m = __msa_ilvev_h(x3_m, x2_m);                                        \
+    out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m);                          \
+                                                                             \
+    out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m);                          \
+    x2_m = __msa_splati_h(coeff_m, 2);                                       \
+    x2_m = __msa_ilvev_h(x2_m, x3_m);                                        \
+    out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m);                          \
+                                                                             \
+    /* stage2 */                                                             \
+    ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m);                                     \
+                                                                             \
+    s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m);                          \
+    s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m);                          \
+                                                                             \
+    /* stage3 */                                                             \
+    BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m);             \
+                                                                             \
+    /* stage4 */                                                             \
+    ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m);                          \
+    ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m);                          \
+                                                                             \
+    SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m);                                 \
+    x1_m = __msa_ilvev_h(x0_m, x1_m);                                        \
+    out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m);                          \
+                                                                             \
+    SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m);                                 \
+    x2_m = __msa_ilvev_h(x3_m, x2_m);                                        \
+    out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m);                          \
+                                                                             \
+    x1_m = __msa_splati_h(coeff_m, 5);                                       \
+    x0_m = -x0_m;                                                            \
+    x0_m = __msa_ilvev_h(x1_m, x0_m);                                        \
+    out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m);                          \
+                                                                             \
+    x2_m = __msa_splati_h(coeff_m, 6);                                       \
+    x3_m = -x3_m;                                                            \
+    x2_m = __msa_ilvev_h(x2_m, x3_m);                                        \
+    out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m);                          \
+  }
 
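+/* Even half of a 16-point 1-D forward DCT on eight v8i16 columns: the
+ * even-indexed transform outputs land in out0..out7.  Each stage is a
+ * BUTTERFLY_* pass followed by paired multiplies against the cospi
+ * constants in coeff_m (DOT_SHIFT_RIGHT_PCK_H does the dot product,
+ * rounding shift and pack). */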
-#define FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7,                \
-                      out0, out1, out2, out3, out4, out5, out6, out7) {      \
-  v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m;                      \
-  v8i16 x0_m, x1_m, x2_m, x3_m;                                              \
-  v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64,      \
-                    cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 };     \
+#define FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1,    \
+                      out2, out3, out4, out5, out6, out7)                    \
+  {                                                                          \
+    v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m;                    \
+    v8i16 x0_m, x1_m, x2_m, x3_m;                                            \
+    v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64,  cospi_24_64,   \
+                      cospi_4_64,  cospi_28_64,  cospi_12_64, cospi_20_64 }; \
                                                                              \
-  /* FDCT stage1 */                                                          \
-  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,                        \
-              s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m);               \
-  BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m);               \
-  ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m);                            \
-  ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m);                            \
-  SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m);                                   \
-  x1_m = __msa_ilvev_h(x1_m, x0_m);                                          \
-  out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m);                            \
+    /* FDCT stage1 */                                                        \
+    BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, s0_m, s1_m, s2_m,    \
+                s3_m, s4_m, s5_m, s6_m, s7_m);                               \
+    BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m);             \
+    ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m);                          \
+    ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m);                          \
+    SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m);                                 \
+    x1_m = __msa_ilvev_h(x1_m, x0_m);                                        \
+    out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m);                          \
                                                                              \
-  SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m);                                   \
-  x2_m = -x2_m;                                                              \
-  x2_m = __msa_ilvev_h(x3_m, x2_m);                                          \
-  out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m);                            \
+    SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m);                                 \
+    x2_m = -x2_m;                                                            \
+    x2_m = __msa_ilvev_h(x3_m, x2_m);                                        \
+    out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m);                          \
                                                                              \
-  out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m);                            \
-  x2_m = __msa_splati_h(coeff_m, 2);                                         \
-  x2_m = __msa_ilvev_h(x2_m, x3_m);                                          \
-  out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m);                            \
+    out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m);                          \
+    x2_m = __msa_splati_h(coeff_m, 2);                                       \
+    x2_m = __msa_ilvev_h(x2_m, x3_m);                                        \
+    out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m);                          \
                                                                              \
-  /* stage2 */                                                               \
-  ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m);                                       \
+    /* stage2 */                                                             \
+    ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m);                                     \
                                                                              \
-  s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m);                            \
-  s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m);                            \
+    s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m);                          \
+    s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m);                          \
                                                                              \
-  /* stage3 */                                                               \
-  BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m);               \
+    /* stage3 */                                                             \
+    BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m);             \
                                                                              \
-  /* stage4 */                                                               \
-  ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m);                            \
-  ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m);                            \
+    /* stage4 */                                                             \
+    ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m);                          \
+    ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m);                          \
                                                                              \
-  SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m);                                   \
-  x1_m = __msa_ilvev_h(x0_m, x1_m);                                          \
-  out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m);                            \
+    SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m);                                 \
+    x1_m = __msa_ilvev_h(x0_m, x1_m);                                        \
+    out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m);                          \
                                                                              \
-  SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m);                                   \
-  x2_m = __msa_ilvev_h(x3_m, x2_m);                                          \
-  out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m);                            \
+    SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m);                                 \
+    x2_m = __msa_ilvev_h(x3_m, x2_m);                                        \
+    out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m);                          \
                                                                              \
-  x1_m = __msa_splati_h(coeff_m, 5);                                         \
-  x0_m = -x0_m;                                                              \
-  x0_m = __msa_ilvev_h(x1_m, x0_m);                                          \
-  out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m);                            \
+    x1_m = __msa_splati_h(coeff_m, 5);                                       \
+    x0_m = -x0_m;                                                            \
+    x0_m = __msa_ilvev_h(x1_m, x0_m);                                        \
+    out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m);                          \
                                                                              \
-  x2_m = __msa_splati_h(coeff_m, 6);                                         \
-  x3_m = -x3_m;                                                              \
-  x2_m = __msa_ilvev_h(x2_m, x3_m);                                          \
-  out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m);                            \
-}
+    x2_m = __msa_splati_h(coeff_m, 6);                                       \
+    x3_m = -x3_m;                                                            \
+    x2_m = __msa_ilvev_h(x2_m, x3_m);                                        \
+    out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m);                          \
+  }
 
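+/* Odd half of the 16-point 1-D forward DCT: produces the odd-indexed
+ * outputs out1, out3, ..., out15 from eight input vectors, using the
+ * cospi tables coeff_m, coeff1_m and coeff2_m declared below. */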
-#define FDCT8x16_ODD(input0, input1, input2, input3,               \
-                     input4, input5, input6, input7,               \
-                     out1, out3, out5, out7,                       \
-                     out9, out11, out13, out15) {                  \
-  v8i16 stp21_m, stp22_m, stp23_m, stp24_m, stp25_m, stp26_m;      \
-  v8i16 stp30_m, stp31_m, stp32_m, stp33_m, stp34_m, stp35_m;      \
-  v8i16 stp36_m, stp37_m, vec0_m, vec1_m;                          \
-  v8i16 vec2_m, vec3_m, vec4_m, vec5_m, vec6_m;                    \
-  v8i16 cnst0_m, cnst1_m, cnst4_m, cnst5_m;                        \
-  v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64,         \
-                    cospi_24_64, -cospi_8_64, -cospi_24_64,        \
-                    cospi_12_64, cospi_20_64 };                    \
-  v8i16 coeff1_m = { cospi_2_64, cospi_30_64, cospi_14_64,         \
-                     cospi_18_64, cospi_10_64, cospi_22_64,        \
-                     cospi_6_64, cospi_26_64 };                    \
-  v8i16 coeff2_m = { -cospi_2_64, -cospi_10_64, -cospi_18_64,      \
-                     -cospi_26_64, 0, 0, 0, 0 };                   \
-                                                                   \
-  /* stp 1 */                                                      \
-  ILVL_H2_SH(input2, input5, input3, input4, vec2_m, vec4_m);      \
-  ILVR_H2_SH(input2, input5, input3, input4, vec3_m, vec5_m);      \
-                                                                   \
-  cnst4_m = __msa_splati_h(coeff_m, 0);                            \
-  stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst4_m);        \
-                                                                   \
-  cnst5_m = __msa_splati_h(coeff_m, 1);                            \
-  cnst5_m = __msa_ilvev_h(cnst5_m, cnst4_m);                       \
-  stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst5_m);        \
-  stp24_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst4_m);        \
-  stp23_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst5_m);        \
-                                                                   \
-  /* stp2 */                                                       \
-  BUTTERFLY_4(input0, input1, stp22_m, stp23_m,                    \
-              stp30_m, stp31_m, stp32_m, stp33_m);                 \
-  BUTTERFLY_4(input7, input6, stp25_m, stp24_m,                    \
-              stp37_m, stp36_m, stp35_m, stp34_m);                 \
-                                                                   \
-  ILVL_H2_SH(stp36_m, stp31_m, stp35_m, stp32_m, vec2_m, vec4_m);  \
-  ILVR_H2_SH(stp36_m, stp31_m, stp35_m, stp32_m, vec3_m, vec5_m);  \
-                                                                   \
-  SPLATI_H2_SH(coeff_m, 2, 3, cnst0_m, cnst1_m);                   \
-  cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                       \
-  stp26_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);        \
-                                                                   \
-  cnst0_m = __msa_splati_h(coeff_m, 4);                            \
-  cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                       \
-  stp21_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);        \
-                                                                   \
-  SPLATI_H2_SH(coeff_m, 5, 2, cnst0_m, cnst1_m);                   \
-  cnst1_m = __msa_ilvev_h(cnst0_m, cnst1_m);                       \
-  stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m);        \
-                                                                   \
-  cnst0_m = __msa_splati_h(coeff_m, 3);                            \
-  cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                       \
-  stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m);        \
-                                                                   \
-  /* stp4 */                                                       \
-  BUTTERFLY_4(stp30_m, stp37_m, stp26_m, stp21_m,                  \
-              vec6_m, vec2_m, vec4_m, vec5_m);                     \
-  BUTTERFLY_4(stp33_m, stp34_m, stp25_m, stp22_m,                  \
-              stp21_m, stp23_m, stp24_m, stp31_m);                 \
-                                                                   \
-  ILVRL_H2_SH(vec2_m, vec6_m, vec1_m, vec0_m);                     \
-  SPLATI_H2_SH(coeff1_m, 0, 1, cnst0_m, cnst1_m);                  \
-  cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                       \
-                                                                   \
-  out1 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);           \
-                                                                   \
-  cnst0_m = __msa_splati_h(coeff2_m, 0);                           \
-  cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                       \
-  out15 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);          \
-                                                                   \
-  ILVRL_H2_SH(vec4_m, vec5_m, vec1_m, vec0_m);                     \
-  SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                  \
-  cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                       \
-                                                                   \
-  out9 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);           \
-                                                                   \
-  cnst1_m = __msa_splati_h(coeff2_m, 2);                           \
-  cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                       \
-  out7 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);           \
-                                                                   \
-  ILVRL_H2_SH(stp23_m, stp21_m, vec1_m, vec0_m);                   \
-  SPLATI_H2_SH(coeff1_m, 4, 5, cnst0_m, cnst1_m);                  \
-  cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                       \
-  out5 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);           \
-                                                                   \
-  cnst0_m = __msa_splati_h(coeff2_m, 1);                           \
-  cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                       \
-  out11 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);          \
-                                                                   \
-  ILVRL_H2_SH(stp24_m, stp31_m, vec1_m, vec0_m);                   \
-  SPLATI_H2_SH(coeff1_m, 6, 7, cnst0_m, cnst1_m);                  \
-  cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                       \
-                                                                   \
-  out13 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);          \
-                                                                   \
-  cnst1_m = __msa_splati_h(coeff2_m, 3);                           \
-  cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                       \
-  out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);           \
-}
+#define FDCT8x16_ODD(input0, input1, input2, input3, input4, input5, input6,   \
+                     input7, out1, out3, out5, out7, out9, out11, out13,       \
+                     out15)                                                    \
+  {                                                                            \
+    v8i16 stp21_m, stp22_m, stp23_m, stp24_m, stp25_m, stp26_m;                \
+    v8i16 stp30_m, stp31_m, stp32_m, stp33_m, stp34_m, stp35_m;                \
+    v8i16 stp36_m, stp37_m, vec0_m, vec1_m;                                    \
+    v8i16 vec2_m, vec3_m, vec4_m, vec5_m, vec6_m;                              \
+    v8i16 cnst0_m, cnst1_m, cnst4_m, cnst5_m;                                  \
+    v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64,  cospi_24_64,     \
+                      -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 };   \
+    v8i16 coeff1_m = { cospi_2_64,  cospi_30_64, cospi_14_64, cospi_18_64,     \
+                       cospi_10_64, cospi_22_64, cospi_6_64,  cospi_26_64 };   \
+    v8i16 coeff2_m = {                                                         \
+      -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64, 0, 0, 0, 0        \
+    };                                                                         \
+                                                                               \
+    /* stp 1 */                                                                \
+    ILVL_H2_SH(input2, input5, input3, input4, vec2_m, vec4_m);                \
+    ILVR_H2_SH(input2, input5, input3, input4, vec3_m, vec5_m);                \
+                                                                               \
+    cnst4_m = __msa_splati_h(coeff_m, 0);                                      \
+    stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst4_m);                  \
+                                                                               \
+    cnst5_m = __msa_splati_h(coeff_m, 1);                                      \
+    cnst5_m = __msa_ilvev_h(cnst5_m, cnst4_m);                                 \
+    stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst5_m);                  \
+    stp24_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst4_m);                  \
+    stp23_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst5_m);                  \
+                                                                               \
+    /* stp2 */                                                                 \
+    BUTTERFLY_4(input0, input1, stp22_m, stp23_m, stp30_m, stp31_m, stp32_m,   \
+                stp33_m);                                                      \
+    BUTTERFLY_4(input7, input6, stp25_m, stp24_m, stp37_m, stp36_m, stp35_m,   \
+                stp34_m);                                                      \
+                                                                               \
+    ILVL_H2_SH(stp36_m, stp31_m, stp35_m, stp32_m, vec2_m, vec4_m);            \
+    ILVR_H2_SH(stp36_m, stp31_m, stp35_m, stp32_m, vec3_m, vec5_m);            \
+                                                                               \
+    SPLATI_H2_SH(coeff_m, 2, 3, cnst0_m, cnst1_m);                             \
+    cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                                 \
+    stp26_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);                  \
+                                                                               \
+    cnst0_m = __msa_splati_h(coeff_m, 4);                                      \
+    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                 \
+    stp21_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);                  \
+                                                                               \
+    SPLATI_H2_SH(coeff_m, 5, 2, cnst0_m, cnst1_m);                             \
+    cnst1_m = __msa_ilvev_h(cnst0_m, cnst1_m);                                 \
+    stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m);                  \
+                                                                               \
+    cnst0_m = __msa_splati_h(coeff_m, 3);                                      \
+    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                 \
+    stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m);                  \
+                                                                               \
+    /* stp4 */                                                                 \
+    BUTTERFLY_4(stp30_m, stp37_m, stp26_m, stp21_m, vec6_m, vec2_m, vec4_m,    \
+                vec5_m);                                                       \
+    BUTTERFLY_4(stp33_m, stp34_m, stp25_m, stp22_m, stp21_m, stp23_m, stp24_m, \
+                stp31_m);                                                      \
+                                                                               \
+    ILVRL_H2_SH(vec2_m, vec6_m, vec1_m, vec0_m);                               \
+    SPLATI_H2_SH(coeff1_m, 0, 1, cnst0_m, cnst1_m);                            \
+    cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                                 \
+                                                                               \
+    out1 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                     \
+                                                                               \
+    cnst0_m = __msa_splati_h(coeff2_m, 0);                                     \
+    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                 \
+    out15 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                    \
+                                                                               \
+    ILVRL_H2_SH(vec4_m, vec5_m, vec1_m, vec0_m);                               \
+    SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                            \
+    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                 \
+                                                                               \
+    out9 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);                     \
+                                                                               \
+    cnst1_m = __msa_splati_h(coeff2_m, 2);                                     \
+    cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                                 \
+    out7 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                     \
+                                                                               \
+    ILVRL_H2_SH(stp23_m, stp21_m, vec1_m, vec0_m);                             \
+    SPLATI_H2_SH(coeff1_m, 4, 5, cnst0_m, cnst1_m);                            \
+    cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                                 \
+    out5 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                     \
+                                                                               \
+    cnst0_m = __msa_splati_h(coeff2_m, 1);                                     \
+    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                 \
+    out11 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                    \
+                                                                               \
+    ILVRL_H2_SH(stp24_m, stp31_m, vec1_m, vec0_m);                             \
+    SPLATI_H2_SH(coeff1_m, 6, 7, cnst0_m, cnst1_m);                            \
+    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                                 \
+                                                                               \
+    out13 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);                    \
+                                                                               \
+    cnst1_m = __msa_splati_h(coeff2_m, 3);                                     \
+    cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m);                                 \
+    out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                     \
+  }
 
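+/* In-place post-processing of two v8i16 vectors; per-lane scalar
+ * equivalent:
+ *   out = (in + 1 + (in < 0)) >> 2;
+ * __msa_clti_s_h yields an all-ones lane where in < 0, which one_m
+ * masks down to the +1 bias before the divide-by-4 shift. */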
-#define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) {      \
-  v8i16 tp0_m, tp1_m;                             \
-  v8i16 one_m = __msa_ldi_h(1);                   \
-                                                  \
-  tp0_m = __msa_clti_s_h(vec0, 0);                \
-  tp1_m = __msa_clti_s_h(vec1, 0);                \
-  vec0 += 1;                                      \
-  vec1 += 1;                                      \
-  tp0_m = one_m & tp0_m;                          \
-  tp1_m = one_m & tp1_m;                          \
-  vec0 += tp0_m;                                  \
-  vec1 += tp1_m;                                  \
-  vec0 >>= 2;                                     \
-  vec1 >>= 2;                                     \
-}
+#define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) \
+  {                                        \
+    v8i16 tp0_m, tp1_m;                    \
+    v8i16 one_m = __msa_ldi_h(1);          \
+                                           \
+    tp0_m = __msa_clti_s_h(vec0, 0);       \
+    tp1_m = __msa_clti_s_h(vec1, 0);       \
+    vec0 += 1;                             \
+    vec1 += 1;                             \
+    tp0_m = one_m & tp0_m;                 \
+    tp1_m = one_m & tp1_m;                 \
+    vec0 += tp0_m;                         \
+    vec1 += tp1_m;                         \
+    vec0 >>= 2;                            \
+    vec1 >>= 2;                            \
+  }
 
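+/* Word-sized variant of the macro above, for one v4i32 vector:
+ *   out = (in + 1 + (in < 0)) >> 2; */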
-#define FDCT32_POSTPROC_NEG_W(vec) {      \
-  v4i32 temp_m;                           \
-  v4i32 one_m = __msa_ldi_w(1);           \
-                                          \
-  temp_m = __msa_clti_s_w(vec, 0);        \
-  vec += 1;                               \
-  temp_m = one_m & temp_m;                \
-  vec += temp_m;                          \
-  vec >>= 2;                              \
-}
+#define FDCT32_POSTPROC_NEG_W(vec)   \
+  {                                  \
+    v4i32 temp_m;                    \
+    v4i32 one_m = __msa_ldi_w(1);    \
+                                     \
+    temp_m = __msa_clti_s_w(vec, 0); \
+    vec += 1;                        \
+    temp_m = one_m & temp_m;         \
+    vec += temp_m;                   \
+    vec >>= 2;                       \
+  }
 
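+/* Companion of FDCT_POSTPROC_2V_NEG_H with the bias on positive lanes:
+ * __msa_clei_s_h marks in <= 0 and the xori with 255 inverts that mask,
+ * so per lane out = (in + 1 + (in > 0)) >> 2. */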
-#define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) {      \
-  v8i16 tp0_m, tp1_m;                               \
-  v8i16 one = __msa_ldi_h(1);                       \
+#define FDCT32_POSTPROC_2V_POS_H(vec0, vec1)        \
+  {                                                 \
+    v8i16 tp0_m, tp1_m;                             \
+    v8i16 one = __msa_ldi_h(1);                     \
                                                     \
-  tp0_m = __msa_clei_s_h(vec0, 0);                  \
-  tp1_m = __msa_clei_s_h(vec1, 0);                  \
-  tp0_m = (v8i16)__msa_xori_b((v16u8)tp0_m, 255);   \
-  tp1_m = (v8i16)__msa_xori_b((v16u8)tp1_m, 255);   \
-  vec0 += 1;                                        \
-  vec1 += 1;                                        \
-  tp0_m = one & tp0_m;                              \
-  tp1_m = one & tp1_m;                              \
-  vec0 += tp0_m;                                    \
-  vec1 += tp1_m;                                    \
-  vec0 >>= 2;                                       \
-  vec1 >>= 2;                                       \
-}
+    tp0_m = __msa_clei_s_h(vec0, 0);                \
+    tp1_m = __msa_clei_s_h(vec1, 0);                \
+    tp0_m = (v8i16)__msa_xori_b((v16u8)tp0_m, 255); \
+    tp1_m = (v8i16)__msa_xori_b((v16u8)tp1_m, 255); \
+    vec0 += 1;                                      \
+    vec1 += 1;                                      \
+    tp0_m = one & tp0_m;                            \
+    tp1_m = one & tp1_m;                            \
+    vec0 += tp0_m;                                  \
+    vec1 += tp1_m;                                  \
+    vec0 >>= 2;                                     \
+    vec1 >>= 2;                                     \
+  }
 
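+/* Butterfly rotation kept at 32-bit precision (the wide counterpart of
+ * DOTP_CONST_PAIR): with 64-bit accumulation and
+ * rnd = 1 << (DCT_CONST_BITS - 1), per lane
+ *   out0, out1 = (reg0 * const0 - reg1 * const1 + rnd) >> DCT_CONST_BITS
+ *   out2, out3 = (reg1 * const0 + reg0 * const1 + rnd) >> DCT_CONST_BITS
+ * __msa_srari_d performs the rounded shift and __msa_pckev_w repacks the
+ * results into v4i32 lanes. */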
-#define DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right,      \
-                          reg1_right, const0, const1,            \
-                          out0, out1, out2, out3) {              \
-  v4i32 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m;          \
-  v2i64 tp0_m, tp1_m, tp2_m, tp3_m;                              \
-  v4i32 k0_m = __msa_fill_w((int32_t) const0);                   \
-                                                                 \
-  s0_m = __msa_fill_w((int32_t) const1);                         \
-  k0_m = __msa_ilvev_w(s0_m, k0_m);                              \
-                                                                 \
-  ILVRL_W2_SW(-reg1_left, reg0_left, s1_m, s0_m);                \
-  ILVRL_W2_SW(reg0_left, reg1_left, s3_m, s2_m);                 \
-  ILVRL_W2_SW(-reg1_right, reg0_right, s5_m, s4_m);              \
-  ILVRL_W2_SW(reg0_right, reg1_right, s7_m, s6_m);               \
-                                                                 \
-  DOTP_SW2_SD(s0_m, s1_m, k0_m, k0_m, tp0_m, tp1_m);             \
-  DOTP_SW2_SD(s4_m, s5_m, k0_m, k0_m, tp2_m, tp3_m);             \
-  tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS);                  \
-  tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS);                  \
-  tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS);                  \
-  tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS);                  \
-  out0 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m);              \
-  out1 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m);              \
-                                                                 \
-  DOTP_SW2_SD(s2_m, s3_m, k0_m, k0_m, tp0_m, tp1_m);             \
-  DOTP_SW2_SD(s6_m, s7_m, k0_m, k0_m, tp2_m, tp3_m);             \
-  tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS);                  \
-  tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS);                  \
-  tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS);                  \
-  tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS);                  \
-  out2 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m);              \
-  out3 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m);              \
-}
+#define DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right, reg1_right, \
+                          const0, const1, out0, out1, out2, out3)       \
+  {                                                                     \
+    v4i32 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m;               \
+    v2i64 tp0_m, tp1_m, tp2_m, tp3_m;                                   \
+    v4i32 k0_m = __msa_fill_w((int32_t)const0);                         \
+                                                                        \
+    s0_m = __msa_fill_w((int32_t)const1);                               \
+    k0_m = __msa_ilvev_w(s0_m, k0_m);                                   \
+                                                                        \
+    ILVRL_W2_SW(-reg1_left, reg0_left, s1_m, s0_m);                     \
+    ILVRL_W2_SW(reg0_left, reg1_left, s3_m, s2_m);                      \
+    ILVRL_W2_SW(-reg1_right, reg0_right, s5_m, s4_m);                   \
+    ILVRL_W2_SW(reg0_right, reg1_right, s7_m, s6_m);                    \
+                                                                        \
+    DOTP_SW2_SD(s0_m, s1_m, k0_m, k0_m, tp0_m, tp1_m);                  \
+    DOTP_SW2_SD(s4_m, s5_m, k0_m, k0_m, tp2_m, tp3_m);                  \
+    tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS);                       \
+    tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS);                       \
+    tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS);                       \
+    tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS);                       \
+    out0 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m);                   \
+    out1 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m);                   \
+                                                                        \
+    DOTP_SW2_SD(s2_m, s3_m, k0_m, k0_m, tp0_m, tp1_m);                  \
+    DOTP_SW2_SD(s6_m, s7_m, k0_m, k0_m, tp2_m, tp3_m);                  \
+    tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS);                       \
+    tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS);                       \
+    tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS);                       \
+    tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS);                       \
+    out2 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m);                   \
+    out3 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m);                   \
+  }
 
 void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr,
                         int32_t src_stride);
diff --git a/vpx_dsp/mips/idct16x16_msa.c b/vpx_dsp/mips/idct16x16_msa.c
index c74e264439728b22a0c5efcde22eb7273759b018..8c9a8481f96d99fed8424f047873a152dcdc4cb3 100644
--- a/vpx_dsp/mips/idct16x16_msa.c
+++ b/vpx_dsp/mips/idct16x16_msa.c
@@ -20,10 +20,10 @@ void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
   input += 8;
   LD_SH8(input, 16, reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
 
-  TRANSPOSE8x8_SH_SH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
-                     reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
-  TRANSPOSE8x8_SH_SH(reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15,
-                     reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
+  TRANSPOSE8x8_SH_SH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg0, reg1,
+                     reg2, reg3, reg4, reg5, reg6, reg7);
+  TRANSPOSE8x8_SH_SH(reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15, reg8,
+                     reg9, reg10, reg11, reg12, reg13, reg14, reg15);
   DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
   DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
   BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
@@ -93,13 +93,13 @@ void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
   reg3 = tmp7;
 
   /* transpose block */
-  TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14,
-                     reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);
+  TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, reg0,
+                     reg2, reg4, reg6, reg8, reg10, reg12, reg14);
   ST_SH8(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, output, 16);
 
   /* transpose block */
-  TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15,
-                     reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);
+  TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, reg3,
+                     reg13, reg11, reg5, reg7, reg9, reg1, reg15);
   ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16);
 }
 
@@ -233,7 +233,7 @@ void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
   /* short case just considers top 4 rows as valid output */
   out += 4 * 16;
   for (i = 12; i--;) {
-    __asm__ __volatile__ (
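+    /* each pass zeroes one 16-coefficient (32-byte) row; after the
+     * out += 4 * 16 above, the 12 passes clear rows 4..15 */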
+    __asm__ __volatile__(
         "sw     $zero,   0(%[out])     \n\t"
         "sw     $zero,   4(%[out])     \n\t"
         "sw     $zero,   8(%[out])     \n\t"
@@ -244,8 +244,7 @@ void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
         "sw     $zero,  28(%[out])     \n\t"
 
         :
-        : [out] "r" (out)
-    );
+        : [out] "r"(out));
 
     out += 16;
   }
@@ -283,8 +282,8 @@ void vpx_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
     ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
     CLIP_SH4_0_255(res0, res1, res2, res3);
     CLIP_SH4_0_255(res4, res5, res6, res7);
-    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
-                tmp0, tmp1, tmp2, tmp3);
+    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1,
+                tmp2, tmp3);
     ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
     dst += (4 * dst_stride);
   }
@@ -295,29 +294,28 @@ void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) {
   v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
 
   /* load input data */
-  LD_SH16(input, 8,
-          l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14, l7, l15);
-  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
-                     l0, l1, l2, l3, l4, l5, l6, l7);
-  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
-                     l8, l9, l10, l11, l12, l13, l14, l15);
+  LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+          l7, l15);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, l0, l1, l2, l3, l4, l5, l6,
+                     l7);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, l8, l9, l10, l11,
+                     l12, l13, l14, l15);
 
   /* ADST in horizontal */
-  VPX_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7,
-                   l8, l9, l10, l11, l12, l13, l14, l15,
-                   r0, r1, r2, r3, r4, r5, r6, r7,
-                   r8, r9, r10, r11, r12, r13, r14, r15);
+  VPX_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13,
+                   l14, l15, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11,
+                   r12, r13, r14, r15);
 
   l1 = -r8;
   l3 = -r4;
   l13 = -r13;
   l15 = -r1;
 
-  TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2,
-                     l0, l1, l2, l3, l4, l5, l6, l7);
+  TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2, l0, l1, l2, l3, l4, l5,
+                     l6, l7);
   ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16);
-  TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15,
-                     l8, l9, l10, l11, l12, l13, l14, l15);
+  TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15, l8, l9, l10, l11, l12,
+                     l13, l14, l15);
   ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);
 }
 
diff --git a/vpx_dsp/mips/idct32x32_msa.c b/vpx_dsp/mips/idct32x32_msa.c
index de47597a8d54e1ffed2ccb99c81328210549c2f2..ed5cef18a9c0426dc008d3946e27adb8d7ce44b0 100644
--- a/vpx_dsp/mips/idct32x32_msa.c
+++ b/vpx_dsp/mips/idct32x32_msa.c
@@ -17,10 +17,10 @@ static void idct32x8_row_transpose_store(const int16_t *input,
   /* 1st & 2nd 8x8 */
   LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);
   LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);
-  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
-                     m0, n0, m1, n1, m2, n2, m3, n3);
-  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
-                     m4, n4, m5, n5, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3,
+                     n3);
+  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7,
+                     n7);
   ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);
   ST_SH4(m4, n4, m5, n5, (tmp_buf + 8 * 8), 8);
   ST_SH4(m6, n6, m7, n7, (tmp_buf + 12 * 8), 8);
@@ -28,10 +28,10 @@ static void idct32x8_row_transpose_store(const int16_t *input,
   /* 3rd & 4th 8x8 */
   LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);
   LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);
-  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
-                     m0, n0, m1, n1, m2, n2, m3, n3);
-  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
-                     m4, n4, m5, n5, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3,
+                     n3);
+  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7,
+                     n7);
   ST_SH4(m0, n0, m1, n1, (tmp_buf + 16 * 8), 8);
   ST_SH4(m2, n2, m3, n3, (tmp_buf + 20 * 8), 8);
   ST_SH4(m4, n4, m5, n5, (tmp_buf + 24 * 8), 8);
@@ -186,8 +186,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
   DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
 
   /* 4 Stores */
-  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
-       vec0, vec1, vec2, vec3);
+  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
   DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
   DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
 
@@ -198,8 +197,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
   ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);
 
   /* 4 Stores */
-  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
-       vec1, vec2, vec0, vec3);
+  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec1, vec2, vec0, vec3);
   BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
   ST_SH(reg0, (tmp_odd_buf + 13 * 8));
   ST_SH(reg1, (tmp_odd_buf + 14 * 8));
@@ -213,8 +211,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
   LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
   LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);
 
-  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
-       loc0, loc1, loc2, loc3);
+  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
   ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);
 
   SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
@@ -228,8 +225,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
   LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
   LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);
 
-  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
-       loc0, loc1, loc2, loc3);
+  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
   ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);
 
   SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
@@ -242,8 +238,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
 
 static void idct_butterfly_transpose_store(int16_t *tmp_buf,
                                            int16_t *tmp_eve_buf,
-                                           int16_t *tmp_odd_buf,
-                                           int16_t *dst) {
+                                           int16_t *tmp_odd_buf, int16_t *dst) {
   v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
   v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
 
@@ -317,26 +312,26 @@ static void idct_butterfly_transpose_store(int16_t *tmp_buf,
 
   /* Transpose : 16 vectors */
   /* 1st & 2nd 8x8 */
-  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
-                     m0, n0, m1, n1, m2, n2, m3, n3);
+  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3,
+                     n3);
   ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
   ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);
 
-  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
-                     m4, n4, m5, n5, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7,
+                     n7);
   ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
   ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);
 
   /* 3rd & 4th 8x8 */
   LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
   LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
-  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
-                     m0, n0, m1, n1, m2, n2, m3, n3);
+  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3,
+                     n3);
   ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
   ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);
 
-  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
-                     m4, n4, m5, n5, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7,
+                     n7);
   ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
   ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
 }
@@ -349,8 +344,8 @@ static void idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) {
   idct32x8_row_transpose_store(input, &tmp_buf[0]);
   idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]);
   idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]);
-  idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0],
-                                 &tmp_odd_buf[0], output);
+  idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0], &tmp_odd_buf[0],
+                                 output);
 }
 
 static void idct8x32_column_even_process_store(int16_t *tmp_buf,
@@ -541,8 +536,7 @@ static void idct8x32_column_odd_process_store(int16_t *tmp_buf,
 }
 
 static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
-                                             int16_t *tmp_odd_buf,
-                                             uint8_t *dst,
+                                             int16_t *tmp_odd_buf, uint8_t *dst,
                                              int32_t dst_stride) {
   v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
   v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
@@ -563,8 +557,8 @@ static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
   SRARI_H4_SH(m0, m2, m4, m6, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
-                      m0, m2, m4, m6);
+  VPX_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4,
+                      m6);
 
   /* Load 8 & Store 8 */
   vec0 = LD_SH(tmp_odd_buf + 4 * 8);
@@ -578,13 +572,12 @@ static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
   SRARI_H4_SH(m1, m3, m5, m7, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
-                      m1, m3, m5, m7);
+  VPX_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
   SRARI_H4_SH(m1, m3, m5, m7, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
-                      m1, m3, m5, m7);
+  VPX_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5,
+                      m7);
 
   /* Load 8 & Store 8 */
   vec0 = LD_SH(tmp_odd_buf + 2 * 8);
@@ -598,13 +591,12 @@ static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
   SRARI_H4_SH(n0, n2, n4, n6, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
-                      n0, n2, n4, n6);
+  VPX_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
   SRARI_H4_SH(n0, n2, n4, n6, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
-                      n0, n2, n4, n6);
+  VPX_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4,
+                      n6);
 
   /* Load 8 & Store 8 */
   vec0 = LD_SH(tmp_odd_buf + 5 * 8);
@@ -618,13 +610,12 @@ static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
   SRARI_H4_SH(n1, n3, n5, n7, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
-                      n1, n3, n5, n7);
+  VPX_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
   SRARI_H4_SH(n1, n3, n5, n7, 6);
-  VPX_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
-                      n1, n3, n5, n7);
+  VPX_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5,
+                      n7);
 }
 
 static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
@@ -634,8 +625,8 @@ static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
 
   idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
   idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
-  idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0],
-                                   dst, dst_stride);
+  idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], dst,
+                                   dst_stride);
 }
 
 void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
@@ -665,7 +656,7 @@ void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
   int16_t *out_ptr = out_arr;
 
   for (i = 32; i--;) {
-    __asm__ __volatile__ (
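+    /* each pass zeroes one 32-coefficient (64-byte) row of the output
+     * scratch buffer */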
+    __asm__ __volatile__(
         "sw     $zero,      0(%[out_ptr])     \n\t"
         "sw     $zero,      4(%[out_ptr])     \n\t"
         "sw     $zero,      8(%[out_ptr])     \n\t"
@@ -684,8 +675,7 @@ void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
         "sw     $zero,     60(%[out_ptr])     \n\t"
 
         :
-        : [out_ptr] "r" (out_ptr)
-    );
+        : [out_ptr] "r"(out_ptr));
 
     out_ptr += 32;
   }
@@ -728,8 +718,8 @@ void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
     ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
     CLIP_SH4_0_255(res0, res1, res2, res3);
     CLIP_SH4_0_255(res4, res5, res6, res7);
-    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
-                tmp0, tmp1, tmp2, tmp3);
+    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1,
+                tmp2, tmp3);
 
     ST_UB2(tmp0, tmp1, dst, 16);
     dst += dst_stride;
diff --git a/vpx_dsp/mips/idct4x4_msa.c b/vpx_dsp/mips/idct4x4_msa.c
index 04064f87dfef555d140eba00b3f1e2aa06b379b7..50e824850d0c1f9f44ce7ba4a1b353a432231671 100644
--- a/vpx_dsp/mips/idct4x4_msa.c
+++ b/vpx_dsp/mips/idct4x4_msa.c
@@ -42,8 +42,8 @@ void vpx_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
   in0_r -= in3_r;
   in2_r += in1_r;
 
-  PCKEV_H4_SH(in0_r, in0_r, in1_r, in1_r, in2_r, in2_r, in3_r, in3_r,
-              in0, in1, in2, in3);
+  PCKEV_H4_SH(in0_r, in0_r, in1_r, in1_r, in2_r, in2_r, in3_r, in3_r, in0, in1,
+              in2, in3);
   ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);
 }
 
diff --git a/vpx_dsp/mips/idct8x8_msa.c b/vpx_dsp/mips/idct8x8_msa.c
index 6a24935fffaec50d61f0c90cfdc99c3b622fb7bd..c06330b027f93b8df30f99e4b87df8a5d21eafde 100644
--- a/vpx_dsp/mips/idct8x8_msa.c
+++ b/vpx_dsp/mips/idct8x8_msa.c
@@ -18,17 +18,17 @@ void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
   LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
 
   /* rows transform */
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
   /* 1D idct8x8 */
-  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
-                 in0, in1, in2, in3, in4, in5, in6, in7);
+  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                 in4, in5, in6, in7);
   /* columns transform */
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
   /* 1D idct8x8 */
-  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
-                 in0, in1, in2, in3, in4, in5, in6, in7);
+  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                 in4, in5, in6, in7);
   /* final rounding (add 2^4, divide by 2^5) and shift */
   SRARI_H4_SH(in0, in1, in2, in3, 5);
   SRARI_H4_SH(in4, in5, in6, in7, 5);
@@ -82,12 +82,12 @@ void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);
 
   /* stage4 */
-  BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7,
-              in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
-                 in0, in1, in2, in3, in4, in5, in6, in7);
+  BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7, in0, in1, in2, in3, in4, in5, in6,
+              in7);
+  TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
+  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                 in4, in5, in6, in7);
 
   /* final rounding (add 2^4, divide by 2^5) and shift */
   SRARI_H4_SH(in0, in1, in2, in3, 5);
diff --git a/vpx_dsp/mips/intrapred16_dspr2.c b/vpx_dsp/mips/intrapred16_dspr2.c
index 11444c718e7b4ccc8687e7c54bd42b41173e8dac..3e29d0ac39f37fabee210ccdb953332d78fe6e6e 100644
--- a/vpx_dsp/mips/intrapred16_dspr2.c
+++ b/vpx_dsp/mips/intrapred16_dspr2.c
@@ -13,10 +13,10 @@
 #if HAVE_DSPR2
 void vpx_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
-  int32_t  tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
-  int32_t  tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
+  int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+  int32_t tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "lb         %[tmp1],      (%[left])                    \n\t"
       "lb         %[tmp2],      1(%[left])                   \n\t"
       "lb         %[tmp3],      2(%[left])                   \n\t"
@@ -146,26 +146,23 @@ void vpx_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
       "sw         %[tmp16],     8(%[dst])                    \n\t"
       "sw         %[tmp16],     12(%[dst])                   \n\t"
 
-      : [tmp1] "=&r" (tmp1),   [tmp2] "=&r" (tmp2),
-        [tmp3] "=&r" (tmp3),   [tmp4] "=&r" (tmp4),
-        [tmp5] "=&r" (tmp5),   [tmp7] "=&r" (tmp7),
-        [tmp6] "=&r" (tmp6),   [tmp8] "=&r" (tmp8),
-        [tmp9] "=&r" (tmp9),   [tmp10] "=&r" (tmp10),
-        [tmp11] "=&r" (tmp11), [tmp12] "=&r" (tmp12),
-        [tmp13] "=&r" (tmp13), [tmp14] "=&r" (tmp14),
-        [tmp15] "=&r" (tmp15), [tmp16] "=&r" (tmp16)
-      : [left] "r" (left), [dst] "r" (dst), [stride] "r" (stride)
-  );
+      : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3),
+        [tmp4] "=&r"(tmp4), [tmp5] "=&r"(tmp5), [tmp7] "=&r"(tmp7),
+        [tmp6] "=&r"(tmp6), [tmp8] "=&r"(tmp8), [tmp9] "=&r"(tmp9),
+        [tmp10] "=&r"(tmp10), [tmp11] "=&r"(tmp11), [tmp12] "=&r"(tmp12),
+        [tmp13] "=&r"(tmp13), [tmp14] "=&r"(tmp14), [tmp15] "=&r"(tmp15),
+        [tmp16] "=&r"(tmp16)
+      : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
 }
 
 void vpx_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *above, const uint8_t *left) {
-  int32_t  expected_dc;
-  int32_t  average;
-  int32_t  tmp, above1, above_l1, above_r1, left1, left_r1, left_l1;
-  int32_t  above2, left2;
+  int32_t expected_dc;
+  int32_t average;
+  int32_t tmp, above1, above_l1, above_r1, left1, left_r1, left_l1;
+  int32_t above2, left2;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "lw              %[above1],           (%[above])                    \n\t"
       "lw              %[above2],           4(%[above])                   \n\t"
       "lw              %[left1],            (%[left])                     \n\t"
@@ -316,14 +313,12 @@ void vpx_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride,
       "sw              %[expected_dc],      8(%[dst])                     \n\t"
       "sw              %[expected_dc],      12(%[dst])                    \n\t"
 
-      : [left1] "=&r" (left1), [above1] "=&r" (above1),
-        [left_l1] "=&r" (left_l1), [above_l1] "=&r" (above_l1),
-        [left_r1] "=&r" (left_r1), [above_r1] "=&r" (above_r1),
-        [above2] "=&r" (above2), [left2] "=&r" (left2),
-        [average] "=&r" (average), [tmp] "=&r" (tmp),
-        [expected_dc] "=&r" (expected_dc)
-      : [above] "r" (above), [left] "r" (left),
-        [dst] "r" (dst), [stride] "r" (stride)
-  );
+      : [left1] "=&r"(left1), [above1] "=&r"(above1), [left_l1] "=&r"(left_l1),
+        [above_l1] "=&r"(above_l1), [left_r1] "=&r"(left_r1),
+        [above_r1] "=&r"(above_r1), [above2] "=&r"(above2),
+        [left2] "=&r"(left2), [average] "=&r"(average), [tmp] "=&r"(tmp),
+        [expected_dc] "=&r"(expected_dc)
+      : [above] "r"(above), [left] "r"(left), [dst] "r"(dst),
+        [stride] "r"(stride));
 }
 #endif  // #if HAVE_DSPR2
diff --git a/vpx_dsp/mips/intrapred4_dspr2.c b/vpx_dsp/mips/intrapred4_dspr2.c
index 03baf4c9cc81019ba6960e299414831da42fc131..9f51d50c752f820b4a90fd65636375799977d6de 100644
--- a/vpx_dsp/mips/intrapred4_dspr2.c
+++ b/vpx_dsp/mips/intrapred4_dspr2.c
@@ -13,9 +13,9 @@
 #if HAVE_DSPR2
 void vpx_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
-  int32_t  tmp1, tmp2, tmp3, tmp4;
+  int32_t tmp1, tmp2, tmp3, tmp4;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "lb         %[tmp1],      (%[left])                    \n\t"
       "lb         %[tmp2],      1(%[left])                   \n\t"
       "lb         %[tmp3],      2(%[left])                   \n\t"
@@ -32,19 +32,18 @@ void vpx_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
       "add        %[dst],       %[dst],         %[stride]    \n\t"
       "sw         %[tmp4],      (%[dst])                     \n\t"
 
-      : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2),
-        [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4)
-      : [left] "r" (left), [dst] "r" (dst), [stride] "r" (stride)
-  );
+      : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3),
+        [tmp4] "=&r"(tmp4)
+      : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
 }
 
 void vpx_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
-  int32_t  expected_dc;
-  int32_t  average;
-  int32_t  tmp, above_c, above_l, above_r, left_c, left_r, left_l;
+  int32_t expected_dc;
+  int32_t average;
+  int32_t tmp, above_c, above_l, above_r, left_c, left_r, left_l;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "lw              %[above_c],         (%[above])                    \n\t"
       "lw              %[left_c],          (%[left])                     \n\t"
 
@@ -70,27 +69,26 @@ void vpx_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
       "add             %[dst],              %[dst],          %[stride]   \n\t"
       "sw              %[expected_dc],     (%[dst])                      \n\t"
 
-      : [above_c] "=&r" (above_c), [above_l] "=&r" (above_l),
-        [above_r] "=&r" (above_r), [left_c] "=&r" (left_c),
-        [left_l] "=&r" (left_l), [left_r] "=&r" (left_r),
-        [average] "=&r" (average), [tmp] "=&r" (tmp),
-        [expected_dc] "=&r" (expected_dc)
-      : [above] "r" (above), [left] "r" (left),
-        [dst] "r" (dst), [stride] "r" (stride)
-  );
+      : [above_c] "=&r"(above_c), [above_l] "=&r"(above_l),
+        [above_r] "=&r"(above_r), [left_c] "=&r"(left_c),
+        [left_l] "=&r"(left_l), [left_r] "=&r"(left_r),
+        [average] "=&r"(average), [tmp] "=&r"(tmp),
+        [expected_dc] "=&r"(expected_dc)
+      : [above] "r"(above), [left] "r"(left), [dst] "r"(dst),
+        [stride] "r"(stride));
 }
 
 void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
-  int32_t  abovel, abover;
-  int32_t  left0, left1, left2, left3;
-  int32_t  res0, res1;
-  int32_t  resl;
-  int32_t  resr;
-  int32_t  top_left;
-  uint8_t  *cm = vpx_ff_cropTbl;
-
-  __asm__ __volatile__ (
+  int32_t abovel, abover;
+  int32_t left0, left1, left2, left3;
+  int32_t res0, res1;
+  int32_t resl;
+  int32_t resr;
+  int32_t top_left;
+  uint8_t *cm = vpx_ff_cropTbl;
+
+  __asm__ __volatile__(
       "ulw             %[resl],       (%[above])                         \n\t"
 
       "lbu             %[left0],       (%[left])                         \n\t"
@@ -174,7 +172,6 @@ void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
       "sra             %[res0],        %[res0],           16             \n\t"
       "lbux            %[res0],        %[res0](%[cm])                    \n\t"
 
-
       "sra             %[res1],        %[resr],           16             \n\t"
       "lbux            %[res1],        %[res1](%[cm])                    \n\t"
       "sb              %[res0],        (%[dst])                          \n\t"
@@ -183,7 +180,6 @@ void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
       "sra             %[res0],        %[res0],           16             \n\t"
       "lbux            %[res0],        %[res0](%[cm])                    \n\t"
 
-
       "sb              %[res1],        1(%[dst])                         \n\t"
       "sra             %[res1],        %[resl],           16             \n\t"
       "lbux            %[res1],        %[res1](%[cm])                    \n\t"
@@ -218,12 +214,11 @@ void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride,
       "sb              %[res0],        2(%[dst])                         \n\t"
       "sb              %[res1],        3(%[dst])                         \n\t"
 
-      : [abovel] "=&r" (abovel), [abover] "=&r" (abover),
-        [left0] "=&r" (left0), [left1] "=&r" (left1), [left2] "=&r" (left2),
-        [res0] "=&r" (res0), [res1] "=&r" (res1), [left3] "=&r" (left3),
-        [resl] "=&r" (resl), [resr] "=&r" (resr), [top_left] "=&r" (top_left)
-      : [above] "r" (above), [left] "r" (left),
-        [dst] "r" (dst), [stride] "r" (stride), [cm] "r" (cm)
-  );
+      : [abovel] "=&r"(abovel), [abover] "=&r"(abover), [left0] "=&r"(left0),
+        [left1] "=&r"(left1), [left2] "=&r"(left2), [res0] "=&r"(res0),
+        [res1] "=&r"(res1), [left3] "=&r"(left3), [resl] "=&r"(resl),
+        [resr] "=&r"(resr), [top_left] "=&r"(top_left)
+      : [above] "r"(above), [left] "r"(left), [dst] "r"(dst),
+        [stride] "r"(stride), [cm] "r"(cm));
 }
 #endif  // #if HAVE_DSPR2
diff --git a/vpx_dsp/mips/intrapred8_dspr2.c b/vpx_dsp/mips/intrapred8_dspr2.c
index 196ff5a062ef53ee03f1a769f534bb91ff042810..eac79d51000b0afb5fc8c2b023282eeb45ed17f2 100644
--- a/vpx_dsp/mips/intrapred8_dspr2.c
+++ b/vpx_dsp/mips/intrapred8_dspr2.c
@@ -13,9 +13,9 @@
 #if HAVE_DSPR2
 void vpx_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
-  int32_t  tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+  int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "lb         %[tmp1],      (%[left])                   \n\t"
       "lb         %[tmp2],      1(%[left])                  \n\t"
       "lb         %[tmp3],      2(%[left])                  \n\t"
@@ -58,23 +58,20 @@ void vpx_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
       "sw         %[tmp8],      (%[dst])                    \n\t"
       "sw         %[tmp8],      4(%[dst])                   \n\t"
 
-      : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2),
-        [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4),
-        [tmp5] "=&r" (tmp5), [tmp7] "=&r" (tmp7),
-        [tmp6] "=&r" (tmp6), [tmp8] "=&r" (tmp8)
-      : [left] "r" (left), [dst] "r" (dst),
-        [stride] "r" (stride)
-  );
+      : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3),
+        [tmp4] "=&r"(tmp4), [tmp5] "=&r"(tmp5), [tmp7] "=&r"(tmp7),
+        [tmp6] "=&r"(tmp6), [tmp8] "=&r"(tmp8)
+      : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride));
 }
 
 void vpx_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
-  int32_t  expected_dc;
-  int32_t  average;
-  int32_t  tmp, above1, above_l1, above_r1, left1, left_r1, left_l1;
-  int32_t  above2, above_l2, above_r2, left2, left_r2, left_l2;
+  int32_t expected_dc;
+  int32_t average;
+  int32_t tmp, above1, above_l1, above_r1, left1, left_r1, left_l1;
+  int32_t above2, above_l2, above_r2, left2, left_r2, left_l2;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "lw              %[above1],         (%[above])                      \n\t"
       "lw              %[above2],         4(%[above])                     \n\t"
       "lw              %[left1],          (%[left])                       \n\t"
@@ -137,30 +134,29 @@ void vpx_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
       "sw              %[expected_dc],    (%[dst])                        \n\t"
       "sw              %[expected_dc],    4(%[dst])                       \n\t"
 
-      : [above1] "=&r" (above1), [above_l1] "=&r" (above_l1),
-        [above_r1] "=&r" (above_r1), [left1] "=&r" (left1),
-        [left_l1] "=&r" (left_l1), [left_r1] "=&r" (left_r1),
-        [above2] "=&r" (above2), [above_l2] "=&r" (above_l2),
-        [above_r2] "=&r" (above_r2), [left2] "=&r" (left2),
-        [left_l2] "=&r" (left_l2), [left_r2] "=&r" (left_r2),
-        [average] "=&r" (average), [tmp] "=&r" (tmp),
-        [expected_dc] "=&r" (expected_dc)
-      : [above] "r" (above), [left] "r" (left), [dst] "r" (dst),
-        [stride] "r" (stride)
-  );
+      : [above1] "=&r"(above1), [above_l1] "=&r"(above_l1),
+        [above_r1] "=&r"(above_r1), [left1] "=&r"(left1),
+        [left_l1] "=&r"(left_l1), [left_r1] "=&r"(left_r1),
+        [above2] "=&r"(above2), [above_l2] "=&r"(above_l2),
+        [above_r2] "=&r"(above_r2), [left2] "=&r"(left2),
+        [left_l2] "=&r"(left_l2), [left_r2] "=&r"(left_r2),
+        [average] "=&r"(average), [tmp] "=&r"(tmp),
+        [expected_dc] "=&r"(expected_dc)
+      : [above] "r"(above), [left] "r"(left), [dst] "r"(dst),
+        [stride] "r"(stride));
 }
 
 void vpx_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
-  int32_t   abovel, abover;
-  int32_t   abovel_1, abover_1;
-  int32_t   left0;
-  int32_t   res0, res1, res2, res3;
-  int32_t   reshw;
-  int32_t   top_left;
-  uint8_t   *cm = vpx_ff_cropTbl;
-
-  __asm__ __volatile__ (
+  int32_t abovel, abover;
+  int32_t abovel_1, abover_1;
+  int32_t left0;
+  int32_t res0, res1, res2, res3;
+  int32_t reshw;
+  int32_t top_left;
+  uint8_t *cm = vpx_ff_cropTbl;
+
+  __asm__ __volatile__(
       "ulw             %[reshw],       (%[above])                         \n\t"
       "ulw             %[top_left],    4(%[above])                        \n\t"
 
@@ -595,13 +591,12 @@ void vpx_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride,
       "sb              %[res2],        6(%[dst])                          \n\t"
       "sb              %[res3],        7(%[dst])                          \n\t"
 
-      : [abovel] "=&r" (abovel), [abover] "=&r" (abover),
-        [abovel_1] "=&r" (abovel_1), [abover_1] "=&r" (abover_1),
-        [left0] "=&r" (left0), [res2] "=&r" (res2), [res3] "=&r" (res3),
-        [res0] "=&r" (res0), [res1] "=&r" (res1),
-        [reshw] "=&r" (reshw), [top_left] "=&r" (top_left)
-      : [above] "r" (above), [left] "r" (left),
-        [dst] "r" (dst), [stride] "r" (stride), [cm] "r" (cm)
-  );
+      : [abovel] "=&r"(abovel), [abover] "=&r"(abover),
+        [abovel_1] "=&r"(abovel_1), [abover_1] "=&r"(abover_1),
+        [left0] "=&r"(left0), [res2] "=&r"(res2), [res3] "=&r"(res3),
+        [res0] "=&r"(res0), [res1] "=&r"(res1), [reshw] "=&r"(reshw),
+        [top_left] "=&r"(top_left)
+      : [above] "r"(above), [left] "r"(left), [dst] "r"(dst),
+        [stride] "r"(stride), [cm] "r"(cm));
 }
 #endif  // #if HAVE_DSPR2
diff --git a/vpx_dsp/mips/intrapred_msa.c b/vpx_dsp/mips/intrapred_msa.c
index f6fbe4016257c93a8cac7a071e7205c031828015..b5ee943031a0982bbff270d62afe9b8346a400ef 100644
--- a/vpx_dsp/mips/intrapred_msa.c
+++ b/vpx_dsp/mips/intrapred_msa.c
@@ -11,10 +11,11 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/mips/macros_msa.h"
 
-#define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) {  \
-  out0 = __msa_subs_u_h(out0, in0);                \
-  out1 = __msa_subs_u_h(out1, in1);                \
-}
+#define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) \
+  {                                             \
+    out0 = __msa_subs_u_h(out0, in0);           \
+    out1 = __msa_subs_u_h(out1, in1);           \
+  }
 
 static void intra_predict_vert_4x4_msa(const uint8_t *src, uint8_t *dst,
                                        int32_t dst_stride) {
@@ -150,8 +151,8 @@ static void intra_predict_horiz_32x32_msa(const uint8_t *src, uint8_t *dst,
 }
 
 static void intra_predict_dc_4x4_msa(const uint8_t *src_top,
-                                     const uint8_t *src_left,
-                                     uint8_t *dst, int32_t dst_stride) {
+                                     const uint8_t *src_left, uint8_t *dst,
+                                     int32_t dst_stride) {
   uint32_t val0, val1;
   v16i8 store, src = { 0 };
   v8u16 sum_h;
@@ -199,8 +200,8 @@ static void intra_predict_128dc_4x4_msa(uint8_t *dst, int32_t dst_stride) {
 }
 
 static void intra_predict_dc_8x8_msa(const uint8_t *src_top,
-                                     const uint8_t *src_left,
-                                     uint8_t *dst, int32_t dst_stride) {
+                                     const uint8_t *src_left, uint8_t *dst,
+                                     int32_t dst_stride) {
   uint64_t val0, val1;
   v16i8 store;
   v16u8 src = { 0 };
@@ -260,8 +261,8 @@ static void intra_predict_128dc_8x8_msa(uint8_t *dst, int32_t dst_stride) {
 }
 
 static void intra_predict_dc_16x16_msa(const uint8_t *src_top,
-                                       const uint8_t *src_left,
-                                       uint8_t *dst, int32_t dst_stride) {
+                                       const uint8_t *src_left, uint8_t *dst,
+                                       int32_t dst_stride) {
   v16u8 top, left, out;
   v8u16 sum_h, sum_top, sum_left;
   v4u32 sum_w;
@@ -313,8 +314,8 @@ static void intra_predict_128dc_16x16_msa(uint8_t *dst, int32_t dst_stride) {
 }
 
 static void intra_predict_dc_32x32_msa(const uint8_t *src_top,
-                                       const uint8_t *src_left,
-                                       uint8_t *dst, int32_t dst_stride) {
+                                       const uint8_t *src_left, uint8_t *dst,
+                                       int32_t dst_stride) {
   uint32_t row;
   v16u8 top0, top1, left0, left1, out;
   v8u16 sum_h, sum_top0, sum_top1, sum_left0, sum_left1;
@@ -381,8 +382,8 @@ static void intra_predict_128dc_32x32_msa(uint8_t *dst, int32_t dst_stride) {
 }
 
 static void intra_predict_tm_4x4_msa(const uint8_t *src_top_ptr,
-                                     const uint8_t *src_left,
-                                     uint8_t *dst, int32_t dst_stride) {
+                                     const uint8_t *src_left, uint8_t *dst,
+                                     int32_t dst_stride) {
   uint32_t val;
   uint8_t top_left = src_top_ptr[-1];
   v16i8 src_left0, src_left1, src_left2, src_left3, tmp0, tmp1, src_top = { 0 };
@@ -409,8 +410,8 @@ static void intra_predict_tm_4x4_msa(const uint8_t *src_top_ptr,
 }
 
 static void intra_predict_tm_8x8_msa(const uint8_t *src_top_ptr,
-                                     const uint8_t *src_left,
-                                     uint8_t *dst, int32_t dst_stride) {
+                                     const uint8_t *src_left, uint8_t *dst,
+                                     int32_t dst_stride) {
   uint64_t val;
   uint8_t top_left = src_top_ptr[-1];
   uint32_t loop_cnt;
@@ -442,8 +443,8 @@ static void intra_predict_tm_8x8_msa(const uint8_t *src_top_ptr,
 }
 
 static void intra_predict_tm_16x16_msa(const uint8_t *src_top_ptr,
-                                       const uint8_t *src_left,
-                                       uint8_t *dst, int32_t dst_stride) {
+                                       const uint8_t *src_left, uint8_t *dst,
+                                       int32_t dst_stride) {
   uint8_t top_left = src_top_ptr[-1];
   uint32_t loop_cnt;
   v16i8 src_top, src_left0, src_left1, src_left2, src_left3;
@@ -491,8 +492,8 @@ static void intra_predict_tm_16x16_msa(const uint8_t *src_top_ptr,
 }
 
 static void intra_predict_tm_32x32_msa(const uint8_t *src_top,
-                                       const uint8_t *src_left,
-                                       uint8_t *dst, int32_t dst_stride) {
+                                       const uint8_t *src_left, uint8_t *dst,
+                                       int32_t dst_stride) {
   uint8_t top_left = src_top[-1];
   uint32_t loop_cnt;
   v16i8 src_top0, src_top1, src_left0, src_left1, src_left2, src_left3;
diff --git a/vpx_dsp/mips/inv_txfm_dspr2.h b/vpx_dsp/mips/inv_txfm_dspr2.h
index abd85091188eb371cc2375b26665c3cd9e95e118..69223a4460a255423fad8a8cd894f94985defc57 100644
--- a/vpx_dsp/mips/inv_txfm_dspr2.h
+++ b/vpx_dsp/mips/inv_txfm_dspr2.h
@@ -23,31 +23,40 @@ extern "C" {
 #endif
 
 #if HAVE_DSPR2
-#define DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input)                    ({   \
+#define DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input)                         \
+  ({                                                                           \
                                                                                \
-  int32_t tmp, out;                                                            \
-  int     dct_cost_rounding = DCT_CONST_ROUNDING;                              \
-  int     in = input;                                                          \
+    int32_t tmp, out;                                                          \
+    int dct_cost_rounding = DCT_CONST_ROUNDING;                                \
+    int in = input;                                                            \
                                                                                \
-  __asm__ __volatile__ (                                                       \
-      /* out = dct_const_round_shift(input_dc * cospi_16_64); */               \
-      "mtlo     %[dct_cost_rounding],   $ac1                              \n\t"\
-      "mthi     $zero,                  $ac1                              \n\t"\
-      "madd     $ac1,                   %[in],            %[cospi_16_64]  \n\t"\
-      "extp     %[tmp],                 $ac1,             31              \n\t"\
+    __asm__ __volatile__(/* out = dct_const_round_shift(input_dc *             \
+                            cospi_16_64); */                                   \
+                         "mtlo     %[dct_cost_rounding],   $ac1              " \
+                         "                \n\t"                                \
+                         "mthi     $zero,                  $ac1              " \
+                         "                \n\t"                                \
+                         "madd     $ac1,                   %[in],            " \
+                         "%[cospi_16_64]  \n\t"                                \
+                         "extp     %[tmp],                 $ac1,             " \
+                         "31              \n\t"                                \
                                                                                \
-      /* out = dct_const_round_shift(out * cospi_16_64); */                    \
-      "mtlo     %[dct_cost_rounding],   $ac2                              \n\t"\
-      "mthi     $zero,                  $ac2                              \n\t"\
-      "madd     $ac2,                   %[tmp],           %[cospi_16_64]  \n\t"\
-      "extp     %[out],                 $ac2,             31              \n\t"\
+                         /* out = dct_const_round_shift(out * cospi_16_64); */ \
+                         "mtlo     %[dct_cost_rounding],   $ac2              " \
+                         "                \n\t"                                \
+                         "mthi     $zero,                  $ac2              " \
+                         "                \n\t"                                \
+                         "madd     $ac2,                   %[tmp],           " \
+                         "%[cospi_16_64]  \n\t"                                \
+                         "extp     %[out],                 $ac2,             " \
+                         "31              \n\t"                                \
                                                                                \
-      : [tmp] "=&r" (tmp), [out] "=r" (out)                                    \
-      : [in] "r" (in),                                                         \
-        [dct_cost_rounding] "r" (dct_cost_rounding),                           \
-        [cospi_16_64] "r" (cospi_16_64)                                        \
-   );                                                                          \
-  out;                                                                    })
+                         : [tmp] "=&r"(tmp), [out] "=r"(out)                   \
+                         : [in] "r"(in),                                       \
+                           [dct_cost_rounding] "r"(dct_cost_rounding),         \
+                           [cospi_16_64] "r"(cospi_16_64));                    \
+    out;                                                                       \
+  })
 
 void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
                                    int dest_stride);
@@ -59,10 +68,8 @@ void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows);
 void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
                                  int dest_stride);
 void iadst8_dspr2(const int16_t *input, int16_t *output);
-void idct16_rows_dspr2(const int16_t *input, int16_t *output,
-                       uint32_t no_rows);
-void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
-                               int dest_stride);
+void idct16_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows);
+void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride);
 void iadst16_dspr2(const int16_t *input, int16_t *output);
 
 #endif  // #if HAVE_DSPR2
diff --git a/vpx_dsp/mips/inv_txfm_msa.h b/vpx_dsp/mips/inv_txfm_msa.h
index 8d4a831c9951c93d06a6614384cef5fd75e1990f..9597d6395fae76b1090e6e7a3ac641a50807b7ac 100644
--- a/vpx_dsp/mips/inv_txfm_msa.h
+++ b/vpx_dsp/mips/inv_txfm_msa.h
@@ -15,391 +15,392 @@
 #include "vpx_dsp/mips/txfm_macros_msa.h"
 #include "vpx_dsp/txfm_common.h"
 
-#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,               \
-                  out0, out1, out2, out3, out4, out5, out6, out7) {     \
-  v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                    \
-  v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m;                     \
-  v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64,  \
-    cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 };               \
-  v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64,              \
-    -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 };                    \
-                                                                        \
-  SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m);                       \
-  cnst2_m = -cnst0_m;                                                   \
-  ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);    \
-  SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m);                       \
-  cnst4_m = -cnst2_m;                                                   \
-  ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);    \
-                                                                        \
-  ILVRL_H2_SH(in0, in7, vec1_m, vec0_m);                                \
-  ILVRL_H2_SH(in4, in3, vec3_m, vec2_m);                                \
-  DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,        \
-                        cnst1_m, cnst2_m, cnst3_m, in7, in0,            \
-                        in4, in3);                                      \
-                                                                        \
-  SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m);                       \
-  cnst2_m = -cnst0_m;                                                   \
-  ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);    \
-  SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m);                       \
-  cnst4_m = -cnst2_m;                                                   \
-  ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);    \
-                                                                        \
-  ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                \
-  ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                \
-                                                                        \
-  DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,        \
-                        cnst1_m, cnst2_m, cnst3_m, in5, in2,            \
-                        in6, in1);                                      \
-  BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                \
-  out7 = -s0_m;                                                         \
-  out0 = s1_m;                                                          \
-                                                                        \
-  SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5,                                    \
-               cnst0_m, cnst1_m, cnst2_m, cnst3_m);                     \
-                                                                        \
-  ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m);    \
-  cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                            \
-  cnst1_m = cnst0_m;                                                    \
-                                                                        \
-  ILVRL_H2_SH(in4, in3, vec1_m, vec0_m);                                \
-  ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                \
-  DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,        \
-                        cnst2_m, cnst3_m, cnst1_m, out1, out6,          \
-                        s0_m, s1_m);                                    \
-                                                                        \
-  SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                       \
-  cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                            \
-                                                                        \
-  ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                \
-  ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m);                              \
-  out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                \
-  out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);                \
-  out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);                \
-  out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);                \
-                                                                        \
-  out1 = -out1;                                                         \
-  out3 = -out3;                                                         \
-  out5 = -out5;                                                         \
-}
+#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
+                  out3, out4, out5, out6, out7)                              \
+  {                                                                          \
+    v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                       \
+    v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m;                        \
+    v8i16 coeff0_m = { cospi_2_64,  cospi_6_64,  cospi_10_64, cospi_14_64,   \
+                       cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
+    v8i16 coeff1_m = { cospi_8_64,  -cospi_8_64,  cospi_16_64, -cospi_16_64, \
+                       cospi_24_64, -cospi_24_64, 0,           0 };          \
+                                                                             \
+    SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m);                          \
+    cnst2_m = -cnst0_m;                                                      \
+    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);       \
+    SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m);                          \
+    cnst4_m = -cnst2_m;                                                      \
+    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);       \
+                                                                             \
+    ILVRL_H2_SH(in0, in7, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in4, in3, vec3_m, vec2_m);                                   \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m,  \
+                          cnst2_m, cnst3_m, in7, in0, in4, in3);             \
+                                                                             \
+    SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m);                          \
+    cnst2_m = -cnst0_m;                                                      \
+    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);       \
+    SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m);                          \
+    cnst4_m = -cnst2_m;                                                      \
+    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);       \
+                                                                             \
+    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                   \
+                                                                             \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m,  \
+                          cnst2_m, cnst3_m, in5, in2, in6, in1);             \
+    BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                   \
+    out7 = -s0_m;                                                            \
+    out0 = s1_m;                                                             \
+                                                                             \
+    SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m);  \
+                                                                             \
+    ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m);       \
+    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                               \
+    cnst1_m = cnst0_m;                                                       \
+                                                                             \
+    ILVRL_H2_SH(in4, in3, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                   \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst2_m,  \
+                          cnst3_m, cnst1_m, out1, out6, s0_m, s1_m);         \
+                                                                             \
+    SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                          \
+    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                               \
+                                                                             \
+    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m);                                 \
+    out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                   \
+    out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);                   \
+    out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);                   \
+    out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);                   \
+                                                                             \
+    out1 = -out1;                                                            \
+    out3 = -out3;                                                            \
+    out5 = -out5;                                                            \
+  }
 
-#define VPX_SET_COSPI_PAIR(c0_h, c1_h) ({  \
-  v8i16 out0_m, r0_m, r1_m;                \
-                                           \
-  r0_m = __msa_fill_h(c0_h);               \
-  r1_m = __msa_fill_h(c1_h);               \
-  out0_m = __msa_ilvev_h(r1_m, r0_m);      \
-                                           \
-  out0_m;                                  \
-})
+#define VPX_SET_COSPI_PAIR(c0_h, c1_h)  \
+  ({                                    \
+    v8i16 out0_m, r0_m, r1_m;           \
+                                        \
+    r0_m = __msa_fill_h(c0_h);          \
+    r1_m = __msa_fill_h(c1_h);          \
+    out0_m = __msa_ilvev_h(r1_m, r0_m); \
+                                        \
+    out0_m;                             \
+  })
 
-#define VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) {  \
-  uint8_t *dst_m = (uint8_t *) (dst);                               \
-  v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                             \
-  v16i8 tmp0_m, tmp1_m;                                             \
-  v16i8 zero_m = { 0 };                                             \
-  v8i16 res0_m, res1_m, res2_m, res3_m;                             \
-                                                                    \
-  LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m);        \
-  ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m,        \
-             zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m);       \
-  ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3,          \
-       res0_m, res1_m, res2_m, res3_m);                             \
-  CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m);                   \
-  PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m);      \
-  ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride);                      \
-}
+#define VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3)               \
+  {                                                                            \
+    uint8_t *dst_m = (uint8_t *)(dst);                                         \
+    v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                                      \
+    v16i8 tmp0_m, tmp1_m;                                                      \
+    v16i8 zero_m = { 0 };                                                      \
+    v8i16 res0_m, res1_m, res2_m, res3_m;                                      \
+                                                                               \
+    LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m);                 \
+    ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m, zero_m, dst3_m, \
+               res0_m, res1_m, res2_m, res3_m);                                \
+    ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, res0_m, res1_m,   \
+         res2_m, res3_m);                                                      \
+    CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m);                            \
+    PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m);               \
+    ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride);                               \
+  }
 
-#define VPX_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) {   \
-  v8i16 c0_m, c1_m, c2_m, c3_m;                                     \
-  v8i16 step0_m, step1_m;                                           \
-  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                             \
-                                                                    \
-  c0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
-  c1_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
-  step0_m = __msa_ilvr_h(in2, in0);                                 \
-  DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m);        \
-                                                                    \
-  c2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
-  c3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
-  step1_m = __msa_ilvr_h(in3, in1);                                 \
-  DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m);        \
-  SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);      \
-                                                                    \
-  PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m);      \
-  SLDI_B2_0_SW(tmp0_m, tmp2_m, tmp1_m, tmp3_m, 8);                  \
-  BUTTERFLY_4((v8i16)tmp0_m, (v8i16)tmp1_m,                         \
-              (v8i16)tmp2_m, (v8i16)tmp3_m,                         \
-              out0, out1, out2, out3);                              \
-}
+#define VPX_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3)             \
+  {                                                                         \
+    v8i16 c0_m, c1_m, c2_m, c3_m;                                           \
+    v8i16 step0_m, step1_m;                                                 \
+    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
+                                                                            \
+    c0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);                    \
+    c1_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);                   \
+    step0_m = __msa_ilvr_h(in2, in0);                                       \
+    DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m);              \
+                                                                            \
+    c2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                    \
+    c3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                     \
+    step1_m = __msa_ilvr_h(in3, in1);                                       \
+    DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m);              \
+    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);            \
+                                                                            \
+    PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m);            \
+    SLDI_B2_0_SW(tmp0_m, tmp2_m, tmp1_m, tmp3_m, 8);                        \
+    BUTTERFLY_4((v8i16)tmp0_m, (v8i16)tmp1_m, (v8i16)tmp2_m, (v8i16)tmp3_m, \
+                out0, out1, out2, out3);                                    \
+  }
 
-#define VPX_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
-  v8i16 res0_m, res1_m, c0_m, c1_m;                                 \
-  v8i16 k1_m, k2_m, k3_m, k4_m;                                     \
-  v8i16 zero_m = { 0 };                                             \
-  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                             \
-  v4i32 int0_m, int1_m, int2_m, int3_m;                             \
-  v8i16 mask_m = { sinpi_1_9, sinpi_2_9, sinpi_3_9,                 \
-    sinpi_4_9, -sinpi_1_9, -sinpi_2_9, -sinpi_3_9,                  \
-    -sinpi_4_9 };                                                   \
-                                                                    \
-  SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m);         \
-  ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m);                  \
-  ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                   \
-  DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m);          \
-  int0_m = tmp2_m + tmp1_m;                                         \
-                                                                    \
-  SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m);                           \
-  ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m);                  \
-  DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);          \
-  int1_m = tmp0_m + tmp1_m;                                         \
-                                                                    \
-  c0_m = __msa_splati_h(mask_m, 6);                                 \
-  ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m);                 \
-  ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                   \
-  DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);          \
-  int2_m = tmp0_m + tmp1_m;                                         \
-                                                                    \
-  c0_m = __msa_splati_h(mask_m, 6);                                 \
-  c0_m = __msa_ilvev_h(c0_m, k1_m);                                 \
-                                                                    \
-  res0_m = __msa_ilvr_h((in1), (in3));                              \
-  tmp0_m = __msa_dotp_s_w(res0_m, c0_m);                            \
-  int3_m = tmp2_m + tmp0_m;                                         \
-                                                                    \
-  res0_m = __msa_ilvr_h((in2), (in3));                              \
-  c1_m = __msa_ilvev_h(k4_m, k3_m);                                 \
-                                                                    \
-  tmp2_m = __msa_dotp_s_w(res0_m, c1_m);                            \
-  res1_m = __msa_ilvr_h((in0), (in2));                              \
-  c1_m = __msa_ilvev_h(k1_m, zero_m);                               \
-                                                                    \
-  tmp3_m = __msa_dotp_s_w(res1_m, c1_m);                            \
-  int3_m += tmp2_m;                                                 \
-  int3_m += tmp3_m;                                                 \
-                                                                    \
-  SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, DCT_CONST_BITS);      \
-  PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1);          \
-  PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3);          \
-}
+#define VPX_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3)       \
+  {                                                                    \
+    v8i16 res0_m, res1_m, c0_m, c1_m;                                  \
+    v8i16 k1_m, k2_m, k3_m, k4_m;                                      \
+    v8i16 zero_m = { 0 };                                              \
+    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                              \
+    v4i32 int0_m, int1_m, int2_m, int3_m;                              \
+    v8i16 mask_m = { sinpi_1_9,  sinpi_2_9,  sinpi_3_9,  sinpi_4_9,    \
+                     -sinpi_1_9, -sinpi_2_9, -sinpi_3_9, -sinpi_4_9 }; \
+                                                                       \
+    SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m);          \
+    ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m);                   \
+    ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                    \
+    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m);           \
+    int0_m = tmp2_m + tmp1_m;                                          \
+                                                                       \
+    SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m);                            \
+    ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m);                   \
+    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);           \
+    int1_m = tmp0_m + tmp1_m;                                          \
+                                                                       \
+    c0_m = __msa_splati_h(mask_m, 6);                                  \
+    ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m);                  \
+    ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                    \
+    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);           \
+    int2_m = tmp0_m + tmp1_m;                                          \
+                                                                       \
+    c0_m = __msa_splati_h(mask_m, 6);                                  \
+    c0_m = __msa_ilvev_h(c0_m, k1_m);                                  \
+                                                                       \
+    res0_m = __msa_ilvr_h((in1), (in3));                               \
+    tmp0_m = __msa_dotp_s_w(res0_m, c0_m);                             \
+    int3_m = tmp2_m + tmp0_m;                                          \
+                                                                       \
+    res0_m = __msa_ilvr_h((in2), (in3));                               \
+    c1_m = __msa_ilvev_h(k4_m, k3_m);                                  \
+                                                                       \
+    tmp2_m = __msa_dotp_s_w(res0_m, c1_m);                             \
+    res1_m = __msa_ilvr_h((in0), (in2));                               \
+    c1_m = __msa_ilvev_h(k1_m, zero_m);                                \
+                                                                       \
+    tmp3_m = __msa_dotp_s_w(res1_m, c1_m);                             \
+    int3_m += tmp2_m;                                                  \
+    int3_m += tmp3_m;                                                  \
+                                                                       \
+    SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, DCT_CONST_BITS);       \
+    PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1);           \
+    PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3);           \
+  }
 
-#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) ({  \
-  v8i16 c0_m, c1_m;                                    \
-                                                       \
-  SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m);    \
-  c0_m = __msa_ilvev_h(c1_m, c0_m);                    \
-                                                       \
-  c0_m;                                                \
-})
+#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h)    \
+  ({                                                  \
+    v8i16 c0_m, c1_m;                                 \
+                                                      \
+    SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m); \
+    c0_m = __msa_ilvev_h(c1_m, c0_m);                 \
+                                                      \
+    c0_m;                                             \
+  })
 
 /* multiply and add macro */
-#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,        \
-                 out0, out1, out2, out3) {                              \
-  v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
-  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
-                                                                        \
-  ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m);                        \
-  ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m);                        \
-  DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m,               \
-              cst0, cst0, cst1, cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
-  SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);          \
-  PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1);              \
-  DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m,               \
-              cst2, cst2, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
-  SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);          \
-  PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3);              \
-}
+#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1,  \
+                 out2, out3)                                                  \
+  {                                                                           \
+    v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                         \
+    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                     \
+                                                                              \
+    ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m);                            \
+    ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m);                            \
+    DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m, cst0, cst0, cst1, \
+                cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m);                        \
+    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);              \
+    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1);                  \
+    DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m, cst2, cst2, cst3, \
+                cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);                        \
+    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);              \
+    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3);                  \
+  }
 
 /* idct 8x8 macro */
-#define VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,               \
-                       out0, out1, out2, out3, out4, out5, out6, out7) {     \
-  v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m;              \
-  v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m;              \
-  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                      \
-  v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64,        \
-    cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 };                  \
-                                                                             \
-  k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5);                                   \
-  k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0);                                   \
-  k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3);                                   \
-  k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2);                                   \
-  VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5);  \
-  SUB2(in1, in3, in7, in5, res0_m, res1_m);                                  \
-  k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7);                                   \
-  k1_m = __msa_splati_h(mask_m, 4);                                          \
-                                                                             \
-  ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m);                               \
-  DOTP_SH4_SW(res2_m, res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m,        \
-              tmp0_m, tmp1_m, tmp2_m, tmp3_m);                               \
-  SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);               \
-  tp4_m = in1 + in3;                                                         \
-  PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m);                 \
-  tp7_m = in7 + in5;                                                         \
-  k2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                       \
-  k3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                        \
-  VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m,                       \
-           in0, in4, in2, in6);                                              \
-  BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m);               \
-  BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m,        \
-              out0, out1, out2, out3, out4, out5, out6, out7);               \
-}
+#define VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1,    \
+                       out2, out3, out4, out5, out6, out7)                    \
+  {                                                                           \
+    v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m;             \
+    v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m;             \
+    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                     \
+    v8i16 mask_m = { cospi_28_64, cospi_4_64,  cospi_20_64,  cospi_12_64,     \
+                     cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 };  \
+                                                                              \
+    k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5);                                  \
+    k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0);                                  \
+    k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3);                                  \
+    k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2);                                  \
+    VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
+    SUB2(in1, in3, in7, in5, res0_m, res1_m);                                 \
+    k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7);                                  \
+    k1_m = __msa_splati_h(mask_m, 4);                                         \
+                                                                              \
+    ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m);                              \
+    DOTP_SH4_SW(res2_m, res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m,       \
+                tmp0_m, tmp1_m, tmp2_m, tmp3_m);                              \
+    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);              \
+    tp4_m = in1 + in3;                                                        \
+    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m);                \
+    tp7_m = in7 + in5;                                                        \
+    k2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                      \
+    k3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                       \
+    VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, in0, in4, in2, in6); \
+    BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m);              \
+    BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, out0, \
+                out1, out2, out3, out4, out5, out6, out7);                    \
+  }
 
-#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,            \
-                        out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m;                    \
-  v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m;                                \
-  v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1;          \
-  v8i16 mask1_m = { cospi_2_64, cospi_30_64, -cospi_2_64,                  \
-    cospi_10_64, cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 };    \
-  v8i16 mask2_m = { cospi_14_64, -cospi_18_64, cospi_26_64,                \
-    cospi_6_64, -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 };      \
-  v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64,                 \
-    -cospi_16_64, 0, 0, 0, 0 };                                            \
-                                                                           \
-  k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1);                                \
-  k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2);                                \
-  ILVRL_H2_SH(in1, in0, in_s1, in_s0);                                     \
-  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
-              r0_m, r1_m, r2_m, r3_m);                                     \
-  k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7);                                \
-  k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1);                                \
-  ILVRL_H2_SH(in5, in4, in_s1, in_s0);                                     \
-  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
-              r4_m, r5_m, r6_m, r7_m);                                     \
-  ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
-       m0_m, m1_m, m2_m, m3_m);                                            \
-  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
-  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m);                     \
-  SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
-       m0_m, m1_m, m2_m, m3_m);                                            \
-  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
-  PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m);                         \
-  k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4);                                \
-  k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5);                                \
-  ILVRL_H2_SH(in3, in2, in_s1, in_s0);                                     \
-  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
-              r0_m, r1_m, r2_m, r3_m);                                     \
-  k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3);                                \
-  k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4);                                \
-  ILVRL_H2_SH(in7, in6, in_s1, in_s0);                                     \
-  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
-              r4_m, r5_m, r6_m, r7_m);                                     \
-  ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
-       m0_m, m1_m, m2_m, m3_m);                                            \
-  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
-  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m);                     \
-  SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
-       m0_m, m1_m, m2_m, m3_m);                                            \
-  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
-  PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m);                         \
-  ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m);                                     \
-  BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3);        \
-  k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6);                                \
-  k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7);                                \
-  ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0);                                   \
-  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
-              r0_m, r1_m, r2_m, r3_m);                                     \
-  k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1);                                \
-  DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m,              \
-              r4_m, r5_m, r6_m, r7_m);                                     \
-  ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m,                     \
-       m0_m, m1_m, m2_m, m3_m);                                            \
-  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
-  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6);                          \
-  SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m,                     \
-       m0_m, m1_m, m2_m, m3_m);                                            \
-  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
-  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5);                           \
-  k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2);                                \
-  k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3);                                \
-  ILVRL_H2_SH(in4, in3, in_s1, in_s0);                                     \
-  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
-              m0_m, m1_m, m2_m, m3_m);                                     \
-  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
-  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4);                          \
-  ILVRL_H2_SW(in5, in2, m2_m, m3_m);                                       \
-  DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m,              \
-              m0_m, m1_m, m2_m, m3_m);                                     \
-  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
-  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5);                          \
-                                                                           \
-  out1 = -in1;                                                             \
-  out3 = -in3;                                                             \
-  out5 = -in5;                                                             \
-  out7 = -in7;                                                             \
-}
+#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1,   \
+                        out2, out3, out4, out5, out6, out7)                   \
+  {                                                                           \
+    v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m;                     \
+    v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m;                                 \
+    v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1;           \
+    v8i16 mask1_m = { cospi_2_64,  cospi_30_64,  -cospi_2_64, cospi_10_64,    \
+                      cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 };  \
+    v8i16 mask2_m = { cospi_14_64,  -cospi_18_64, cospi_26_64, cospi_6_64,    \
+                      -cospi_26_64, cospi_8_64,   cospi_24_64, -cospi_8_64 }; \
+    v8i16 mask3_m = {                                                         \
+      -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0         \
+    };                                                                        \
+                                                                              \
+    k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1);                                 \
+    k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2);                                 \
+    ILVRL_H2_SH(in1, in0, in_s1, in_s0);                                      \
+    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m,     \
+                r1_m, r2_m, r3_m);                                            \
+    k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7);                                 \
+    k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1);                                 \
+    ILVRL_H2_SH(in5, in4, in_s1, in_s0);                                      \
+    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r4_m,     \
+                r5_m, r6_m, r7_m);                                            \
+    ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m,    \
+         m3_m);                                                               \
+    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
+    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m);                      \
+    SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m,    \
+         m3_m);                                                               \
+    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
+    PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m);                          \
+    k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4);                                 \
+    k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5);                                 \
+    ILVRL_H2_SH(in3, in2, in_s1, in_s0);                                      \
+    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m,     \
+                r1_m, r2_m, r3_m);                                            \
+    k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3);                                 \
+    k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4);                                 \
+    ILVRL_H2_SH(in7, in6, in_s1, in_s0);                                      \
+    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r4_m,     \
+                r5_m, r6_m, r7_m);                                            \
+    ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m,    \
+         m3_m);                                                               \
+    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
+    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m);                      \
+    SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m,    \
+         m3_m);                                                               \
+    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
+    PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m);                          \
+    ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m);                                      \
+    BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3);         \
+    k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6);                                 \
+    k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7);                                 \
+    ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0);                                    \
+    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m,     \
+                r1_m, r2_m, r3_m);                                            \
+    k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1);                                 \
+    DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, r4_m, r5_m,   \
+                r6_m, r7_m);                                                  \
+    ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m,    \
+         m3_m);                                                               \
+    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
+    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6);                           \
+    SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m,    \
+         m3_m);                                                               \
+    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
+    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5);                            \
+    k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2);                                 \
+    k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3);                                 \
+    ILVRL_H2_SH(in4, in3, in_s1, in_s0);                                      \
+    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, m0_m,     \
+                m1_m, m2_m, m3_m);                                            \
+    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
+    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4);                           \
+    ILVRL_H2_SW(in5, in2, m2_m, m3_m);                                        \
+    DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, m0_m, m1_m,   \
+                m2_m, m3_m);                                                  \
+    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                      \
+    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5);                           \
+                                                                              \
+    out1 = -in1;                                                              \
+    out3 = -in3;                                                              \
+    out5 = -in5;                                                              \
+    out7 = -in7;                                                              \
+  }
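
Note: the cospi_*_64 entries packed into mask1_m/mask2_m/mask3_m are cosine fractions scaled by 2^DCT_CONST_BITS (14 in libvpx), which is why every dot-product stage above ends in SRARI_W4_SW(..., DCT_CONST_BITS), a rounding arithmetic right shift. A scalar sketch of that rounding step, assuming DCT_CONST_BITS == 14:

/* Equivalent of ROUND_POWER_OF_TWO(x, DCT_CONST_BITS): add half the
 * divisor, then shift the 14 fixed-point bits back out. */
static int dct_round_shift(int x) { return (x + (1 << 13)) >> 14; }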
 
-#define VPX_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8,        \
-                         r9, r10, r11, r12, r13, r14, r15,          \
-                         out0, out1, out2, out3, out4, out5,        \
-                         out6, out7, out8, out9, out10, out11,      \
-                         out12, out13, out14, out15) {              \
-  v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m;             \
-  v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m;       \
-  v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m;             \
-  v8i16 h8_m, h9_m, h10_m, h11_m;                                   \
-  v8i16 k0_m, k1_m, k2_m, k3_m;                                     \
-                                                                    \
-  /* stage 1 */                                                     \
-  k0_m = VPX_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);               \
-  k1_m = VPX_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);              \
-  k2_m = VPX_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);              \
-  k3_m = VPX_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);             \
-  MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m,                  \
-          g0_m, g1_m, g2_m, g3_m);                                  \
-  k0_m = VPX_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);               \
-  k1_m = VPX_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);              \
-  k2_m = VPX_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);              \
-  k3_m = VPX_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);             \
-  MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m,                 \
-          g4_m, g5_m, g6_m, g7_m);                                  \
-  k0_m = VPX_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);               \
-  k1_m = VPX_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);              \
-  k2_m = VPX_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);               \
-  k3_m = VPX_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);              \
-  MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m,                 \
-          g8_m, g9_m, g10_m, g11_m);                                \
-  k0_m = VPX_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);              \
-  k1_m = VPX_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);             \
-  k2_m = VPX_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);               \
-  k3_m = VPX_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);              \
-  MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m,                  \
-          g12_m, g13_m, g14_m, g15_m);                              \
-                                                                    \
-  /* stage 2 */                                                     \
-  k0_m = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);               \
-  k1_m = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);              \
-  k2_m = VPX_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);              \
-  MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m,          \
-          h0_m, h1_m, h2_m, h3_m);                                  \
-  k0_m = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);              \
-  k1_m = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);             \
-  k2_m = VPX_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);             \
-  MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m,         \
-          h4_m, h5_m, h6_m, h7_m);                                  \
-  BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10);    \
-  BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m,    \
-              h8_m, h9_m, h10_m, h11_m, h6_m, h4_m, h2_m, h0_m);    \
-                                                                    \
-  /* stage 3 */                                                     \
-  BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m);  \
-  k0_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
-  k1_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
-  k2_m = VPX_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);              \
-  MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m,           \
-          out4, out6, out5, out7);                                  \
-  MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m,           \
-          out12, out14, out13, out15);                              \
-                                                                    \
-  /* stage 4 */                                                     \
-  k0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
-  k1_m = VPX_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);            \
-  k2_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
-  k3_m = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);             \
-  MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3);                 \
-  MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7);                   \
-  MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11);               \
-  MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15);               \
-}
+#define VPX_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11,     \
+                         r12, r13, r14, r15, out0, out1, out2, out3, out4,     \
+                         out5, out6, out7, out8, out9, out10, out11, out12,    \
+                         out13, out14, out15)                                  \
+  {                                                                            \
+    v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m;                      \
+    v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m;                \
+    v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m;                      \
+    v8i16 h8_m, h9_m, h10_m, h11_m;                                            \
+    v8i16 k0_m, k1_m, k2_m, k3_m;                                              \
+                                                                               \
+    /* stage 1 */                                                              \
+    k0_m = VPX_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);                        \
+    k1_m = VPX_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);                       \
+    k2_m = VPX_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);                       \
+    k3_m = VPX_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);                      \
+    MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m, g0_m, g1_m, g2_m, g3_m);  \
+    k0_m = VPX_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);                        \
+    k1_m = VPX_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);                       \
+    k2_m = VPX_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);                       \
+    k3_m = VPX_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);                      \
+    MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m, g4_m, g5_m, g6_m, g7_m); \
+    k0_m = VPX_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);                        \
+    k1_m = VPX_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);                       \
+    k2_m = VPX_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);                        \
+    k3_m = VPX_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);                       \
+    MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m, g8_m, g9_m, g10_m,       \
+            g11_m);                                                            \
+    k0_m = VPX_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);                       \
+    k1_m = VPX_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);                      \
+    k2_m = VPX_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);                        \
+    k3_m = VPX_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);                       \
+    MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m, g12_m, g13_m, g14_m,      \
+            g15_m);                                                            \
+                                                                               \
+    /* stage 2 */                                                              \
+    k0_m = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);                        \
+    k1_m = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);                       \
+    k2_m = VPX_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);                       \
+    MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m, h0_m, h1_m, h2_m, \
+            h3_m);                                                             \
+    k0_m = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);                       \
+    k1_m = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);                      \
+    k2_m = VPX_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);                      \
+    MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m, h4_m, h5_m,      \
+            h6_m, h7_m);                                                       \
+    BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10);             \
+    BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m, h8_m, h9_m, \
+                h10_m, h11_m, h6_m, h4_m, h2_m, h0_m);                         \
+                                                                               \
+    /* stage 3 */                                                              \
+    BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m);           \
+    k0_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                        \
+    k1_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                       \
+    k2_m = VPX_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);                       \
+    MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m, out4, out6, out5,  \
+            out7);                                                             \
+    MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m, out12, out14,      \
+            out13, out15);                                                     \
+                                                                               \
+    /* stage 4 */                                                              \
+    k0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);                       \
+    k1_m = VPX_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);                     \
+    k2_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);                      \
+    k3_m = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);                      \
+    MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3);                          \
+    MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7);                            \
+    MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11);                        \
+    MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15);                        \
+  }
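
Note: each MADD_BF call in stage 1 vectorizes a pair of the scalar rotations that appear in iadst16_dspr2 later in this diff, folding in the following add/sub and round shift. A hypothetical scalar model of the first call, MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m, g0_m, g1_m, g2_m, g3_m); the exact output ordering depends on the macro's lane packing, so treat this as a sketch only:

/* Compare the s0/s1/s8/s9 and x0/x1/x8/x9 lines of the scalar iadst16
 * below; dct_round_shift is the 14-bit rounding sketched earlier. */
static void madd_bf_stage1(int r15, int r0, int r7, int r8, int *g0, int *g1,
                           int *g2, int *g3) {
  const int s0 = r15 * cospi_1_64 + r0 * cospi_31_64;
  const int s1 = r15 * cospi_31_64 - r0 * cospi_1_64;
  const int s8 = r7 * cospi_17_64 + r8 * cospi_15_64;
  const int s9 = r7 * cospi_15_64 - r8 * cospi_17_64;
  *g0 = dct_round_shift(s0 + s8);
  *g1 = dct_round_shift(s1 + s9);
  *g2 = dct_round_shift(s0 - s8);
  *g3 = dct_round_shift(s1 - s9);
}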
 
 void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride);
diff --git a/vpx_dsp/mips/itrans16_dspr2.c b/vpx_dsp/mips/itrans16_dspr2.c
index 6d41e6190b78c032023aa851efc9c65c71c7c8f1..0ec0c2059f4463c6c84185892f9e4ec2d9375a8f 100644
--- a/vpx_dsp/mips/itrans16_dspr2.c
+++ b/vpx_dsp/mips/itrans16_dspr2.c
@@ -26,11 +26,11 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output,
   int result1, result2, result3, result4;
   const int const_2_power_13 = 8192;
 
-  for (i = no_rows; i--; ) {
+  for (i = no_rows; i--;) {
     /* prefetch row */
     prefetch_load((const uint8_t *)(input + 16));
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lh       %[load1],              0(%[input])                    \n\t"
         "lh       %[load2],             16(%[input])                    \n\t"
         "lh       %[load3],              8(%[input])                    \n\t"
@@ -64,19 +64,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output,
         "sub      %[step1_2],           %[step2_1],     %[step2_2]      \n\t"
         "sub      %[step1_3],           %[step2_0],     %[step2_3]      \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [step2_0] "=&r" (step2_0), [step2_1] "=&r" (step2_1),
-          [step2_2] "=&r" (step2_2), [step2_3] "=&r" (step2_3),
-          [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1),
-          [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [step2_0] "=&r"(step2_0),
+          [step2_1] "=&r"(step2_1), [step2_2] "=&r"(step2_2),
+          [step2_3] "=&r"(step2_3), [step1_0] "=r"(step1_0),
+          [step1_1] "=r"(step1_1), [step1_2] "=r"(step1_2),
+          [step1_3] "=r"(step1_3)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64),
+          [cospi_16_64] "r"(cospi_16_64));
+
+    __asm__ __volatile__(
         "lh       %[load5],             2(%[input])                     \n\t"
         "lh       %[load6],             30(%[input])                    \n\t"
         "lh       %[load7],             18(%[input])                    \n\t"
@@ -126,19 +125,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step2_8],           %[result1],     %[result2]      \n\t"
         "add      %[step2_15],          %[result4],     %[result3]      \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6),
-          [load7] "=&r" (load7), [load8] "=&r" (load8),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [result3] "=&r" (result3), [result4] "=&r" (result4),
-          [step2_8] "=r" (step2_8), [step2_15] "=r" (step2_15),
-          [step2_9] "=r" (step2_9), [step2_14] "=r" (step2_14)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64),
-          [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7),
+          [load8] "=&r"(load8), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [result3] "=&r"(result3),
+          [result4] "=&r"(result4), [step2_8] "=r"(step2_8),
+          [step2_15] "=r"(step2_15), [step2_9] "=r"(step2_9),
+          [step2_14] "=r"(step2_14)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_30_64] "r"(cospi_30_64), [cospi_2_64] "r"(cospi_2_64),
+          [cospi_14_64] "r"(cospi_14_64), [cospi_18_64] "r"(cospi_18_64),
+          [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             10(%[input])                    \n\t"
         "lh       %[load2],             22(%[input])                    \n\t"
         "lh       %[load3],             26(%[input])                    \n\t"
@@ -188,19 +186,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step2_11],          %[result1],     %[result2]      \n\t"
         "add      %[step2_12],          %[result4],     %[result3]      \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [result3] "=&r" (result3), [result4] "=&r" (result4),
-          [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11),
-          [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64),
-          [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [result3] "=&r"(result3),
+          [result4] "=&r"(result4), [step2_10] "=r"(step2_10),
+          [step2_11] "=r"(step2_11), [step2_12] "=r"(step2_12),
+          [step2_13] "=r"(step2_13)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_22_64] "r"(cospi_22_64), [cospi_10_64] "r"(cospi_10_64),
+          [cospi_6_64] "r"(cospi_6_64), [cospi_26_64] "r"(cospi_26_64),
+          [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));
+
+    __asm__ __volatile__(
         "lh       %[load5],             4(%[input])                     \n\t"
         "lh       %[load6],             28(%[input])                    \n\t"
         "lh       %[load7],             20(%[input])                    \n\t"
@@ -253,19 +250,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step1_4],           %[result1],     %[result2]      \n\t"
         "add      %[step1_7],           %[result4],     %[result3]      \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6),
-          [load7] "=&r" (load7), [load8] "=&r" (load8),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [result3] "=&r" (result3), [result4] "=&r" (result4),
-          [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5),
-          [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7),
+          [load8] "=&r"(load8), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [result3] "=&r"(result3),
+          [result4] "=&r"(result4), [step1_4] "=r"(step1_4),
+          [step1_5] "=r"(step1_5), [step1_6] "=r"(step1_6),
+          [step1_7] "=r"(step1_7)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),
+          [cospi_16_64] "r"(cospi_16_64));
+
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "mtlo     %[const_2_power_13],  $ac1                            \n\t"
@@ -305,18 +301,16 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output,
         "extp     %[step1_11],          $ac2,           31              \n\t"
         "extp     %[step1_12],          $ac3,           31              \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6),
-          [step1_10] "=r" (step1_10), [step1_11] "=r" (step1_11),
-          [step1_12] "=r" (step1_12), [step1_13] "=r" (step1_13)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_14] "r" (step2_14), [step2_13] "r" (step2_13),
-          [step2_9] "r" (step2_9), [step2_10] "r" (step2_10),
-          [step2_15] "r" (step2_15), [step2_12] "r" (step2_12),
-          [step2_8] "r" (step2_8), [step2_11] "r" (step2_11),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load5] "=&r"(load5), [load6] "=&r"(load6), [step1_10] "=r"(step1_10),
+          [step1_11] "=r"(step1_11), [step1_12] "=r"(step1_12),
+          [step1_13] "=r"(step1_13)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_14] "r"(step2_14),
+          [step2_13] "r"(step2_13), [step2_9] "r"(step2_9),
+          [step2_10] "r"(step2_10), [step2_15] "r"(step2_15),
+          [step2_12] "r"(step2_12), [step2_8] "r"(step2_8),
+          [step2_11] "r"(step2_11), [cospi_16_64] "r"(cospi_16_64));
+
+    __asm__ __volatile__(
         "add      %[load5],             %[step1_0],     %[step1_7]      \n\t"
         "add      %[load5],             %[load5],       %[step2_12]     \n\t"
         "add      %[load5],             %[load5],       %[step2_15]     \n\t"
@@ -350,17 +344,15 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output,
         "sh       %[load5],             448(%[output])                  \n\t"
         "sh       %[load6],             480(%[output])                  \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6)
-        : [output] "r" (output),
-          [step1_0] "r" (step1_0), [step1_1] "r" (step1_1),
-          [step1_6] "r" (step1_6), [step1_7] "r" (step1_7),
-          [step2_8] "r" (step2_8), [step2_9] "r" (step2_9),
-          [step2_10] "r" (step2_10), [step2_11] "r" (step2_11),
-          [step2_12] "r" (step2_12), [step2_13] "r" (step2_13),
-          [step2_14] "r" (step2_14), [step2_15] "r" (step2_15)
-    );
-
-    __asm__ __volatile__ (
+        : [load5] "=&r"(load5), [load6] "=&r"(load6)
+        : [output] "r"(output), [step1_0] "r"(step1_0), [step1_1] "r"(step1_1),
+          [step1_6] "r"(step1_6), [step1_7] "r"(step1_7),
+          [step2_8] "r"(step2_8), [step2_9] "r"(step2_9),
+          [step2_10] "r"(step2_10), [step2_11] "r"(step2_11),
+          [step2_12] "r"(step2_12), [step2_13] "r"(step2_13),
+          [step2_14] "r"(step2_14), [step2_15] "r"(step2_15));
+
+    __asm__ __volatile__(
         "add      %[load5],             %[step1_2],     %[step1_5]      \n\t"
         "add      %[load5],             %[load5],       %[step1_13]     \n\t"
         "add      %[load6],             %[step1_3],     %[step1_4]      \n\t"
@@ -386,21 +378,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output,
         "sh       %[load5],             384(%[output])                  \n\t"
         "sh       %[load6],             416(%[output])                  \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6)
-        : [output] "r" (output),
-          [step1_2] "r" (step1_2), [step1_3] "r" (step1_3),
-          [step1_4] "r" (step1_4), [step1_5] "r" (step1_5),
-          [step1_10] "r" (step1_10), [step1_11] "r" (step1_11),
-          [step1_12] "r" (step1_12), [step1_13] "r" (step1_13)
-    );
+        : [load5] "=&r"(load5), [load6] "=&r"(load6)
+        : [output] "r"(output), [step1_2] "r"(step1_2), [step1_3] "r"(step1_3),
+          [step1_4] "r"(step1_4), [step1_5] "r"(step1_5),
+          [step1_10] "r"(step1_10), [step1_11] "r"(step1_11),
+          [step1_12] "r"(step1_12), [step1_13] "r"(step1_13));
 
     input += 16;
     output += 1;
   }
 }
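
Note: the row pass above writes its results transposed; output advances by one int16 per input row while the sh stores step in 32-byte (16-sample) increments, so each input row lands in a column of the output buffer and the column pass can read it linearly. A sketch of the resulting two-pass driver (this mirrors vpx_idct16x16_256_add_dspr2 below, minus the wrdsp accumulator setup):

static void idct16x16_add_sketch(const int16_t *input, uint8_t *dest,
                                 int dest_stride) {
  DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
  idct16_rows_dspr2(input, out, 16);       /* rows -> out, transposed */
  idct16_cols_add_blk_dspr2(out, dest, dest_stride); /* cols, add to dest */
}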
 
-void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
-                               int dest_stride) {
+void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride) {
   int i;
   int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7;
   int step1_8, step1_9, step1_10, step1_11;
@@ -416,9 +405,9 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
 
   /* prefetch vpx_ff_cropTbl */
   prefetch_load(vpx_ff_cropTbl);
-  prefetch_load(vpx_ff_cropTbl +  32);
-  prefetch_load(vpx_ff_cropTbl +  64);
-  prefetch_load(vpx_ff_cropTbl +  96);
+  prefetch_load(vpx_ff_cropTbl + 32);
+  prefetch_load(vpx_ff_cropTbl + 64);
+  prefetch_load(vpx_ff_cropTbl + 96);
   prefetch_load(vpx_ff_cropTbl + 128);
   prefetch_load(vpx_ff_cropTbl + 160);
   prefetch_load(vpx_ff_cropTbl + 192);
@@ -426,7 +415,7 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
 
   for (i = 0; i < 16; ++i) {
     dest_pix = (dest + i);
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lh       %[load1],              0(%[input])                    \n\t"
         "lh       %[load2],             16(%[input])                    \n\t"
         "lh       %[load3],              8(%[input])                    \n\t"
@@ -460,19 +449,18 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "sub      %[step1_2],           %[step2_1],     %[step2_2]      \n\t"
         "sub      %[step1_3],           %[step2_0],     %[step2_3]      \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [step2_0] "=&r" (step2_0), [step2_1] "=&r" (step2_1),
-          [step2_2] "=&r" (step2_2), [step2_3] "=&r" (step2_3),
-          [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1),
-          [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [step2_0] "=&r"(step2_0),
+          [step2_1] "=&r"(step2_1), [step2_2] "=&r"(step2_2),
+          [step2_3] "=&r"(step2_3), [step1_0] "=r"(step1_0),
+          [step1_1] "=r"(step1_1), [step1_2] "=r"(step1_2),
+          [step1_3] "=r"(step1_3)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64),
+          [cospi_16_64] "r"(cospi_16_64));
+
+    __asm__ __volatile__(
         "lh       %[load5],             2(%[input])                     \n\t"
         "lh       %[load6],             30(%[input])                    \n\t"
         "lh       %[load7],             18(%[input])                    \n\t"
@@ -522,19 +510,18 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step2_8],           %[result1],     %[result2]      \n\t"
         "add      %[step2_15],          %[result4],     %[result3]      \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6),
-          [load7] "=&r" (load7), [load8] "=&r" (load8),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [result3] "=&r" (result3), [result4] "=&r" (result4),
-          [step2_8] "=r" (step2_8), [step2_15] "=r" (step2_15),
-          [step2_9] "=r" (step2_9), [step2_14] "=r" (step2_14)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64),
-          [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7),
+          [load8] "=&r"(load8), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [result3] "=&r"(result3),
+          [result4] "=&r"(result4), [step2_8] "=r"(step2_8),
+          [step2_15] "=r"(step2_15), [step2_9] "=r"(step2_9),
+          [step2_14] "=r"(step2_14)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_30_64] "r"(cospi_30_64), [cospi_2_64] "r"(cospi_2_64),
+          [cospi_14_64] "r"(cospi_14_64), [cospi_18_64] "r"(cospi_18_64),
+          [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             10(%[input])                    \n\t"
         "lh       %[load2],             22(%[input])                    \n\t"
         "lh       %[load3],             26(%[input])                    \n\t"
@@ -584,19 +571,18 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step2_11],          %[result1],     %[result2]      \n\t"
         "add      %[step2_12],          %[result4],     %[result3]      \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [result3] "=&r" (result3), [result4] "=&r" (result4),
-          [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11),
-          [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64),
-          [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [result3] "=&r"(result3),
+          [result4] "=&r"(result4), [step2_10] "=r"(step2_10),
+          [step2_11] "=r"(step2_11), [step2_12] "=r"(step2_12),
+          [step2_13] "=r"(step2_13)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_22_64] "r"(cospi_22_64), [cospi_10_64] "r"(cospi_10_64),
+          [cospi_6_64] "r"(cospi_6_64), [cospi_26_64] "r"(cospi_26_64),
+          [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));
+
+    __asm__ __volatile__(
         "lh       %[load5],             4(%[input])                   \n\t"
         "lh       %[load6],             28(%[input])                  \n\t"
         "lh       %[load7],             20(%[input])                  \n\t"
@@ -650,19 +636,18 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step1_4],           %[result1],     %[result2]      \n\t"
         "add      %[step1_7],           %[result4],     %[result3]      \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6),
-          [load7] "=&r" (load7), [load8] "=&r" (load8),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [result3] "=&r" (result3), [result4] "=&r" (result4),
-          [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5),
-          [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7),
+          [load8] "=&r"(load8), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [result3] "=&r"(result3),
+          [result4] "=&r"(result4), [step1_4] "=r"(step1_4),
+          [step1_5] "=r"(step1_5), [step1_6] "=r"(step1_6),
+          [step1_7] "=r"(step1_7)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),
+          [cospi_16_64] "r"(cospi_16_64));
+
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "mtlo     %[const_2_power_13],  $ac1                            \n\t"
@@ -702,23 +687,21 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "extp     %[step1_11],          $ac2,           31              \n\t"
         "extp     %[step1_12],          $ac3,           31              \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6),
-          [step1_10] "=r" (step1_10), [step1_11] "=r" (step1_11),
-          [step1_12] "=r" (step1_12), [step1_13] "=r" (step1_13)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_14] "r" (step2_14), [step2_13] "r" (step2_13),
-          [step2_9] "r" (step2_9), [step2_10] "r" (step2_10),
-          [step2_15] "r" (step2_15), [step2_12] "r" (step2_12),
-          [step2_8] "r" (step2_8), [step2_11] "r" (step2_11),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [load5] "=&r"(load5), [load6] "=&r"(load6), [step1_10] "=r"(step1_10),
+          [step1_11] "=r"(step1_11), [step1_12] "=r"(step1_12),
+          [step1_13] "=r"(step1_13)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_14] "r"(step2_14),
+          [step2_13] "r"(step2_13), [step2_9] "r"(step2_9),
+          [step2_10] "r"(step2_10), [step2_15] "r"(step2_15),
+          [step2_12] "r"(step2_12), [step2_8] "r"(step2_8),
+          [step2_11] "r"(step2_11), [cospi_16_64] "r"(cospi_16_64));
 
     step1_8 = step2_8 + step2_11;
     step1_9 = step2_9 + step2_10;
     step1_14 = step2_13 + step2_14;
     step1_15 = step2_12 + step2_15;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[load7],         0(%[dest_pix])                      \n\t"
         "add      %[load5],         %[step1_0],         %[step1_7]      \n\t"
         "add      %[load5],         %[load5],           %[step1_15]     \n\t"
@@ -870,18 +853,16 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "lbux     %[load6],         %[load8](%[cm])                     \n\t"
         "sb       %[load6],         0(%[dest_pix])                      \n\t"
 
-        : [load5] "=&r" (load5), [load6] "=&r" (load6), [load7] "=&r" (load7),
-          [load8] "=&r" (load8), [dest_pix] "+r" (dest_pix)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step1_0] "r" (step1_0), [step1_1] "r" (step1_1),
-          [step1_2] "r" (step1_2), [step1_3] "r" (step1_3),
-          [step1_4] "r" (step1_4), [step1_5] "r" (step1_5),
-          [step1_6] "r" (step1_6), [step1_7] "r" (step1_7),
-          [step1_8] "r" (step1_8), [step1_9] "r" (step1_9),
-          [step1_10] "r" (step1_10), [step1_11] "r" (step1_11),
-          [step1_12] "r" (step1_12), [step1_13] "r" (step1_13),
-          [step1_14] "r" (step1_14), [step1_15] "r" (step1_15)
-    );
+        : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7),
+          [load8] "=&r"(load8), [dest_pix] "+r"(dest_pix)
+        :
+        [cm] "r"(cm), [dest_stride] "r"(dest_stride), [step1_0] "r"(step1_0),
+        [step1_1] "r"(step1_1), [step1_2] "r"(step1_2), [step1_3] "r"(step1_3),
+        [step1_4] "r"(step1_4), [step1_5] "r"(step1_5), [step1_6] "r"(step1_6),
+        [step1_7] "r"(step1_7), [step1_8] "r"(step1_8), [step1_9] "r"(step1_9),
+        [step1_10] "r"(step1_10), [step1_11] "r"(step1_11),
+        [step1_12] "r"(step1_12), [step1_13] "r"(step1_13),
+        [step1_14] "r"(step1_14), [step1_15] "r"(step1_15));
 
     input += 16;
   }
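
Note: the lbux lookups in the store block above clip each reconstructed pixel through vpx_ff_cropTbl (prefetched at the top of the function) instead of branching; cm is assumed to point far enough into the table that any value the idct can produce indexes a saturated byte. A sketch of the idea:

/* Branch-free clamp: with cm biased into the crop table, cm[x] yields
 * clip(x, 0, 255), so `lbux %[dst], %[x](%[cm])` clamps and loads in
 * one instruction. (Assumed table semantics; the offset setup lies
 * outside the hunks shown here.) */
static unsigned char clip_pixel_tbl(const unsigned char *cm, int x) {
  return cm[x];
}
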
@@ -889,15 +870,11 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
 
 void vpx_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
                                  int dest_stride) {
-  DECLARE_ALIGNED(32, int16_t,  out[16 * 16]);
+  DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp    %[pos],    1    \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp    %[pos],    1    \n\t" : : [pos] "r"(pos));
 
   // First transform rows
   idct16_rows_dspr2(input, out, 16);
@@ -908,17 +885,13 @@ void vpx_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest,
 
 void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest,
                                 int dest_stride) {
-  DECLARE_ALIGNED(32, int16_t,  out[16 * 16]);
+  DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *outptr = out;
   uint32_t i;
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp    %[pos],    1    \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp    %[pos],    1    \n\t" : : [pos] "r"(pos));
 
   // First transform rows. Since all non-zero dct coefficients are in the
   // upper-left 4x4 area, we only need to calculate the first 4 rows here.
@@ -926,7 +899,7 @@ void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest,
 
   outptr += 4;
   for (i = 0; i < 6; ++i) {
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sw     $zero,    0(%[outptr])     \n\t"
         "sw     $zero,   32(%[outptr])     \n\t"
         "sw     $zero,   64(%[outptr])     \n\t"
@@ -945,8 +918,7 @@ void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest,
         "sw     $zero,  480(%[outptr])     \n\t"
 
         :
-        : [outptr] "r" (outptr)
-    );
+        : [outptr] "r"(outptr));
 
     outptr += 2;
   }
@@ -966,35 +938,31 @@ void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest,
   int32_t vector_1, vector_2, vector_3, vector_4;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
 
-    :
-    : [pos] "r" (pos)
-  );
+                       :
+                       : [pos] "r"(pos));
 
   out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]);
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "addi     %[out],     %[out],     32      \n\t"
       "sra      %[a1],      %[out],     6       \n\t"
 
-      : [out] "+r" (out), [a1] "=r" (a1)
-      :
-  );
+      : [out] "+r"(out), [a1] "=r"(a1)
+      :);
 
   if (a1 < 0) {
     /* use quad-byte operations;
      * input and output memory are four-byte aligned */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "abs        %[absa1],       %[a1]       \n\t"
         "replv.qb   %[vector_a1],   %[absa1]    \n\t"
 
-        : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
-        : [a1] "r" (a1)
-    );
+        : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1)
+        : [a1] "r"(a1));
 
     for (r = 16; r--;) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw             %[t1],          0(%[dest])                      \n\t"
           "lw             %[t2],          4(%[dest])                      \n\t"
           "lw             %[t3],          8(%[dest])                      \n\t"
@@ -1009,25 +977,22 @@ void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest,
           "sw             %[vector_4],    12(%[dest])                     \n\t"
           "add            %[dest],        %[dest],        %[dest_stride]  \n\t"
 
-          : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4),
-            [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
-            [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4),
-            [dest] "+&r" (dest)
-          : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
-      );
+          : [t1] "=&r"(t1), [t2] "=&r"(t2), [t3] "=&r"(t3), [t4] "=&r"(t4),
+            [vector_1] "=&r"(vector_1), [vector_2] "=&r"(vector_2),
+            [vector_3] "=&r"(vector_3), [vector_4] "=&r"(vector_4),
+            [dest] "+&r"(dest)
+          : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1));
     }
   } else {
     /* use quad-byte operations;
      * input and output memory are four-byte aligned */
-    __asm__ __volatile__ (
-        "replv.qb   %[vector_a1],   %[a1]   \n\t"
+    __asm__ __volatile__("replv.qb   %[vector_a1],   %[a1]   \n\t"
 
-        : [vector_a1] "=r" (vector_a1)
-        : [a1] "r" (a1)
-    );
+                         : [vector_a1] "=r"(vector_a1)
+                         : [a1] "r"(a1));
 
     for (r = 16; r--;) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw             %[t1],          0(%[dest])                      \n\t"
           "lw             %[t2],          4(%[dest])                      \n\t"
           "lw             %[t3],          8(%[dest])                      \n\t"
@@ -1042,12 +1007,11 @@ void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest,
           "sw             %[vector_4],    12(%[dest])                     \n\t"
           "add            %[dest],        %[dest],        %[dest_stride]  \n\t"
 
-          : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4),
-            [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
-            [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4),
-            [dest] "+&r" (dest)
-          : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
-      );
+          : [t1] "=&r"(t1), [t2] "=&r"(t2), [t3] "=&r"(t3), [t4] "=&r"(t4),
+            [vector_1] "=&r"(vector_1), [vector_2] "=&r"(vector_2),
+            [vector_3] "=&r"(vector_3), [vector_4] "=&r"(vector_4),
+            [dest] "+&r"(dest)
+          : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1));
     }
   }
 }
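
Note: this function is the DC-only fast path; only input[0] is nonzero, so every output pixel gets the same bias. The asm computes the same value as the generic C path (cf. vpx_idct16x16_1_add_c); a hypothetical scalar sketch, with dct_round_shift as sketched earlier:

/* The DC term is rotated by cospi_16_64 twice with 14-bit rounding;
 * the addi 32 / sra 6 pair in the asm is the final (out + 32) >> 6. */
static int idct16x16_dc(const int16_t *input) {
  int out = dct_round_shift(input[0] * cospi_16_64);
  out = dct_round_shift(out * cospi_16_64);
  return (out + 32) >> 6; /* |a1| is then splatted with replv.qb and
                             applied to four pixels per word */
}
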
@@ -1072,21 +1036,20 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) {
   int x14 = input[1];
   int x15 = input[14];
 
-  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
-           | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
-    output[0] = output[1] = output[2] = output[3] = output[4]
-              = output[5] = output[6] = output[7] = output[8]
-              = output[9] = output[10] = output[11] = output[12]
-              = output[13] = output[14] = output[15] = 0;
+  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+        x13 | x14 | x15)) {
+    output[0] = output[1] = output[2] = output[3] = output[4] = output[5] =
+        output[6] = output[7] = output[8] = output[9] = output[10] =
+            output[11] = output[12] = output[13] = output[14] = output[15] = 0;
     return;
   }
 
   // stage 1
-  s0 = x0 * cospi_1_64  + x1 * cospi_31_64;
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
   s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
-  s2 = x2 * cospi_5_64  + x3 * cospi_27_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
   s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
-  s4 = x4 * cospi_9_64  + x5 * cospi_23_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
   s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
   s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
   s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -1095,9 +1058,9 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) {
   s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
   s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
   s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
-  s13 = x12 * cospi_7_64  - x13 * cospi_25_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
   s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
-  s15 = x14 * cospi_3_64  - x15 * cospi_29_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
 
   x0 = dct_const_round_shift(s0 + s8);
   x1 = dct_const_round_shift(s1 + s9);
@@ -1107,8 +1070,8 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) {
   x5 = dct_const_round_shift(s5 + s13);
   x6 = dct_const_round_shift(s6 + s14);
   x7 = dct_const_round_shift(s7 + s15);
-  x8  = dct_const_round_shift(s0 - s8);
-  x9  = dct_const_round_shift(s1 - s9);
+  x8 = dct_const_round_shift(s0 - s8);
+  x9 = dct_const_round_shift(s1 - s9);
   x10 = dct_const_round_shift(s2 - s10);
   x11 = dct_const_round_shift(s3 - s11);
   x12 = dct_const_round_shift(s4 - s12);
@@ -1125,14 +1088,14 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) {
   s5 = x5;
   s6 = x6;
   s7 = x7;
-  s8 =    x8 * cospi_4_64   + x9 * cospi_28_64;
-  s9 =    x8 * cospi_28_64  - x9 * cospi_4_64;
-  s10 =   x10 * cospi_20_64 + x11 * cospi_12_64;
-  s11 =   x10 * cospi_12_64 - x11 * cospi_20_64;
-  s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
-  s13 =   x12 * cospi_4_64  + x13 * cospi_28_64;
-  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
-  s15 =   x14 * cospi_20_64 + x15 * cospi_12_64;
+  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
 
   x0 = s0 + s4;
   x1 = s1 + s5;
@@ -1156,18 +1119,18 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) {
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 = x4 * cospi_8_64  + x5 * cospi_24_64;
+  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
   s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
-  s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
-  s7 =   x6 * cospi_8_64  + x7 * cospi_24_64;
+  s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
   s8 = x8;
   s9 = x9;
   s10 = x10;
   s11 = x11;
-  s12 = x12 * cospi_8_64  + x13 * cospi_24_64;
+  s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
   s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
-  s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
-  s15 =   x14 * cospi_8_64  + x15 * cospi_24_64;
+  s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+  s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
 
   x0 = s0 + s2;
   x1 = s1 + s3;
@@ -1187,13 +1150,13 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) {
   x15 = dct_const_round_shift(s13 - s15);
 
   // stage 4
-  s2 = (- cospi_16_64) * (x2 + x3);
+  s2 = (-cospi_16_64) * (x2 + x3);
   s3 = cospi_16_64 * (x2 - x3);
   s6 = cospi_16_64 * (x6 + x7);
-  s7 = cospi_16_64 * (- x6 + x7);
+  s7 = cospi_16_64 * (-x6 + x7);
   s10 = cospi_16_64 * (x10 + x11);
-  s11 = cospi_16_64 * (- x10 + x11);
-  s14 = (- cospi_16_64) * (x14 + x15);
+  s11 = cospi_16_64 * (-x10 + x11);
+  s14 = (-cospi_16_64) * (x14 + x15);
   s15 = cospi_16_64 * (x14 - x15);
 
   x2 = dct_const_round_shift(s2);
@@ -1205,23 +1168,22 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) {
   x14 = dct_const_round_shift(s14);
   x15 = dct_const_round_shift(s15);
 
-  output[0] =  x0;
+  output[0] = x0;
   output[1] = -x8;
-  output[2] =  x12;
+  output[2] = x12;
   output[3] = -x4;
-  output[4] =  x6;
-  output[5] =  x14;
-  output[6] =  x10;
-  output[7] =  x2;
-  output[8] =  x3;
-  output[9] =  x11;
-  output[10] =  x15;
-  output[11] =  x7;
-  output[12] =  x5;
+  output[4] = x6;
+  output[5] = x14;
+  output[6] = x10;
+  output[7] = x2;
+  output[8] = x3;
+  output[9] = x11;
+  output[10] = x15;
+  output[11] = x7;
+  output[12] = x5;
   output[13] = -x13;
-  output[14] =  x9;
+  output[14] = x9;
   output[15] = -x1;
 }
 
-
 #endif  // HAVE_DSPR2
diff --git a/vpx_dsp/mips/itrans32_cols_dspr2.c b/vpx_dsp/mips/itrans32_cols_dspr2.c
index 553acb0f5bfd96885447377357fd9966d7f83df0..ce25d55c9c0bb23b4a3bfe28ca14cd84101a0613 100644
--- a/vpx_dsp/mips/itrans32_cols_dspr2.c
+++ b/vpx_dsp/mips/itrans32_cols_dspr2.c
@@ -39,9 +39,9 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
 
   /* prefetch vpx_ff_cropTbl */
   prefetch_load(vpx_ff_cropTbl);
-  prefetch_load(vpx_ff_cropTbl +  32);
-  prefetch_load(vpx_ff_cropTbl +  64);
-  prefetch_load(vpx_ff_cropTbl +  96);
+  prefetch_load(vpx_ff_cropTbl + 32);
+  prefetch_load(vpx_ff_cropTbl + 64);
+  prefetch_load(vpx_ff_cropTbl + 96);
   prefetch_load(vpx_ff_cropTbl + 128);
   prefetch_load(vpx_ff_cropTbl + 160);
   prefetch_load(vpx_ff_cropTbl + 192);
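The eight calls above touch vpx_ff_cropTbl one 32-byte cache line at a time before the pixel loop starts. Roughly what prefetch_load expands to on these targets (a sketch; the canonical definition lives in the mips common header):

static inline void prefetch_load_sketch(const unsigned char *src) {
  /* MIPS `pref 0, ...` is a load hint; it only warms the cache. */
  __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src));
}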
@@ -51,7 +51,7 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
     dest_pix = dest + i;
     dest_pix1 = dest + i + 31 * dest_stride;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lh       %[load1],             2(%[input])                     \n\t"
         "lh       %[load2],             62(%[input])                    \n\t"
         "lh       %[load3],             34(%[input])                    \n\t"
@@ -101,18 +101,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step1_16],          %[temp0],       %[temp1]        \n\t"
         "add      %[step1_31],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
-          [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_16] "=r" (step1_16), [step1_17] "=r" (step1_17),
-          [step1_30] "=r" (step1_30), [step1_31] "=r" (step1_31)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_31_64] "r" (cospi_31_64), [cospi_1_64] "r" (cospi_1_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_17_64] "r" (cospi_17_64),
-          [cospi_15_64] "r" (cospi_15_64), [cospi_28_64] "r" (cospi_28_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_16] "=r"(step1_16),
+          [step1_17] "=r"(step1_17), [step1_30] "=r"(step1_30),
+          [step1_31] "=r"(step1_31)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_31_64] "r"(cospi_31_64), [cospi_1_64] "r"(cospi_1_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_17_64] "r"(cospi_17_64),
+          [cospi_15_64] "r"(cospi_15_64), [cospi_28_64] "r"(cospi_28_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             18(%[input])                    \n\t"
         "lh       %[load2],             46(%[input])                    \n\t"
         "lh       %[load3],             50(%[input])                    \n\t"
@@ -162,18 +161,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step1_19],          %[temp0],       %[temp1]        \n\t"
         "add      %[step1_28],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
-          [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_18] "=r" (step1_18), [step1_19] "=r" (step1_19),
-          [step1_28] "=r" (step1_28), [step1_29] "=r" (step1_29)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_23_64] "r" (cospi_23_64), [cospi_9_64] "r" (cospi_9_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_7_64] "r" (cospi_7_64),
-          [cospi_25_64] "r" (cospi_25_64), [cospi_28_64] "r" (cospi_28_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_18] "=r"(step1_18),
+          [step1_19] "=r"(step1_19), [step1_28] "=r"(step1_28),
+          [step1_29] "=r"(step1_29)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_23_64] "r"(cospi_23_64), [cospi_9_64] "r"(cospi_9_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_7_64] "r"(cospi_7_64),
+          [cospi_25_64] "r"(cospi_25_64), [cospi_28_64] "r"(cospi_28_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             10(%[input])                    \n\t"
         "lh       %[load2],             54(%[input])                    \n\t"
         "lh       %[load3],             42(%[input])                    \n\t"
@@ -223,18 +221,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step1_20],          %[temp0],       %[temp1]        \n\t"
         "add      %[step1_27],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
-          [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_20] "=r" (step1_20), [step1_21] "=r" (step1_21),
-          [step1_26] "=r" (step1_26), [step1_27] "=r" (step1_27)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_27_64] "r" (cospi_27_64), [cospi_5_64] "r" (cospi_5_64),
-          [cospi_11_64] "r" (cospi_11_64), [cospi_21_64] "r" (cospi_21_64),
-          [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_20] "=r"(step1_20),
+          [step1_21] "=r"(step1_21), [step1_26] "=r"(step1_26),
+          [step1_27] "=r"(step1_27)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_27_64] "r"(cospi_27_64), [cospi_5_64] "r"(cospi_5_64),
+          [cospi_11_64] "r"(cospi_11_64), [cospi_21_64] "r"(cospi_21_64),
+          [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             26(%[input])                    \n\t"
         "lh       %[load2],             38(%[input])                    \n\t"
         "lh       %[load3],             58(%[input])                    \n\t"
@@ -280,18 +277,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step1_23],          %[temp0],       %[temp1]        \n\t"
         "add      %[step1_24],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
-          [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_22] "=r" (step1_22), [step1_23] "=r" (step1_23),
-          [step1_24] "=r" (step1_24), [step1_25] "=r" (step1_25)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_19_64] "r" (cospi_19_64), [cospi_13_64] "r" (cospi_13_64),
-          [cospi_3_64] "r" (cospi_3_64), [cospi_29_64] "r" (cospi_29_64),
-          [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_22] "=r"(step1_22),
+          [step1_23] "=r"(step1_23), [step1_24] "=r"(step1_24),
+          [step1_25] "=r"(step1_25)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_19_64] "r"(cospi_19_64), [cospi_13_64] "r"(cospi_13_64),
+          [cospi_3_64] "r"(cospi_3_64), [cospi_29_64] "r"(cospi_29_64),
+          [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],              4(%[input])                    \n\t"
         "lh       %[load2],             60(%[input])                    \n\t"
         "lh       %[load3],             36(%[input])                    \n\t"
@@ -337,18 +333,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step2_8],           %[temp0],       %[temp1]        \n\t"
         "add      %[step2_15],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
-          [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step2_8] "=r" (step2_8), [step2_9] "=r" (step2_9),
-          [step2_14] "=r" (step2_14), [step2_15] "=r" (step2_15)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64),
-          [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64),
-          [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step2_8] "=r"(step2_8),
+          [step2_9] "=r"(step2_9), [step2_14] "=r"(step2_14),
+          [step2_15] "=r"(step2_15)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_30_64] "r"(cospi_30_64), [cospi_2_64] "r"(cospi_2_64),
+          [cospi_14_64] "r"(cospi_14_64), [cospi_18_64] "r"(cospi_18_64),
+          [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             20(%[input])                    \n\t"
         "lh       %[load2],             44(%[input])                    \n\t"
         "lh       %[load3],             52(%[input])                    \n\t"
@@ -394,18 +389,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step2_11],          %[temp0],       %[temp1]        \n\t"
         "add      %[step2_12],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3),
-          [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11),
-          [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64),
-          [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64),
-          [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step2_10] "=r"(step2_10),
+          [step2_11] "=r"(step2_11), [step2_12] "=r"(step2_12),
+          [step2_13] "=r"(step2_13)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_22_64] "r"(cospi_22_64), [cospi_10_64] "r"(cospi_10_64),
+          [cospi_6_64] "r"(cospi_6_64), [cospi_26_64] "r"(cospi_26_64),
+          [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));
+
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "sub      %[temp0],             %[step2_14],    %[step2_13]     \n\t"
@@ -440,33 +434,31 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "extp     %[step3_11],          $ac2,           31              \n\t"
         "extp     %[step3_12],          $ac3,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [step3_8] "=r" (step3_8), [step3_9] "=r" (step3_9),
-          [step3_10] "=r" (step3_10), [step3_11] "=r" (step3_11),
-          [step3_12] "=r" (step3_12), [step3_13] "=r" (step3_13),
-          [step3_14] "=r" (step3_14), [step3_15] "=r" (step3_15)
-        : [const_2_power_13] "r" (const_2_power_13), [step2_8] "r" (step2_8),
-          [step2_9] "r" (step2_9), [step2_10] "r" (step2_10),
-          [step2_11] "r" (step2_11), [step2_12] "r" (step2_12),
-          [step2_13] "r" (step2_13), [step2_14] "r" (step2_14),
-          [step2_15] "r" (step2_15), [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [step3_8] "=r"(step3_8),
+          [step3_9] "=r"(step3_9), [step3_10] "=r"(step3_10),
+          [step3_11] "=r"(step3_11), [step3_12] "=r"(step3_12),
+          [step3_13] "=r"(step3_13), [step3_14] "=r"(step3_14),
+          [step3_15] "=r"(step3_15)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_8] "r"(step2_8),
+          [step2_9] "r"(step2_9), [step2_10] "r"(step2_10),
+          [step2_11] "r"(step2_11), [step2_12] "r"(step2_12),
+          [step2_13] "r"(step2_13), [step2_14] "r"(step2_14),
+          [step2_15] "r"(step2_15), [cospi_16_64] "r"(cospi_16_64));
 
     step2_18 = step1_17 - step1_18;
     step2_29 = step1_30 - step1_29;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "msub     $ac0,                 %[step2_18],    %[cospi_8_64]   \n\t"
         "madd     $ac0,                 %[step2_29],    %[cospi_24_64]  \n\t"
         "extp     %[step3_18],          $ac0,           31              \n\t"
 
-        : [step3_18] "=r" (step3_18)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_18] "r" (step2_18), [step2_29] "r" (step2_29),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
+        : [step3_18] "=r"(step3_18)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_18] "r"(step2_18),
+          [step2_29] "r"(step2_29), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
     temp21 = step2_18 * cospi_24_64 + step2_29 * cospi_8_64;
     step3_29 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
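Plain-C shape of the $ac0 sequence above, with the Q14 constants assumed (cospi_8_64 = 15137, cospi_24_64 = 6270) and assuming the DSPControl pos field was set earlier in the function so that "extp ..., 31" extracts the accumulator shifted right by 14:

static int32_t step3_18_sketch(int32_t step2_18, int32_t step2_29) {
  int64_t acc = 8192;               /* mtlo const_2_power_13, $ac0 */
  acc -= (int64_t)step2_18 * 15137; /* msub ..., cospi_8_64        */
  acc += (int64_t)step2_29 * 6270;  /* madd ..., cospi_24_64       */
  return (int32_t)(acc >> 14);      /* extp %[step3_18], $ac0, 31  */
}

The mirrored step3_29 stays in plain C on the lines above because it reuses the same operands with the coefficients exchanged.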
@@ -474,18 +466,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
     step2_19 = step1_16 - step1_19;
     step2_28 = step1_31 - step1_28;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "msub     $ac0,                 %[step2_19],    %[cospi_8_64]   \n\t"
         "madd     $ac0,                 %[step2_28],    %[cospi_24_64]  \n\t"
         "extp     %[step3_19],          $ac0,           31              \n\t"
 
-        : [step3_19] "=r" (step3_19)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_19] "r" (step2_19), [step2_28] "r" (step2_28),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
+        : [step3_19] "=r"(step3_19)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_19] "r"(step2_19),
+          [step2_28] "r"(step2_28), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
     temp21 = step2_19 * cospi_24_64 + step2_28 * cospi_8_64;
     step3_28 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
@@ -498,18 +489,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
     step2_20 = step1_23 - step1_20;
     step2_27 = step1_24 - step1_27;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "msub     $ac0,                 %[step2_20],    %[cospi_24_64]  \n\t"
         "msub     $ac0,                 %[step2_27],    %[cospi_8_64]   \n\t"
         "extp     %[step3_20],          $ac0,           31              \n\t"
 
-        : [step3_20] "=r" (step3_20)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_20] "r" (step2_20), [step2_27] "r" (step2_27),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
+        : [step3_20] "=r"(step3_20)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_20] "r"(step2_20),
+          [step2_27] "r"(step2_27), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
     temp21 = -step2_20 * cospi_8_64 + step2_27 * cospi_24_64;
     step3_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
@@ -517,18 +507,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
     step2_21 = step1_22 - step1_21;
     step2_26 = step1_25 - step1_26;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac1                            \n\t"
         "mthi     $zero,                $ac1                            \n\t"
         "msub     $ac1,                 %[step2_21],    %[cospi_24_64]  \n\t"
         "msub     $ac1,                 %[step2_26],    %[cospi_8_64]   \n\t"
         "extp     %[step3_21],          $ac1,           31              \n\t"
 
-        : [step3_21] "=r" (step3_21)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_21] "r" (step2_21), [step2_26] "r" (step2_26),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
+        : [step3_21] "=r"(step3_21)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_21] "r"(step2_21),
+          [step2_26] "r"(step2_26), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
     temp21 = -step2_21 * cospi_8_64 + step2_26 * cospi_24_64;
     step3_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
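One identity worth keeping in mind while comparing the asm and C halves of these pairs:

/* The mtlo seed const_2_power_13 (8192) equals DCT_CONST_ROUNDING,
 * i.e. 1 << (DCT_CONST_BITS - 1), so for any signed sum of products t:
 *   extracted(acc) == (t + DCT_CONST_ROUNDING) >> DCT_CONST_BITS
 * which is exactly the expression on the C fallback lines.
 */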
@@ -556,7 +545,7 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
     step2_30 = step3_30 + step3_25;
     step2_31 = step3_31 + step3_24;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lh       %[load1],             0(%[input])                     \n\t"
         "lh       %[load2],             32(%[input])                    \n\t"
         "lh       %[load3],             16(%[input])                    \n\t"
@@ -588,19 +577,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "sub      %[step1_2],           %[temp1],       %[temp2]        \n\t"
         "sub      %[step1_3],           %[temp0],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1),
-          [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_0] "=r"(step1_0),
+          [step1_1] "=r"(step1_1), [step1_2] "=r"(step1_2),
+          [step1_3] "=r"(step1_3)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64),
+          [cospi_16_64] "r"(cospi_16_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             8(%[input])                     \n\t"
         "lh       %[load2],             56(%[input])                    \n\t"
         "lh       %[load3],             40(%[input])                    \n\t"
@@ -649,17 +636,15 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "add      %[step1_4],           %[temp0],       %[temp1]        \n\t"
         "add      %[step1_7],           %[temp3],       %[temp2]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5),
-          [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_4] "=r"(step1_4),
+          [step1_5] "=r"(step1_5), [step1_6] "=r"(step1_6),
+          [step1_7] "=r"(step1_7)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),
+          [cospi_16_64] "r"(cospi_16_64));
 
     step2_0 = step1_0 + step1_7;
     step2_1 = step1_1 + step1_6;
@@ -688,67 +673,63 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
     step1_14 = step2_1 - step3_14;
     step1_15 = step2_0 - step3_15;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sub      %[temp0],             %[step2_27],    %[step2_20]     \n\t"
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "madd     $ac0,                 %[temp0],       %[cospi_16_64]  \n\t"
         "extp     %[step1_20],          $ac0,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [step1_20] "=r" (step1_20)
-        : [const_2_power_13] "r" (const_2_power_13), [step2_20] "r" (step2_20),
-          [step2_27] "r" (step2_27), [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [step1_20] "=r"(step1_20)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_20] "r"(step2_20),
+          [step2_27] "r"(step2_27), [cospi_16_64] "r"(cospi_16_64));
 
     temp21 = (step2_20 + step2_27) * cospi_16_64;
     step1_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sub      %[temp0],             %[step2_26],    %[step2_21]     \n\t"
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "madd     $ac0,                 %[temp0],       %[cospi_16_64]  \n\t"
         "extp     %[step1_21],          $ac0,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [step1_21] "=r" (step1_21)
-        : [const_2_power_13] "r" (const_2_power_13), [step2_26] "r" (step2_26),
-          [step2_21] "r" (step2_21), [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [step1_21] "=r"(step1_21)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_26] "r"(step2_26),
+          [step2_21] "r"(step2_21), [cospi_16_64] "r"(cospi_16_64));
 
     temp21 = (step2_21 + step2_26) * cospi_16_64;
     step1_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sub      %[temp0],             %[step2_25],    %[step2_22]     \n\t"
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "madd     $ac0,                 %[temp0],       %[cospi_16_64]  \n\t"
         "extp     %[step1_22],          $ac0,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [step1_22] "=r" (step1_22)
-        : [const_2_power_13] "r" (const_2_power_13), [step2_25] "r" (step2_25),
-          [step2_22] "r" (step2_22), [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [step1_22] "=r"(step1_22)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_25] "r"(step2_25),
+          [step2_22] "r"(step2_22), [cospi_16_64] "r"(cospi_16_64));
 
     temp21 = (step2_22 + step2_25) * cospi_16_64;
     step1_25 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sub      %[temp0],             %[step2_24],    %[step2_23]     \n\t"
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "madd     $ac0,                 %[temp0],       %[cospi_16_64]  \n\t"
         "extp     %[step1_23],          $ac0,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [step1_23] "=r" (step1_23)
-        : [const_2_power_13] "r" (const_2_power_13), [step2_24] "r" (step2_24),
-          [step2_23] "r" (step2_23), [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [step1_23] "=r"(step1_23)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_24] "r"(step2_24),
+          [step2_23] "r"(step2_23), [cospi_16_64] "r"(cospi_16_64));
 
     temp21 = (step2_23 + step2_24) * cospi_16_64;
     step1_24 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[temp2],         0(%[dest_pix])                      \n\t"
         "add      %[temp0],         %[step1_0],         %[step2_31]     \n\t"
         "addi     %[temp0],         %[temp0],           32              \n\t"
@@ -783,21 +764,20 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "sb       %[temp1],         0(%[dest_pix])                      \n\t"
         "addu     %[dest_pix],      %[dest_pix],        %[dest_stride]  \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
-          [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step1_0] "r" (step1_0), [step1_1] "r" (step1_1),
-          [step1_2] "r" (step1_2), [step1_3] "r" (step1_3),
-          [step2_28] "r" (step2_28), [step2_29] "r" (step2_29),
-          [step2_30] "r" (step2_30), [step2_31] "r" (step2_31)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2),
+          [temp3] "=&r"(temp3), [dest_pix] "+r"(dest_pix)
+        : [cm] "r"(cm), [dest_stride] "r"(dest_stride), [step1_0] "r"(step1_0),
+          [step1_1] "r"(step1_1), [step1_2] "r"(step1_2),
+          [step1_3] "r"(step1_3), [step2_28] "r"(step2_28),
+          [step2_29] "r"(step2_29), [step2_30] "r"(step2_30),
+          [step2_31] "r"(step2_31));
 
     step3_12 = ROUND_POWER_OF_TWO((step1_3 - step2_28), 6);
     step3_13 = ROUND_POWER_OF_TWO((step1_2 - step2_29), 6);
     step3_14 = ROUND_POWER_OF_TWO((step1_1 - step2_30), 6);
     step3_15 = ROUND_POWER_OF_TWO((step1_0 - step2_31), 6);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[temp2],         0(%[dest_pix1])                     \n\t"
         "add      %[temp2],         %[temp2],           %[step3_15]     \n\t"
         "lbux     %[temp0],         %[temp2](%[cm])                     \n\t"
@@ -820,14 +800,13 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "sb       %[temp1],         0(%[dest_pix1])                     \n\t"
         "subu     %[dest_pix1],     %[dest_pix1],       %[dest_stride]  \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
-          [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step3_12] "r" (step3_12), [step3_13] "r" (step3_13),
-          [step3_14] "r" (step3_14), [step3_15] "r" (step3_15)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2),
+          [temp3] "=&r"(temp3), [dest_pix1] "+r"(dest_pix1)
+        : [cm] "r"(cm), [dest_stride] "r"(dest_stride),
+          [step3_12] "r"(step3_12), [step3_13] "r"(step3_13),
+          [step3_14] "r"(step3_14), [step3_15] "r"(step3_15));
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[temp2],         0(%[dest_pix])                      \n\t"
         "add      %[temp0],         %[step1_4],         %[step1_27]     \n\t"
         "addi     %[temp0],         %[temp0],           32              \n\t"
@@ -862,21 +841,20 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "sb       %[temp1],         0(%[dest_pix])                      \n\t"
         "addu     %[dest_pix],      %[dest_pix],        %[dest_stride]  \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
-          [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step1_4] "r" (step1_4), [step1_5] "r" (step1_5),
-          [step1_6] "r" (step1_6), [step1_7] "r" (step1_7),
-          [step1_24] "r" (step1_24), [step1_25] "r" (step1_25),
-          [step1_26] "r" (step1_26), [step1_27] "r" (step1_27)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2),
+          [temp3] "=&r"(temp3), [dest_pix] "+r"(dest_pix)
+        : [cm] "r"(cm), [dest_stride] "r"(dest_stride), [step1_4] "r"(step1_4),
+          [step1_5] "r"(step1_5), [step1_6] "r"(step1_6),
+          [step1_7] "r"(step1_7), [step1_24] "r"(step1_24),
+          [step1_25] "r"(step1_25), [step1_26] "r"(step1_26),
+          [step1_27] "r"(step1_27));
 
     step3_12 = ROUND_POWER_OF_TWO((step1_7 - step1_24), 6);
     step3_13 = ROUND_POWER_OF_TWO((step1_6 - step1_25), 6);
     step3_14 = ROUND_POWER_OF_TWO((step1_5 - step1_26), 6);
     step3_15 = ROUND_POWER_OF_TWO((step1_4 - step1_27), 6);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[temp2],         0(%[dest_pix1])                     \n\t"
         "add      %[temp2],         %[temp2],           %[step3_15]     \n\t"
         "lbux     %[temp0],         %[temp2](%[cm])                     \n\t"
@@ -899,14 +877,13 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "sb       %[temp1],         0(%[dest_pix1])                     \n\t"
         "subu     %[dest_pix1],     %[dest_pix1],       %[dest_stride]  \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
-          [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step3_12] "r" (step3_12), [step3_13] "r" (step3_13),
-          [step3_14] "r" (step3_14), [step3_15] "r" (step3_15)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2),
+          [temp3] "=&r"(temp3), [dest_pix1] "+r"(dest_pix1)
+        : [cm] "r"(cm), [dest_stride] "r"(dest_stride),
+          [step3_12] "r"(step3_12), [step3_13] "r"(step3_13),
+          [step3_14] "r"(step3_14), [step3_15] "r"(step3_15));
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[temp2],         0(%[dest_pix])                      \n\t"
         "add      %[temp0],         %[step1_8],         %[step1_23]     \n\t"
         "addi     %[temp0],         %[temp0],           32              \n\t"
@@ -941,21 +918,20 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "sb       %[temp1],         0(%[dest_pix])                      \n\t"
         "addu     %[dest_pix],      %[dest_pix],        %[dest_stride]  \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
-          [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step1_8] "r" (step1_8), [step1_9] "r" (step1_9),
-          [step1_10] "r" (step1_10), [step1_11] "r" (step1_11),
-          [step1_20] "r" (step1_20), [step1_21] "r" (step1_21),
-          [step1_22] "r" (step1_22), [step1_23] "r" (step1_23)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2),
+          [temp3] "=&r"(temp3), [dest_pix] "+r"(dest_pix)
+        : [cm] "r"(cm), [dest_stride] "r"(dest_stride), [step1_8] "r"(step1_8),
+          [step1_9] "r"(step1_9), [step1_10] "r"(step1_10),
+          [step1_11] "r"(step1_11), [step1_20] "r"(step1_20),
+          [step1_21] "r"(step1_21), [step1_22] "r"(step1_22),
+          [step1_23] "r"(step1_23));
 
     step3_12 = ROUND_POWER_OF_TWO((step1_11 - step1_20), 6);
     step3_13 = ROUND_POWER_OF_TWO((step1_10 - step1_21), 6);
     step3_14 = ROUND_POWER_OF_TWO((step1_9 - step1_22), 6);
     step3_15 = ROUND_POWER_OF_TWO((step1_8 - step1_23), 6);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[temp2],         0(%[dest_pix1])                     \n\t"
         "add      %[temp2],         %[temp2],           %[step3_15]     \n\t"
         "lbux     %[temp0],         %[temp2](%[cm])                     \n\t"
@@ -978,14 +954,13 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "sb       %[temp1],         0(%[dest_pix1])                     \n\t"
         "subu     %[dest_pix1],     %[dest_pix1],       %[dest_stride]  \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
-          [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step3_12] "r" (step3_12), [step3_13] "r" (step3_13),
-          [step3_14] "r" (step3_14), [step3_15] "r" (step3_15)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2),
+          [temp3] "=&r"(temp3), [dest_pix1] "+r"(dest_pix1)
+        : [cm] "r"(cm), [dest_stride] "r"(dest_stride),
+          [step3_12] "r"(step3_12), [step3_13] "r"(step3_13),
+          [step3_14] "r"(step3_14), [step3_15] "r"(step3_15));
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[temp2],         0(%[dest_pix])                      \n\t"
         "add      %[temp0],         %[step1_12],        %[step2_19]     \n\t"
         "addi     %[temp0],         %[temp0],           32              \n\t"
@@ -1019,21 +994,20 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "lbux     %[temp1],         %[temp3](%[cm])                     \n\t"
         "sb       %[temp1],         0(%[dest_pix])                      \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
-          [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step1_12] "r" (step1_12), [step1_13] "r" (step1_13),
-          [step1_14] "r" (step1_14), [step1_15] "r" (step1_15),
-          [step2_16] "r" (step2_16), [step2_17] "r" (step2_17),
-          [step2_18] "r" (step2_18), [step2_19] "r" (step2_19)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2),
+          [temp3] "=&r"(temp3), [dest_pix] "+r"(dest_pix)
+        : [cm] "r"(cm), [dest_stride] "r"(dest_stride),
+          [step1_12] "r"(step1_12), [step1_13] "r"(step1_13),
+          [step1_14] "r"(step1_14), [step1_15] "r"(step1_15),
+          [step2_16] "r"(step2_16), [step2_17] "r"(step2_17),
+          [step2_18] "r"(step2_18), [step2_19] "r"(step2_19));
 
     step3_12 = ROUND_POWER_OF_TWO((step1_15 - step2_16), 6);
     step3_13 = ROUND_POWER_OF_TWO((step1_14 - step2_17), 6);
     step3_14 = ROUND_POWER_OF_TWO((step1_13 - step2_18), 6);
     step3_15 = ROUND_POWER_OF_TWO((step1_12 - step2_19), 6);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lbu      %[temp2],         0(%[dest_pix1])                     \n\t"
         "add      %[temp2],         %[temp2],           %[step3_15]     \n\t"
         "lbux     %[temp0],         %[temp2](%[cm])                     \n\t"
@@ -1055,12 +1029,11 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "lbux     %[temp1],         %[temp3](%[cm])                     \n\t"
         "sb       %[temp1],         0(%[dest_pix1])                     \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
-          [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1)
-        : [cm] "r" (cm), [dest_stride] "r" (dest_stride),
-          [step3_12] "r" (step3_12), [step3_13] "r" (step3_13),
-          [step3_14] "r" (step3_14), [step3_15] "r" (step3_15)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2),
+          [temp3] "=&r"(temp3), [dest_pix1] "+r"(dest_pix1)
+        : [cm] "r"(cm), [dest_stride] "r"(dest_stride),
+          [step3_12] "r"(step3_12), [step3_13] "r"(step3_13),
+          [step3_14] "r"(step3_14), [step3_15] "r"(step3_15));
 
     input += 32;
   }
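The store ladders in this file reconstruct pixels as: round-shift the coefficient sum by 6, add it to the predictor byte, clamp through the table. A per-pixel C sketch (cm is assumed to point into vpx_ff_cropTbl so that indices outside [0, 255] saturate; the sra-by-6 sits in the elided middle of each asm block):

static void recon_pixel_sketch(unsigned char *dest, const unsigned char *cm,
                               int32_t sum) {
  int32_t r = (sum + 32) >> 6; /* addi 32; sra 6: ROUND_POWER_OF_TWO(., 6) */
  dest[0] = cm[dest[0] + r];   /* lbu; add; lbux; sb */
}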
diff --git a/vpx_dsp/mips/itrans32_dspr2.c b/vpx_dsp/mips/itrans32_dspr2.c
index 523da1df1bc7bb45e78b94f29041e4a7a990c114..d71c5ffed512feeb347abd147c352b303759b66a 100644
--- a/vpx_dsp/mips/itrans32_dspr2.c
+++ b/vpx_dsp/mips/itrans32_dspr2.c
@@ -40,16 +40,16 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
   const int const_2_power_13 = 8192;
   const int32_t *input_int;
 
-  for (i = no_rows; i--; ) {
+  for (i = no_rows; i--;) {
     input_int = (const int32_t *)input;
 
-    if (!(input_int[0]  | input_int[1]  | input_int[2]  | input_int[3]  |
-          input_int[4]  | input_int[5]  | input_int[6]  | input_int[7]  |
-          input_int[8]  | input_int[9]  | input_int[10] | input_int[11] |
+    if (!(input_int[0] | input_int[1] | input_int[2] | input_int[3] |
+          input_int[4] | input_int[5] | input_int[6] | input_int[7] |
+          input_int[8] | input_int[9] | input_int[10] | input_int[11] |
           input_int[12] | input_int[13] | input_int[14] | input_int[15])) {
       input += 32;
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "sh     $zero,     0(%[output])     \n\t"
           "sh     $zero,    64(%[output])     \n\t"
           "sh     $zero,   128(%[output])     \n\t"
@@ -84,8 +84,7 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
           "sh     $zero,  1984(%[output])     \n\t"
 
           :
-          : [output] "r" (output)
-      );
+          : [output] "r"(output));
 
       output += 1;
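Two details of the early-out above: a row whose 32 coefficients are all zero is detected with sixteen OR'd word loads (two int16 coefficients per int32 read), and outputs are stored 64 bytes apart (the sh ladder's offsets), i.e. with a stride of 32 int16s, leaving the intermediate transposed for the column pass. C sketches of both, as hypothetical helpers:

#include <stdint.h>

static int row_is_zero(const int16_t *row /* 32 coefficients */) {
  const int32_t *p = (const int32_t *)row; /* as in input_int above */
  int32_t acc = 0;
  int i;
  for (i = 0; i < 16; ++i) acc |= p[i];
  return acc == 0;
}

static void store_row_transposed(int16_t *output, const int16_t *row) {
  int c;
  for (c = 0; c < 32; ++c) output[c * 32] = row[c]; /* 64-byte stride */
}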
 
@@ -96,7 +95,7 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
     prefetch_load((const uint8_t *)(input + 32));
     prefetch_load((const uint8_t *)(input + 48));
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lh       %[load1],             2(%[input])                     \n\t"
         "lh       %[load2],             62(%[input])                    \n\t"
         "lh       %[load3],             34(%[input])                    \n\t"
@@ -146,19 +145,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step1_16],          %[temp0],       %[temp1]        \n\t"
         "add      %[step1_31],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_16] "=r" (step1_16), [step1_17] "=r" (step1_17),
-          [step1_30] "=r" (step1_30), [step1_31] "=r" (step1_31)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_31_64] "r" (cospi_31_64), [cospi_1_64] "r" (cospi_1_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_17_64] "r" (cospi_17_64),
-          [cospi_15_64] "r" (cospi_15_64), [cospi_28_64] "r" (cospi_28_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_16] "=r"(step1_16),
+          [step1_17] "=r"(step1_17), [step1_30] "=r"(step1_30),
+          [step1_31] "=r"(step1_31)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_31_64] "r"(cospi_31_64), [cospi_1_64] "r"(cospi_1_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_17_64] "r"(cospi_17_64),
+          [cospi_15_64] "r"(cospi_15_64), [cospi_28_64] "r"(cospi_28_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             18(%[input])                    \n\t"
         "lh       %[load2],             46(%[input])                    \n\t"
         "lh       %[load3],             50(%[input])                    \n\t"
@@ -208,19 +205,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step1_19],          %[temp0],       %[temp1]        \n\t"
         "add      %[step1_28],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_18] "=r" (step1_18), [step1_19] "=r" (step1_19),
-          [step1_28] "=r" (step1_28), [step1_29] "=r" (step1_29)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_23_64] "r" (cospi_23_64), [cospi_9_64] "r" (cospi_9_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_7_64] "r" (cospi_7_64),
-          [cospi_25_64] "r" (cospi_25_64), [cospi_28_64] "r" (cospi_28_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_18] "=r"(step1_18),
+          [step1_19] "=r"(step1_19), [step1_28] "=r"(step1_28),
+          [step1_29] "=r"(step1_29)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_23_64] "r"(cospi_23_64), [cospi_9_64] "r"(cospi_9_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_7_64] "r"(cospi_7_64),
+          [cospi_25_64] "r"(cospi_25_64), [cospi_28_64] "r"(cospi_28_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             10(%[input])                    \n\t"
         "lh       %[load2],             54(%[input])                    \n\t"
         "lh       %[load3],             42(%[input])                    \n\t"
@@ -270,19 +265,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step1_20],          %[temp0],       %[temp1]        \n\t"
         "add      %[step1_27],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_20] "=r" (step1_20), [step1_21] "=r" (step1_21),
-          [step1_26] "=r" (step1_26), [step1_27] "=r" (step1_27)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_27_64] "r" (cospi_27_64), [cospi_5_64] "r" (cospi_5_64),
-          [cospi_11_64] "r" (cospi_11_64), [cospi_21_64] "r" (cospi_21_64),
-          [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_20] "=r"(step1_20),
+          [step1_21] "=r"(step1_21), [step1_26] "=r"(step1_26),
+          [step1_27] "=r"(step1_27)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_27_64] "r"(cospi_27_64), [cospi_5_64] "r"(cospi_5_64),
+          [cospi_11_64] "r"(cospi_11_64), [cospi_21_64] "r"(cospi_21_64),
+          [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             26(%[input])                    \n\t"
         "lh       %[load2],             38(%[input])                    \n\t"
         "lh       %[load3],             58(%[input])                    \n\t"
@@ -332,19 +325,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step1_23],          %[temp0],       %[temp1]        \n\t"
         "add      %[step1_24],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_22] "=r" (step1_22), [step1_23] "=r" (step1_23),
-          [step1_24] "=r" (step1_24), [step1_25] "=r" (step1_25)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_19_64] "r" (cospi_19_64), [cospi_13_64] "r" (cospi_13_64),
-          [cospi_3_64] "r" (cospi_3_64), [cospi_29_64] "r" (cospi_29_64),
-          [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_22] "=r"(step1_22),
+          [step1_23] "=r"(step1_23), [step1_24] "=r"(step1_24),
+          [step1_25] "=r"(step1_25)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_19_64] "r"(cospi_19_64), [cospi_13_64] "r"(cospi_13_64),
+          [cospi_3_64] "r"(cospi_3_64), [cospi_29_64] "r"(cospi_29_64),
+          [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],              4(%[input])                    \n\t"
         "lh       %[load2],             60(%[input])                    \n\t"
         "lh       %[load3],             36(%[input])                    \n\t"
@@ -394,19 +385,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step2_8],           %[temp0],       %[temp1]        \n\t"
         "add      %[step2_15],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step2_8] "=r" (step2_8), [step2_9] "=r" (step2_9),
-          [step2_14] "=r" (step2_14), [step2_15] "=r" (step2_15)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64),
-          [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64),
-          [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step2_8] "=r"(step2_8),
+          [step2_9] "=r"(step2_9), [step2_14] "=r"(step2_14),
+          [step2_15] "=r"(step2_15)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_30_64] "r"(cospi_30_64), [cospi_2_64] "r"(cospi_2_64),
+          [cospi_14_64] "r"(cospi_14_64), [cospi_18_64] "r"(cospi_18_64),
+          [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));
+
+    __asm__ __volatile__(
         "lh       %[load1],             20(%[input])                    \n\t"
         "lh       %[load2],             44(%[input])                    \n\t"
         "lh       %[load3],             52(%[input])                    \n\t"
@@ -456,19 +445,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step2_11],          %[temp0],       %[temp1]        \n\t"
         "add      %[step2_12],          %[temp2],       %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11),
-          [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64),
-          [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64),
-          [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
-    );
-
-    __asm__ __volatile__ (
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step2_10] "=r"(step2_10),
+          [step2_11] "=r"(step2_11), [step2_12] "=r"(step2_12),
+          [step2_13] "=r"(step2_13)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_22_64] "r"(cospi_22_64), [cospi_10_64] "r"(cospi_10_64),
+          [cospi_6_64] "r"(cospi_6_64), [cospi_26_64] "r"(cospi_26_64),
+          [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));
+
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "sub      %[temp0],             %[step2_14],    %[step2_13]     \n\t"
@@ -507,34 +494,31 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "extp     %[step3_11],          $ac2,           31              \n\t"
         "extp     %[step3_12],          $ac3,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [step3_8] "=r" (step3_8), [step3_9] "=r" (step3_9),
-          [step3_10] "=r" (step3_10), [step3_11] "=r" (step3_11),
-          [step3_12] "=r" (step3_12), [step3_13] "=r" (step3_13),
-          [step3_14] "=r" (step3_14), [step3_15] "=r" (step3_15)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_8] "r" (step2_8), [step2_9] "r" (step2_9),
-          [step2_10] "r" (step2_10), [step2_11] "r" (step2_11),
-          [step2_12] "r" (step2_12), [step2_13] "r" (step2_13),
-          [step2_14] "r" (step2_14), [step2_15] "r" (step2_15),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [step3_8] "=r"(step3_8),
+          [step3_9] "=r"(step3_9), [step3_10] "=r"(step3_10),
+          [step3_11] "=r"(step3_11), [step3_12] "=r"(step3_12),
+          [step3_13] "=r"(step3_13), [step3_14] "=r"(step3_14),
+          [step3_15] "=r"(step3_15)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_8] "r"(step2_8),
+          [step2_9] "r"(step2_9), [step2_10] "r"(step2_10),
+          [step2_11] "r"(step2_11), [step2_12] "r"(step2_12),
+          [step2_13] "r"(step2_13), [step2_14] "r"(step2_14),
+          [step2_15] "r"(step2_15), [cospi_16_64] "r"(cospi_16_64));
 
     step2_18 = step1_17 - step1_18;
     step2_29 = step1_30 - step1_29;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "msub     $ac0,                 %[step2_18],    %[cospi_8_64]   \n\t"
         "madd     $ac0,                 %[step2_29],    %[cospi_24_64]  \n\t"
         "extp     %[step3_18],          $ac0,           31              \n\t"
 
-        : [step3_18] "=r" (step3_18)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_18] "r" (step2_18), [step2_29] "r" (step2_29),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
+        : [step3_18] "=r"(step3_18)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_18] "r"(step2_18),
+          [step2_29] "r"(step2_29), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
     temp21 = step2_18 * cospi_24_64 + step2_29 * cospi_8_64;
     step3_29 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
@@ -542,18 +526,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
     step2_19 = step1_16 - step1_19;
     step2_28 = step1_31 - step1_28;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "msub     $ac0,                 %[step2_19],    %[cospi_8_64]   \n\t"
         "madd     $ac0,                 %[step2_28],    %[cospi_24_64]  \n\t"
         "extp     %[step3_19],          $ac0,           31              \n\t"
 
-        : [step3_19] "=r" (step3_19)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_19] "r" (step2_19), [step2_28] "r" (step2_28),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
+        : [step3_19] "=r"(step3_19)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_19] "r"(step2_19),
+          [step2_28] "r"(step2_28), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
     temp21 = step2_19 * cospi_24_64 + step2_28 * cospi_8_64;
     step3_28 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
@@ -566,18 +549,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
     step2_20 = step1_23 - step1_20;
     step2_27 = step1_24 - step1_27;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "msub     $ac0,                 %[step2_20],    %[cospi_24_64]  \n\t"
         "msub     $ac0,                 %[step2_27],    %[cospi_8_64]   \n\t"
         "extp     %[step3_20],          $ac0,           31              \n\t"
 
-        : [step3_20] "=r" (step3_20)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_20] "r" (step2_20), [step2_27] "r" (step2_27),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
+        : [step3_20] "=r"(step3_20)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_20] "r"(step2_20),
+          [step2_27] "r"(step2_27), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
     temp21 = -step2_20 * cospi_8_64 + step2_27 * cospi_24_64;
     step3_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
@@ -585,18 +567,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
     step2_21 = step1_22 - step1_21;
     step2_26 = step1_25 - step1_26;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "mtlo     %[const_2_power_13],  $ac1                            \n\t"
         "mthi     $zero,                $ac1                            \n\t"
         "msub     $ac1,                 %[step2_21],    %[cospi_24_64]  \n\t"
         "msub     $ac1,                 %[step2_26],    %[cospi_8_64]   \n\t"
         "extp     %[step3_21],          $ac1,           31              \n\t"
 
-        : [step3_21] "=r" (step3_21)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_21] "r" (step2_21), [step2_26] "r" (step2_26),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-    );
+        : [step3_21] "=r"(step3_21)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_21] "r"(step2_21),
+          [step2_26] "r"(step2_26), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
     temp21 = -step2_21 * cospi_8_64 + step2_26 * cospi_24_64;
     step3_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
@@ -624,7 +605,7 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
     step2_30 = step3_30 + step3_25;
     step2_31 = step3_31 + step3_24;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lh       %[load1],             0(%[input])                     \n\t"
         "lh       %[load2],             32(%[input])                    \n\t"
         "lh       %[load3],             16(%[input])                    \n\t"
@@ -658,20 +639,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "sub      %[step1_2],          %[temp1],        %[temp2]        \n\t"
         "sub      %[step1_3],          %[temp0],        %[temp3]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [result1] "=&r" (result1), [result2] "=&r" (result2),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1),
-          [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_16_64] "r" (cospi_16_64),
-          [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
-
-    );
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [result1] "=&r"(result1),
+          [result2] "=&r"(result2), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_0] "=r"(step1_0),
+          [step1_1] "=r"(step1_1), [step1_2] "=r"(step1_2),
+          [step1_3] "=r"(step1_3)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_16_64] "r"(cospi_16_64), [cospi_24_64] "r"(cospi_24_64),
+          [cospi_8_64] "r"(cospi_8_64));
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lh       %[load1],             8(%[input])                     \n\t"
         "lh       %[load2],             56(%[input])                    \n\t"
         "lh       %[load3],             40(%[input])                    \n\t"
@@ -724,17 +704,15 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
         "add      %[step1_4],           %[temp0],       %[temp1]        \n\t"
         "add      %[step1_7],           %[temp3],       %[temp2]        \n\t"
 
-        : [load1] "=&r" (load1), [load2] "=&r" (load2),
-          [load3] "=&r" (load3), [load4] "=&r" (load4),
-          [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
-          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
-          [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5),
-          [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7)
-        : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input),
-          [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
+          [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),
+          [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_4] "=r"(step1_4),
+          [step1_5] "=r"(step1_5), [step1_6] "=r"(step1_6),
+          [step1_7] "=r"(step1_7)
+        : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input),
+          [cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),
+          [cospi_16_64] "r"(cospi_16_64));
 
     step2_0 = step1_0 + step1_7;
     step2_1 = step1_1 + step1_6;
@@ -762,66 +740,58 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
     step1_14 = step2_1 - step3_14;
     step1_15 = step2_0 - step3_15;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sub      %[temp0],             %[step2_27],    %[step2_20]     \n\t"
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "madd     $ac0,                 %[temp0],       %[cospi_16_64]  \n\t"
         "extp     %[step1_20],          $ac0,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [step1_20] "=r" (step1_20)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_20] "r" (step2_20), [step2_27] "r" (step2_27),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [step1_20] "=r"(step1_20)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_20] "r"(step2_20),
+          [step2_27] "r"(step2_27), [cospi_16_64] "r"(cospi_16_64));
 
     temp21 = (step2_20 + step2_27) * cospi_16_64;
     step1_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sub      %[temp0],             %[step2_26],    %[step2_21]     \n\t"
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "madd     $ac0,                 %[temp0],       %[cospi_16_64]  \n\t"
         "extp     %[step1_21],          $ac0,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [step1_21] "=r" (step1_21)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_26] "r" (step2_26), [step2_21] "r" (step2_21),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [step1_21] "=r"(step1_21)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_26] "r"(step2_26),
+          [step2_21] "r"(step2_21), [cospi_16_64] "r"(cospi_16_64));
 
     temp21 = (step2_21 + step2_26) * cospi_16_64;
     step1_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sub      %[temp0],             %[step2_25],    %[step2_22]     \n\t"
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "madd     $ac0,                 %[temp0],       %[cospi_16_64]  \n\t"
         "extp     %[step1_22],          $ac0,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [step1_22] "=r" (step1_22)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_25] "r" (step2_25), [step2_22] "r" (step2_22),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [step1_22] "=r"(step1_22)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_25] "r"(step2_25),
+          [step2_22] "r"(step2_22), [cospi_16_64] "r"(cospi_16_64));
 
     temp21 = (step2_22 + step2_25) * cospi_16_64;
     step1_25 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sub      %[temp0],             %[step2_24],    %[step2_23]     \n\t"
         "mtlo     %[const_2_power_13],  $ac0                            \n\t"
         "mthi     $zero,                $ac0                            \n\t"
         "madd     $ac0,                 %[temp0],       %[cospi_16_64]  \n\t"
         "extp     %[step1_23],          $ac0,           31              \n\t"
 
-        : [temp0] "=&r" (temp0), [step1_23] "=r" (step1_23)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [step2_24] "r" (step2_24), [step2_23] "r" (step2_23),
-          [cospi_16_64] "r" (cospi_16_64)
-    );
+        : [temp0] "=&r"(temp0), [step1_23] "=r"(step1_23)
+        : [const_2_power_13] "r"(const_2_power_13), [step2_24] "r"(step2_24),
+          [step2_23] "r"(step2_23), [cospi_16_64] "r"(cospi_16_64));
 
     temp21 = (step2_23 + step2_24) * cospi_16_64;
     step1_24 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
@@ -867,16 +837,14 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output,
 
 void vpx_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest,
                                   int dest_stride) {
-  DECLARE_ALIGNED(32, int16_t,  out[32 * 32]);
+  DECLARE_ALIGNED(32, int16_t, out[32 * 32]);
   int16_t *outptr = out;
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
   // Rows
   idct32_rows_dspr2(input, outptr, 32);
@@ -887,23 +855,21 @@ void vpx_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest,
 
 void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
                                 int stride) {
-  DECLARE_ALIGNED(32, int16_t,  out[32 * 32]);
+  DECLARE_ALIGNED(32, int16_t, out[32 * 32]);
   int16_t *outptr = out;
   uint32_t i;
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
   // Rows
   idct32_rows_dspr2(input, outptr, 8);
 
   outptr += 8;
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "sw     $zero,      0(%[outptr])     \n\t"
       "sw     $zero,      4(%[outptr])     \n\t"
       "sw     $zero,      8(%[outptr])     \n\t"
@@ -918,13 +884,12 @@ void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
       "sw     $zero,     44(%[outptr])     \n\t"
 
       :
-      : [outptr] "r" (outptr)
-  );
+      : [outptr] "r"(outptr));
 
   for (i = 0; i < 31; ++i) {
     outptr += 32;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "sw     $zero,      0(%[outptr])     \n\t"
         "sw     $zero,      4(%[outptr])     \n\t"
         "sw     $zero,      8(%[outptr])     \n\t"
@@ -939,8 +904,7 @@ void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
         "sw     $zero,     44(%[outptr])     \n\t"
 
         :
-        : [outptr] "r" (outptr)
-    );
+        : [outptr] "r"(outptr));
   }
 
   // Columns
@@ -949,43 +913,39 @@ void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest,
 
 void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest,
                                int stride) {
-  int       r, out;
-  int32_t   a1, absa1;
-  int32_t   vector_a1;
-  int32_t   t1, t2, t3, t4;
-  int32_t   vector_1, vector_2, vector_3, vector_4;
-  uint32_t  pos = 45;
+  int r, out;
+  int32_t a1, absa1;
+  int32_t vector_a1;
+  int32_t t1, t2, t3, t4;
+  int32_t vector_1, vector_2, vector_3, vector_4;
+  uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
 
-    :
-    : [pos] "r" (pos)
-  );
+                       :
+                       : [pos] "r"(pos));
 
   out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]);
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "addi     %[out],    %[out],    32      \n\t"
       "sra      %[a1],     %[out],    6       \n\t"
 
-      : [out] "+r" (out), [a1] "=r" (a1)
-      :
-  );
+      : [out] "+r"(out), [a1] "=r"(a1)
+      :);
 
   if (a1 < 0) {
     /* use quad-byte operations;
      * input and output memory are four-byte aligned */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "abs        %[absa1],     %[a1]         \n\t"
         "replv.qb   %[vector_a1], %[absa1]      \n\t"
 
-        : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
-        : [a1] "r" (a1)
-    );
+        : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1)
+        : [a1] "r"(a1));
 
     for (r = 32; r--;) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw             %[t1],          0(%[dest])                      \n\t"
           "lw             %[t2],          4(%[dest])                      \n\t"
           "lw             %[t3],          8(%[dest])                      \n\t"
@@ -1014,25 +974,22 @@ void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest,
 
           "add            %[dest],        %[dest],        %[stride]       \n\t"
 
-          : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4),
-            [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
-            [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4),
-            [dest] "+&r" (dest)
-          : [stride] "r" (stride), [vector_a1] "r" (vector_a1)
-      );
+          : [t1] "=&r"(t1), [t2] "=&r"(t2), [t3] "=&r"(t3), [t4] "=&r"(t4),
+            [vector_1] "=&r"(vector_1), [vector_2] "=&r"(vector_2),
+            [vector_3] "=&r"(vector_3), [vector_4] "=&r"(vector_4),
+            [dest] "+&r"(dest)
+          : [stride] "r"(stride), [vector_a1] "r"(vector_a1));
     }
   } else {
     /* use quad-byte operations;
      * input and output memory are four-byte aligned */
-    __asm__ __volatile__ (
-        "replv.qb       %[vector_a1],   %[a1]     \n\t"
+    __asm__ __volatile__("replv.qb       %[vector_a1],   %[a1]     \n\t"
 
-        : [vector_a1] "=r" (vector_a1)
-        : [a1] "r" (a1)
-    );
+                         : [vector_a1] "=r"(vector_a1)
+                         : [a1] "r"(a1));
 
     for (r = 32; r--;) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw             %[t1],          0(%[dest])                      \n\t"
           "lw             %[t2],          4(%[dest])                      \n\t"
           "lw             %[t3],          8(%[dest])                      \n\t"
@@ -1061,12 +1018,11 @@ void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest,
 
           "add            %[dest],        %[dest],        %[stride]       \n\t"
 
-          : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4),
-            [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
-            [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4),
-            [dest] "+&r" (dest)
-          : [stride] "r" (stride), [vector_a1] "r" (vector_a1)
-      );
+          : [t1] "=&r"(t1), [t2] "=&r"(t2), [t3] "=&r"(t3), [t4] "=&r"(t4),
+            [vector_1] "=&r"(vector_1), [vector_2] "=&r"(vector_2),
+            [vector_3] "=&r"(vector_3), [vector_4] "=&r"(vector_4),
+            [dest] "+&r"(dest)
+          : [stride] "r"(stride), [vector_a1] "r"(vector_a1));
     }
   }
 }
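
All of the DSPr2 idct blocks in this file follow one fixed-point pattern:
seed the accumulator with const_2_power_13 (mtlo/mthi), multiply-accumulate
the cosine constants (madd/msub), then extract the rounded result (extp).
A portable sketch of what each group computes, assuming the libvpx
definitions DCT_CONST_BITS == 14 and DCT_CONST_ROUNDING == 1 << 13 from
vpx_dsp/txfm_common.h (the helper name below is ours, not the library's):

#include <stdint.h>

#define DCT_CONST_BITS 14                              /* assumed, txfm_common.h */
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1)) /* 8192 == const_2_power_13 */

/* One mtlo/mthi + madd/msub + extp group: seed the 64-bit accumulator
 * with the rounding constant, accumulate signed products, then
 * "extp %[dst], $acN, 31" (with the wrdsp pos field set to 45, as the
 * callers do) extracts bits 45..14, i.e. an arithmetic >> 14. */
static int32_t dct_two_mul_round_shift(int32_t a, int32_t ca, int32_t b,
                                       int32_t cb) {
  int64_t acc = DCT_CONST_ROUNDING; /* mtlo const_2_power_13 / mthi $zero */
  acc += (int64_t)a * ca;           /* madd; an msub contributes -a * ca */
  acc += (int64_t)b * cb;
  return (int32_t)(acc >> DCT_CONST_BITS); /* extp ..., 31 */
}

The step3_19 block above, for instance, computes
dct_two_mul_round_shift(step2_19, -cospi_8_64, step2_28, cospi_24_64),
mirroring the plain-C temp21 expression used for step3_28 right after it.
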
diff --git a/vpx_dsp/mips/itrans4_dspr2.c b/vpx_dsp/mips/itrans4_dspr2.c
index ecb8bd3de7518d0039d92deeb52e9833953a3ca9..516ea80f4ae96179ebbc2c643130ab0f4a4d3543 100644
--- a/vpx_dsp/mips/itrans4_dspr2.c
+++ b/vpx_dsp/mips/itrans4_dspr2.c
@@ -15,13 +15,13 @@
 
 #if HAVE_DSPR2
 void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) {
-  int16_t   step_0, step_1, step_2, step_3;
-  int       Temp0, Temp1, Temp2, Temp3;
+  int16_t step_0, step_1, step_2, step_3;
+  int Temp0, Temp1, Temp2, Temp3;
   const int const_2_power_13 = 8192;
-  int       i;
+  int i;
 
-  for (i = 4; i--; ) {
-    __asm__ __volatile__ (
+  for (i = 4; i--;) {
+    __asm__ __volatile__(
         /*
           temp_1 = (input[0] + input[2]) * cospi_16_64;
           step_0 = dct_const_round_shift(temp_1);
@@ -83,16 +83,12 @@ void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) {
         "sub      %[Temp3],             %[step_0],      %[step_3]       \n\t"
         "sh       %[Temp3],             24(%[output])                   \n\t"
 
-      : [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1),
-        [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-        [step_0] "=&r" (step_0), [step_1] "=&r" (step_1),
-        [step_2] "=&r" (step_2), [step_3] "=&r" (step_3),
-        [output] "+r" (output)
-      : [const_2_power_13] "r" (const_2_power_13),
-        [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64),
-        [cospi_24_64] "r" (cospi_24_64),
-        [input] "r" (input)
-    );
+        : [Temp0] "=&r"(Temp0), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+          [Temp3] "=&r"(Temp3), [step_0] "=&r"(step_0), [step_1] "=&r"(step_1),
+          [step_2] "=&r"(step_2), [step_3] "=&r"(step_3), [output] "+r"(output)
+        : [const_2_power_13] "r"(const_2_power_13),
+          [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64),
+          [cospi_24_64] "r"(cospi_24_64), [input] "r"(input));
 
     input += 4;
     output += 1;
@@ -101,27 +97,27 @@ void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) {
 
 void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
                                      int dest_stride) {
-  int16_t   step_0, step_1, step_2, step_3;
-  int       Temp0, Temp1, Temp2, Temp3;
+  int16_t step_0, step_1, step_2, step_3;
+  int Temp0, Temp1, Temp2, Temp3;
   const int const_2_power_13 = 8192;
-  int       i;
-  uint8_t   *dest_pix;
-  uint8_t   *cm = vpx_ff_cropTbl;
+  int i;
+  uint8_t *dest_pix;
+  uint8_t *cm = vpx_ff_cropTbl;
 
   /* prefetch vpx_ff_cropTbl */
   prefetch_load(vpx_ff_cropTbl);
-  prefetch_load(vpx_ff_cropTbl +  32);
-  prefetch_load(vpx_ff_cropTbl +  64);
-  prefetch_load(vpx_ff_cropTbl +  96);
+  prefetch_load(vpx_ff_cropTbl + 32);
+  prefetch_load(vpx_ff_cropTbl + 64);
+  prefetch_load(vpx_ff_cropTbl + 96);
   prefetch_load(vpx_ff_cropTbl + 128);
   prefetch_load(vpx_ff_cropTbl + 160);
   prefetch_load(vpx_ff_cropTbl + 192);
   prefetch_load(vpx_ff_cropTbl + 224);
 
   for (i = 0; i < 4; ++i) {
-      dest_pix = (dest + i);
+    dest_pix = (dest + i);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         /*
           temp_1 = (input[0] + input[2]) * cospi_16_64;
           step_0 = dct_const_round_shift(temp_1);
@@ -206,16 +202,14 @@ void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "lbux     %[Temp2],             %[Temp1](%[cm])                 \n\t"
         "sb       %[Temp2],             0(%[dest_pix])                  \n\t"
 
-      : [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1),
-        [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-        [step_0] "=&r" (step_0), [step_1] "=&r" (step_1),
-        [step_2] "=&r" (step_2), [step_3] "=&r" (step_3),
-        [dest_pix] "+r" (dest_pix)
-      : [const_2_power_13] "r" (const_2_power_13),
-        [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64),
-        [cospi_24_64] "r" (cospi_24_64),
-        [input] "r" (input), [cm] "r" (cm), [dest_stride] "r" (dest_stride)
-    );
+        : [Temp0] "=&r"(Temp0), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+          [Temp3] "=&r"(Temp3), [step_0] "=&r"(step_0), [step_1] "=&r"(step_1),
+          [step_2] "=&r"(step_2), [step_3] "=&r"(step_3),
+          [dest_pix] "+r"(dest_pix)
+        : [const_2_power_13] "r"(const_2_power_13),
+          [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64),
+          [cospi_24_64] "r"(cospi_24_64), [input] "r"(input), [cm] "r"(cm),
+          [dest_stride] "r"(dest_stride));
 
     input += 4;
   }
@@ -228,11 +222,9 @@ void vpx_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
+                       :
+                       : [pos] "r"(pos));
 
   // Rows
   vpx_idct4_rows_dspr2(input, outptr);
@@ -243,73 +235,63 @@ void vpx_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
 
 void vpx_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest,
                              int dest_stride) {
-  int       a1, absa1;
-  int       r;
-  int32_t   out;
-  int       t2, vector_a1, vector_a;
-  uint32_t  pos = 45;
-  int16_t   input_dc = input[0];
+  int a1, absa1;
+  int r;
+  int32_t out;
+  int t2, vector_a1, vector_a;
+  uint32_t pos = 45;
+  int16_t input_dc = input[0];
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
 
-    :
-    : [pos] "r" (pos)
-  );
+                       :
+                       : [pos] "r"(pos));
 
   out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input_dc);
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "addi     %[out],     %[out],    8       \n\t"
       "sra      %[a1],      %[out],    4       \n\t"
 
-      : [out] "+r" (out), [a1] "=r" (a1)
-      :
-  );
+      : [out] "+r"(out), [a1] "=r"(a1)
+      :);
 
   if (a1 < 0) {
     /* use quad-byte operations;
      * input and output memory are four-byte aligned */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "abs        %[absa1],     %[a1]         \n\t"
         "replv.qb   %[vector_a1], %[absa1]      \n\t"
 
-        : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
-        : [a1] "r" (a1)
-    );
+        : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1)
+        : [a1] "r"(a1));
 
     for (r = 4; r--;) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw             %[t2],          0(%[dest])                      \n\t"
           "subu_s.qb      %[vector_a],    %[t2],          %[vector_a1]    \n\t"
           "sw             %[vector_a],    0(%[dest])                      \n\t"
           "add            %[dest],        %[dest],        %[dest_stride]  \n\t"
 
-          : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a),
-            [dest] "+&r" (dest)
-          : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
-      );
+          : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a), [dest] "+&r"(dest)
+          : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1));
     }
   } else {
     /* use quad-byte operations;
      * input and output memory are four-byte aligned */
-    __asm__ __volatile__ (
-        "replv.qb       %[vector_a1],   %[a1]     \n\t"
-        : [vector_a1] "=r" (vector_a1)
-        : [a1] "r" (a1)
-    );
+    __asm__ __volatile__("replv.qb       %[vector_a1],   %[a1]     \n\t"
+                         : [vector_a1] "=r"(vector_a1)
+                         : [a1] "r"(a1));
 
     for (r = 4; r--;) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw           %[t2],          0(%[dest])                        \n\t"
           "addu_s.qb    %[vector_a],    %[t2],            %[vector_a1]    \n\t"
           "sw           %[vector_a],    0(%[dest])                        \n\t"
           "add          %[dest],        %[dest],          %[dest_stride]  \n\t"
 
-          : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a),
-            [dest] "+&r" (dest)
-          : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
-      );
+          : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a), [dest] "+&r"(dest)
+          : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1));
     }
   }
 }
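
The DC-only path just above (vpx_idct4x4_1_add_dspr2) reduces the whole
transform to one clamped constant per pixel: replv.qb replicates |a1| into
all four bytes of a register and addu_s.qb / subu_s.qb apply it with
unsigned byte saturation, four pixels per store. A scalar sketch, assuming
cospi_16_64 == 11585 from txfm_common.h and the usual rounded shift at
each step; clip_byte is a hypothetical helper, not a libvpx API:

#include <stdint.h>

static uint8_t clip_byte(int v) {
  return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

static void idct4x4_1_add_model(int16_t dc, uint8_t *dest, int stride) {
  /* DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64: two rounded multiplies. */
  int64_t out = ((int64_t)dc * 11585 + (1 << 13)) >> 14;
  out = (out * 11585 + (1 << 13)) >> 14;
  const int a1 = (int)((out + 8) >> 4); /* the addi/sra pair in the asm */
  int r, c;

  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) dest[c] = clip_byte(dest[c] + a1);
    dest += stride;
  }
}

The a1 < 0 branch subtracts the replicated |a1| so the unsigned saturation
clamps at zero, which is exactly what clip_byte does for negative sums.
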
diff --git a/vpx_dsp/mips/itrans8_dspr2.c b/vpx_dsp/mips/itrans8_dspr2.c
index 823e845d59d5618396990d4b18c5e0ae962df131..08a6c78b6e4d2a6b066f82acce04405954514e08 100644
--- a/vpx_dsp/mips/itrans8_dspr2.c
+++ b/vpx_dsp/mips/itrans8_dspr2.c
@@ -20,8 +20,8 @@ void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows) {
   int Temp0, Temp1, Temp2, Temp3, Temp4;
   int i;
 
-  for (i = no_rows; i--; ) {
-    __asm__ __volatile__ (
+  for (i = no_rows; i--;) {
+    __asm__ __volatile__(
         /*
           temp_1 = (input[0] + input[4]) * cospi_16_64;
           step2_0 = dct_const_round_shift(temp_1);
@@ -174,20 +174,18 @@ void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows) {
         "sub      %[Temp1],             %[step1_0],     %[step1_7]      \n\t"
         "sh       %[Temp1],             112(%[output])                  \n\t"
 
-        : [step1_0] "=&r" (step1_0), [step1_1] "=&r" (step1_1),
-          [step1_2] "=&r" (step1_2), [step1_3] "=&r" (step1_3),
-          [step1_4] "=&r" (step1_4), [step1_5] "=&r" (step1_5),
-          [step1_6] "=&r" (step1_6), [step1_7] "=&r" (step1_7),
-          [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1),
-          [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-          [Temp4] "=&r" (Temp4)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64),
-          [cospi_20_64] "r" (cospi_20_64), [cospi_8_64] "r" (cospi_8_64),
-          [cospi_24_64] "r" (cospi_24_64),
-          [output] "r" (output), [input] "r" (input)
-    );
+        : [step1_0] "=&r"(step1_0), [step1_1] "=&r"(step1_1),
+          [step1_2] "=&r"(step1_2), [step1_3] "=&r"(step1_3),
+          [step1_4] "=&r"(step1_4), [step1_5] "=&r"(step1_5),
+          [step1_6] "=&r"(step1_6), [step1_7] "=&r"(step1_7),
+          [Temp0] "=&r"(Temp0), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
+        : [const_2_power_13] "r"(const_2_power_13),
+          [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_12_64] "r"(cospi_12_64),
+          [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64),
+          [cospi_24_64] "r"(cospi_24_64), [output] "r"(output),
+          [input] "r"(input));
 
     input += 8;
     output += 1;
@@ -205,18 +203,18 @@ void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
 
   /* prefetch vpx_ff_cropTbl */
   prefetch_load(vpx_ff_cropTbl);
-  prefetch_load(vpx_ff_cropTbl +  32);
-  prefetch_load(vpx_ff_cropTbl +  64);
-  prefetch_load(vpx_ff_cropTbl +  96);
+  prefetch_load(vpx_ff_cropTbl + 32);
+  prefetch_load(vpx_ff_cropTbl + 64);
+  prefetch_load(vpx_ff_cropTbl + 96);
   prefetch_load(vpx_ff_cropTbl + 128);
   prefetch_load(vpx_ff_cropTbl + 160);
   prefetch_load(vpx_ff_cropTbl + 192);
   prefetch_load(vpx_ff_cropTbl + 224);
 
   for (i = 0; i < 8; ++i) {
-      dest_pix = (dest + i);
+    dest_pix = (dest + i);
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         /*
           temp_1 = (input[0] + input[4]) * cospi_16_64;
           step2_0 = dct_const_round_shift(temp_1);
@@ -423,20 +421,18 @@ void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest,
         "lbux     %[Temp2],             %[Temp1](%[cm])                 \n\t"
         "sb       %[Temp2],             0(%[dest_pix])                  \n\t"
 
-        : [step1_0] "=&r" (step1_0), [step1_1] "=&r" (step1_1),
-          [step1_2] "=&r" (step1_2), [step1_3] "=&r" (step1_3),
-          [step1_4] "=&r" (step1_4), [step1_5] "=&r" (step1_5),
-          [step1_6] "=&r" (step1_6), [step1_7] "=&r" (step1_7),
-          [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1),
-          [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-          [dest_pix] "+r" (dest_pix)
-        : [const_2_power_13] "r" (const_2_power_13),
-          [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64),
-          [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64),
-          [cospi_20_64] "r" (cospi_20_64), [cospi_8_64] "r" (cospi_8_64),
-          [cospi_24_64] "r" (cospi_24_64),
-          [input] "r" (input), [cm] "r" (cm), [dest_stride] "r" (dest_stride)
-    );
+        : [step1_0] "=&r"(step1_0), [step1_1] "=&r"(step1_1),
+          [step1_2] "=&r"(step1_2), [step1_3] "=&r"(step1_3),
+          [step1_4] "=&r"(step1_4), [step1_5] "=&r"(step1_5),
+          [step1_6] "=&r"(step1_6), [step1_7] "=&r"(step1_7),
+          [Temp0] "=&r"(Temp0), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+          [Temp3] "=&r"(Temp3), [dest_pix] "+r"(dest_pix)
+        : [const_2_power_13] "r"(const_2_power_13),
+          [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64),
+          [cospi_4_64] "r"(cospi_4_64), [cospi_12_64] "r"(cospi_12_64),
+          [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64),
+          [cospi_24_64] "r"(cospi_24_64), [input] "r"(input), [cm] "r"(cm),
+          [dest_stride] "r"(dest_stride));
 
     input += 8;
   }
@@ -449,11 +445,7 @@ void vpx_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp    %[pos],    1    \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp    %[pos],    1    \n\t" : : [pos] "r"(pos));
 
   // First transform rows
   idct8_rows_dspr2(input, outptr, 8);
@@ -469,18 +461,14 @@ void vpx_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest,
   uint32_t pos = 45;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp    %[pos],    1    \n\t"
-    :
-    : [pos] "r" (pos)
-  );
+  __asm__ __volatile__("wrdsp    %[pos],    1    \n\t" : : [pos] "r"(pos));
 
   // First transform rows
   idct8_rows_dspr2(input, outptr, 4);
 
   outptr += 4;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "sw  $zero,   0(%[outptr])  \n\t"
       "sw  $zero,   4(%[outptr])  \n\t"
       "sw  $zero,  16(%[outptr])  \n\t"
@@ -499,9 +487,7 @@ void vpx_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest,
       "sw  $zero, 116(%[outptr])  \n\t"
 
       :
-      : [outptr] "r" (outptr)
-  );
-
+      : [outptr] "r"(outptr));
 
   // Then transform columns and add to dest
   idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride);
@@ -516,35 +502,31 @@ void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest,
   int32_t t1, t2, vector_a1, vector_1, vector_2;
 
   /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
+  __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
 
-    :
-    : [pos] "r" (pos)
-  );
+                       :
+                       : [pos] "r"(pos));
 
   out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]);
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "addi     %[out],     %[out],     16      \n\t"
       "sra      %[a1],      %[out],     5       \n\t"
 
-      : [out] "+r" (out), [a1] "=r" (a1)
-      :
-  );
+      : [out] "+r"(out), [a1] "=r"(a1)
+      :);
 
   if (a1 < 0) {
     /* use quad-byte operations;
      * input and output memory are four-byte aligned */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "abs        %[absa1],       %[a1]       \n\t"
         "replv.qb   %[vector_a1],   %[absa1]    \n\t"
 
-        : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
-        : [a1] "r" (a1)
-    );
+        : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1)
+        : [a1] "r"(a1));
 
     for (r = 8; r--;) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw           %[t1],          0(%[dest])                      \n\t"
           "lw           %[t2],          4(%[dest])                      \n\t"
           "subu_s.qb    %[vector_1],    %[t1],          %[vector_a1]    \n\t"
@@ -553,24 +535,20 @@ void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest,
           "sw           %[vector_2],    4(%[dest])                      \n\t"
           "add          %[dest],        %[dest],        %[dest_stride]  \n\t"
 
-          : [t1] "=&r" (t1), [t2] "=&r" (t2),
-            [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
-            [dest] "+&r" (dest)
-          : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
-      );
+          : [t1] "=&r"(t1), [t2] "=&r"(t2), [vector_1] "=&r"(vector_1),
+            [vector_2] "=&r"(vector_2), [dest] "+&r"(dest)
+          : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1));
     }
   } else {
     /* use quad-byte operations;
      * input and output memory are four-byte aligned */
-    __asm__ __volatile__ (
-        "replv.qb   %[vector_a1],   %[a1]   \n\t"
+    __asm__ __volatile__("replv.qb   %[vector_a1],   %[a1]   \n\t"
 
-        : [vector_a1] "=r" (vector_a1)
-        : [a1] "r" (a1)
-    );
+                         : [vector_a1] "=r"(vector_a1)
+                         : [a1] "r"(a1));
 
     for (r = 8; r--;) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw           %[t1],          0(%[dest])                      \n\t"
           "lw           %[t2],          4(%[dest])                      \n\t"
           "addu_s.qb    %[vector_1],    %[t1],          %[vector_a1]    \n\t"
@@ -579,11 +557,9 @@ void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest,
           "sw           %[vector_2],    4(%[dest])                      \n\t"
           "add          %[dest],        %[dest],        %[dest_stride]  \n\t"
 
-          : [t1] "=&r" (t1), [t2] "=&r" (t2),
-            [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2),
-            [dest] "+r" (dest)
-          : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1)
-      );
+          : [t1] "=&r"(t1), [t2] "=&r"(t2), [vector_1] "=&r"(vector_1),
+            [vector_2] "=&r"(vector_2), [dest] "+r"(dest)
+          : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1));
     }
   }
 }
@@ -602,20 +578,20 @@ void iadst8_dspr2(const int16_t *input, int16_t *output) {
   x7 = input[6];
 
   if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
-    output[0] = output[1] = output[2] = output[3] = output[4]
-              = output[5] = output[6] = output[7] = 0;
+    output[0] = output[1] = output[2] = output[3] = output[4] = output[5] =
+        output[6] = output[7] = 0;
     return;
   }
 
   // stage 1
-  s0 = cospi_2_64  * x0 + cospi_30_64 * x1;
-  s1 = cospi_30_64 * x0 - cospi_2_64  * x1;
+  s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+  s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
   s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
   s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
   s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
   s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
-  s6 = cospi_26_64 * x6 + cospi_6_64  * x7;
-  s7 = cospi_6_64  * x6 - cospi_26_64 * x7;
+  s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+  s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
 
   x0 = ROUND_POWER_OF_TWO((s0 + s4), DCT_CONST_BITS);
   x1 = ROUND_POWER_OF_TWO((s1 + s5), DCT_CONST_BITS);
@@ -631,10 +607,10 @@ void iadst8_dspr2(const int16_t *input, int16_t *output) {
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 =  cospi_8_64  * x4 + cospi_24_64 * x5;
-  s5 =  cospi_24_64 * x4 - cospi_8_64  * x5;
-  s6 = -cospi_24_64 * x6 + cospi_8_64  * x7;
-  s7 =  cospi_8_64  * x6 + cospi_24_64 * x7;
+  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
 
   x0 = s0 + s2;
   x1 = s1 + s3;
@@ -656,13 +632,13 @@ void iadst8_dspr2(const int16_t *input, int16_t *output) {
   x6 = ROUND_POWER_OF_TWO((s6), DCT_CONST_BITS);
   x7 = ROUND_POWER_OF_TWO((s7), DCT_CONST_BITS);
 
-  output[0] =  x0;
+  output[0] = x0;
   output[1] = -x4;
-  output[2] =  x6;
+  output[2] = x6;
   output[3] = -x2;
-  output[4] =  x3;
+  output[4] = x3;
   output[5] = -x7;
-  output[6] =  x5;
+  output[6] = x5;
   output[7] = -x1;
 }
 #endif  // HAVE_DSPR2
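
iadst8_dspr2 above stays in plain C; its butterflies lean on the same
rounded right-shift that the DSP accumulator performs in the idct paths.
For reference, the macro it calls (this mirrors the definition in
vpx_dsp/vpx_dsp_common.h):

/* Rounded fixed-point shift: add half the divisor, then shift. With
 * n == DCT_CONST_BITS (14) this equals dct_const_round_shift(). */
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

The sign flips in the final output[] stores fold the ADST's alternating
sign pattern into the write-out, so no separate negation pass is needed.
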
diff --git a/vpx_dsp/mips/loopfilter_16_msa.c b/vpx_dsp/mips/loopfilter_16_msa.c
index 09f132d1b87c4c106f56a84f2b33478f6ed4e0c4..d5eae3bfa1c96a3826de6ea9456d9c437e43f359 100644
--- a/vpx_dsp/mips/loopfilter_16_msa.c
+++ b/vpx_dsp/mips/loopfilter_16_msa.c
@@ -11,8 +11,7 @@
 #include "vpx_ports/mem.h"
 #include "vpx_dsp/mips/loopfilter_msa.h"
 
-int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch,
-                                 uint8_t *filter48,
+int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48,
                                  const uint8_t *b_limit_ptr,
                                  const uint8_t *limit_ptr,
                                  const uint8_t *thresh_ptr) {
@@ -33,8 +32,8 @@ int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch,
   limit = (v16u8)__msa_fill_b(*limit_ptr);
 
   /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
@@ -43,9 +42,8 @@ int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch,
 
     return 1;
   } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
-               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r,
-               q2_r, q3_r);
+    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
+               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
     VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
@@ -107,8 +105,8 @@ void vpx_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) {
   } else {
     src -= 7 * pitch;
 
-    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2,
-               zero, p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in,
+    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero,
+               p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in,
                p2_r_in, p1_r_in, p0_r_in);
 
     q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0);
@@ -408,8 +406,7 @@ void vpx_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) {
 void vpx_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch,
                                     const uint8_t *b_limit_ptr,
                                     const uint8_t *limit_ptr,
-                                    const uint8_t *thresh_ptr,
-                                    int32_t count) {
+                                    const uint8_t *thresh_ptr, int32_t count) {
   DECLARE_ALIGNED(32, uint8_t, filter48[16 * 8]);
   uint8_t early_exit = 0;
 
@@ -426,8 +423,7 @@ void vpx_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch,
 void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch,
                                const uint8_t *b_limit_ptr,
                                const uint8_t *limit_ptr,
-                               const uint8_t *thresh_ptr,
-                               int32_t count) {
+                               const uint8_t *thresh_ptr, int32_t count) {
   if (1 == count) {
     uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
     uint64_t dword0, dword1;
@@ -449,8 +445,8 @@ void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch,
     b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
     limit = (v16u8)__msa_fill_b(*limit_ptr);
 
-    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-                 hev, mask, flat);
+    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+                 mask, flat);
     VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
     VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
                        q1_out);
@@ -472,9 +468,8 @@ void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch,
                   p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
 
       /* convert 16 bit output data into 8 bit */
-      PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8,
-                  zero, q0_filter8, p2_filter8, p1_filter8, p0_filter8,
-                  q0_filter8);
+      PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8, zero,
+                  q0_filter8, p2_filter8, p1_filter8, p0_filter8, q0_filter8);
       PCKEV_B2_SH(zero, q1_filter8, zero, q2_filter8, q1_filter8, q2_filter8);
 
       /* store pixel values */
@@ -654,8 +649,8 @@ static void transpose_16x8_to_8x16(uint8_t *input, int32_t in_pitch,
   v16i8 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
 
-  LD_UB8(input, in_pitch,
-         p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org, p0_org);
+  LD_UB8(input, in_pitch, p7_org, p6_org, p5_org, p4_org, p3_org, p2_org,
+         p1_org, p0_org);
   /* 8x8 transpose */
   TRANSPOSE8x8_UB_UB(p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org,
                      p0_org, p7, p6, p5, p4, p3, p2, p1, p0);
@@ -685,8 +680,8 @@ static void transpose_8x16_to_16x8(uint8_t *input, int32_t in_pitch,
   ST_UB8(p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o, output, out_pitch);
 }
 
-static void transpose_16x16(uint8_t *input, int32_t in_pitch,
-                            uint8_t *output, int32_t out_pitch) {
+static void transpose_16x16(uint8_t *input, int32_t in_pitch, uint8_t *output,
+                            int32_t out_pitch) {
   v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
   v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
   v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
@@ -695,12 +690,11 @@ static void transpose_16x16(uint8_t *input, int32_t in_pitch,
 
   LD_UB8(input, in_pitch, row0, row1, row2, row3, row4, row5, row6, row7);
   input += (8 * in_pitch);
-  LD_UB8(input, in_pitch,
-         row8, row9, row10, row11, row12, row13, row14, row15);
+  LD_UB8(input, in_pitch, row8, row9, row10, row11, row12, row13, row14, row15);
 
-  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
-                      row8, row9, row10, row11, row12, row13, row14, row15,
-                      p7, p6, p5, p4, p3, p2, p1, p0);
+  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+                      row9, row10, row11, row12, row13, row14, row15, p7, p6,
+                      p5, p4, p3, p2, p1, p0);
 
   /* transpose 16x8 matrix into 8x16 */
   /* total 8 intermediate register and 32 instructions */
@@ -765,8 +759,8 @@ int32_t vpx_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48,
   limit = (v16u8)__msa_fill_b(*limit_ptr);
 
   /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   /* flat4 */
   VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
@@ -780,9 +774,8 @@ int32_t vpx_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48,
     ST4x8_UB(vec2, vec3, (src_org - 2), pitch_org);
     return 1;
   } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
-               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
-               q3_r);
+    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
+               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
     VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
@@ -850,9 +843,9 @@ int32_t vpx_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch,
   } else {
     src -= 7 * 16;
 
-    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2,
-               zero, p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in,
-               p3_r_in, p2_r_in, p1_r_in, p0_r_in);
+    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero,
+               p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in,
+               p2_r_in, p1_r_in, p0_r_in);
     q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0);
 
     tmp0_r = p7_r_in << 3;
@@ -1042,9 +1035,9 @@ void vpx_lpf_vertical_16_msa(uint8_t *src, int32_t pitch,
 
   transpose_16x8_to_8x16(src - 8, pitch, transposed_input, 16);
 
-  early_exit = vpx_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8),
-                                       &filter48[0], src, pitch, b_limit_ptr,
-                                       limit_ptr, thresh_ptr);
+  early_exit =
+      vpx_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src,
+                              pitch, b_limit_ptr, limit_ptr, thresh_ptr);
 
   if (0 == early_exit) {
     early_exit = vpx_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch,
@@ -1079,8 +1072,8 @@ int32_t vpx_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48,
   limit = (v16u8)__msa_fill_b(*limit_ptr);
 
   /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   /* flat4 */
   VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
@@ -1099,9 +1092,8 @@ int32_t vpx_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48,
 
     return 1;
   } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
-               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
-               q3_r);
+    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
+               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
     VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
@@ -1182,9 +1174,9 @@ int32_t vpx_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch,
   } else {
     src -= 7 * 16;
 
-    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2,
-               zero, p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in,
-               p3_r_in, p2_r_in, p1_r_in, p0_r_in);
+    ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero,
+               p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in,
+               p2_r_in, p1_r_in, p0_r_in);
     q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0);
 
     tmp0_r = p7_r_in << 3;
@@ -1465,9 +1457,9 @@ void vpx_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch,
 
   transpose_16x16((src - 8), pitch, &transposed_input[0], 16);
 
-  early_exit = vpx_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8),
-                                        &filter48[0], src, pitch, b_limit_ptr,
-                                        limit_ptr, thresh_ptr);
+  early_exit =
+      vpx_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src,
+                               pitch, b_limit_ptr, limit_ptr, thresh_ptr);
 
   if (0 == early_exit) {
     early_exit = vpx_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch,
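
Throughout these MSA loop filters, LPF_MASK_HEV decides per pixel column
whether the edge gets filtered at all, and the __msa_test_bz_v(flat)
branches skip the wide 8/16-tap path entirely when no column needs it. A
scalar sketch of the mask test, following the reference logic in
vpx_dsp/loopfilter.c (the signature here is illustrative, not the
library's):

#include <stdint.h>
#include <stdlib.h>

/* Nonzero when this column may be filtered: every neighbour delta must
 * stay within |limit| and the across-edge delta within |blimit|. */
static int filter_mask_model(uint8_t limit, uint8_t blimit, uint8_t p3,
                             uint8_t p2, uint8_t p1, uint8_t p0, uint8_t q0,
                             uint8_t q1, uint8_t q2, uint8_t q3) {
  return abs(p3 - p2) <= limit && abs(p2 - p1) <= limit &&
         abs(p1 - p0) <= limit && abs(q1 - q0) <= limit &&
         abs(q2 - q1) <= limit && abs(q3 - q2) <= limit &&
         abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit;
}

The vectorized LPF_MASK_HEV evaluates this for 16 columns at once and also
derives hev, which flags columns where |p1 - p0| or |q1 - q0| exceeds
thresh and steers the 4-tap filter in VPX_LPF_FILTER4_*.
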
diff --git a/vpx_dsp/mips/loopfilter_4_msa.c b/vpx_dsp/mips/loopfilter_4_msa.c
index 303276a7dc36f06875a6137e1d0a32a7522abfd7..7ca0ac6f90233018fd69388619375e42fb5169e8 100644
--- a/vpx_dsp/mips/loopfilter_4_msa.c
+++ b/vpx_dsp/mips/loopfilter_4_msa.c
@@ -13,8 +13,7 @@
 void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch,
                               const uint8_t *b_limit_ptr,
                               const uint8_t *limit_ptr,
-                              const uint8_t *thresh_ptr,
-                              int32_t count) {
+                              const uint8_t *thresh_ptr, int32_t count) {
   uint64_t p1_d, p0_d, q0_d, q1_d;
   v16u8 mask, hev, flat, thresh, b_limit, limit;
   v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out;
@@ -28,8 +27,8 @@ void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch,
   b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
   limit = (v16u8)__msa_fill_b(*limit_ptr);
 
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
@@ -64,8 +63,8 @@ void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
   limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
   limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);
 
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
+               mask, flat);
   VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
 
   ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch);
@@ -73,8 +72,7 @@ void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
 
 void vpx_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
                             const uint8_t *b_limit_ptr,
-                            const uint8_t *limit_ptr,
-                            const uint8_t *thresh_ptr,
+                            const uint8_t *limit_ptr, const uint8_t *thresh_ptr,
                             int32_t count) {
   v16u8 mask, hev, flat, limit, thresh, b_limit;
   v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
@@ -88,10 +86,10 @@ void vpx_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
   b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
   limit = (v16u8)__msa_fill_b(*limit_ptr);
 
-  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3,
-                     p3, p2, p1, p0, q0, q1, q2, q3);
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2,
+                     q3);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
   ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1);
   ILVRL_H2_SH(vec1, vec0, vec2, vec3);
@@ -117,12 +115,12 @@ void vpx_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
   v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
 
   LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
-  LD_UB8(src - 4 + (8 * pitch), pitch,
-         row8, row9, row10, row11, row12, row13, row14, row15);
+  LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13,
+         row14, row15);
 
-  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
-                      row8, row9, row10, row11, row12, row13, row14, row15,
-                      p3, p2, p1, p0, q0, q1, q2, q3);
+  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+                      row9, row10, row11, row12, row13, row14, row15, p3, p2,
+                      p1, p0, q0, q1, q2, q3);
 
   thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);
   thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);
@@ -136,8 +134,8 @@ void vpx_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
   limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
   limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);
 
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
+               mask, flat);
   VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
   ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
   ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
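
The vertical variants in this file reuse the horizontal kernel by
transposing first: TRANSPOSE8x8_UB_UB (and the 16x8 form above) rotate the
vertical edge into rows, the filter runs, and the ILVR/ILVRL interleaves
repack the modified columns for the narrow ST4x4/ST4x8 stores.
Functionally the transpose is just this, vectorized:

#include <stdint.h>

/* Plain-C equivalent of the 8x8 byte transpose: element (r, c) of the
 * input becomes element (c, r) of the output. */
static void transpose8x8_model(const uint8_t in[8][8], uint8_t out[8][8]) {
  int r, c;
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) out[c][r] = in[r][c];
}
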
diff --git a/vpx_dsp/mips/loopfilter_8_msa.c b/vpx_dsp/mips/loopfilter_8_msa.c
index 2bfcd4def22cf1547c46570ab93358755d82215a..6564af51e095a1e5063ed96aa56facf1df2bfdab 100644
--- a/vpx_dsp/mips/loopfilter_8_msa.c
+++ b/vpx_dsp/mips/loopfilter_8_msa.c
@@ -13,8 +13,7 @@
 void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
                               const uint8_t *b_limit_ptr,
                               const uint8_t *limit_ptr,
-                              const uint8_t *thresh_ptr,
-                              int32_t count) {
+                              const uint8_t *thresh_ptr, int32_t count) {
   uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
   v16u8 mask, hev, flat, thresh, b_limit, limit;
   v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
@@ -32,8 +31,8 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
   b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
   limit = (v16u8)__msa_fill_b(*limit_ptr);
 
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
@@ -46,16 +45,14 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
     q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
     SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch);
   } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
-               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r,
-               q2_r, q3_r);
+    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
+               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
     VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
                 p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
 
     /* convert 16 bit output data into 8 bit */
-    PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8,
-                zero, q0_filter8, p2_filter8, p1_filter8, p0_filter8,
-                q0_filter8);
+    PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8, zero,
+                q0_filter8, p2_filter8, p1_filter8, p0_filter8, q0_filter8);
     PCKEV_B2_SH(zero, q1_filter8, zero, q2_filter8, q1_filter8, q2_filter8);
 
     /* store pixel values */
@@ -83,13 +80,10 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch,
   }
 }
 
-void vpx_lpf_horizontal_8_dual_msa(uint8_t *src, int32_t pitch,
-                                   const uint8_t *b_limit0,
-                                   const uint8_t *limit0,
-                                   const uint8_t *thresh0,
-                                   const uint8_t *b_limit1,
-                                   const uint8_t *limit1,
-                                   const uint8_t *thresh1) {
+void vpx_lpf_horizontal_8_dual_msa(
+    uint8_t *src, int32_t pitch, const uint8_t *b_limit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *b_limit1, const uint8_t *limit1,
+    const uint8_t *thresh1) {
   v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
   v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
   v16u8 flat, mask, hev, tmp, thresh, b_limit, limit;
@@ -115,17 +109,16 @@ void vpx_lpf_horizontal_8_dual_msa(uint8_t *src, int32_t pitch,
   limit = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)limit);
 
   /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
   } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
-               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r,
-               q2_r, q3_r);
+    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
+               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
     VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
@@ -160,8 +153,7 @@ void vpx_lpf_horizontal_8_dual_msa(uint8_t *src, int32_t pitch,
 
 void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
                             const uint8_t *b_limit_ptr,
-                            const uint8_t *limit_ptr,
-                            const uint8_t *thresh_ptr,
+                            const uint8_t *limit_ptr, const uint8_t *thresh_ptr,
                             int32_t count) {
   v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
   v16u8 p1_out, p0_out, q0_out, q1_out;
@@ -176,16 +168,16 @@ void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
   /* load vector elements */
   LD_UB8(src - 4, pitch, p3, p2, p1, p0, q0, q1, q2, q3);
 
-  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3,
-                     p3, p2, p1, p0, q0, q1, q2, q3);
+  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2,
+                     q3);
 
   thresh = (v16u8)__msa_fill_b(*thresh_ptr);
   b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
   limit = (v16u8)__msa_fill_b(*limit_ptr);
 
   /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   /* flat4 */
   VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
@@ -203,9 +195,8 @@ void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
     src += 4 * pitch;
     ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
   } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
-               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
-               q3_r);
+    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
+               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
     VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
     /* convert 16 bit output data into 8 bit */
@@ -238,11 +229,9 @@ void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch,
 }
 
 void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
-                                 const uint8_t *b_limit0,
-                                 const uint8_t *limit0,
+                                 const uint8_t *b_limit0, const uint8_t *limit0,
                                  const uint8_t *thresh0,
-                                 const uint8_t *b_limit1,
-                                 const uint8_t *limit1,
+                                 const uint8_t *b_limit1, const uint8_t *limit1,
                                  const uint8_t *thresh1) {
   uint8_t *temp_src;
   v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
@@ -263,9 +252,9 @@ void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
   LD_UB8(temp_src, pitch, q3, q2, q1, q0, row12, row13, row14, row15);
 
   /* transpose 16x8 matrix into 8x16 */
-  TRANSPOSE16x8_UB_UB(p0, p1, p2, p3, row4, row5, row6, row7,
-                      q3, q2, q1, q0, row12, row13, row14, row15,
-                      p3, p2, p1, p0, q0, q1, q2, q3);
+  TRANSPOSE16x8_UB_UB(p0, p1, p2, p3, row4, row5, row6, row7, q3, q2, q1, q0,
+                      row12, row13, row14, row15, p3, p2, p1, p0, q0, q1, q2,
+                      q3);
 
   thresh = (v16u8)__msa_fill_b(*thresh0);
   vec0 = (v8i16)__msa_fill_b(*thresh1);
@@ -280,8 +269,8 @@ void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
   limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)limit);
 
   /* mask and hev */
-  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
-               hev, mask, flat);
+  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+               mask, flat);
   /* flat4 */
   VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
@@ -298,9 +287,8 @@ void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch,
     src += 8 * pitch;
     ST4x8_UB(vec4, vec5, src, pitch);
   } else {
-    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
-               zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
-               q3_r);
+    ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero,
+               q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);
     VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
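A note on the MSA hunks above: the flat branch first widens each pixel to a 16-bit lane (ILVR_B8_UH interleaves the bytes with zero) because an 8-tap sum of 8-bit pixels needs up to 11 bits before the final shift; the PCKEV_* calls then pack the even bytes back down, which is what the "convert 16 bit output data into 8 bit" comments describe. A minimal scalar sketch of one tap, using the *op2 equation quoted later in this diff (filter8_op2_tap is a hypothetical name):

    #include <stdint.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    /* Eight 8-bit inputs can sum to 2040 plus rounding, which only fits
     * back into 8 bits after the >> 3; hence the 16-bit lanes. */
    static uint8_t filter8_op2_tap(uint8_t p3, uint8_t p2, uint8_t p1,
                                   uint8_t p0, uint8_t q0) {
      const int sum = p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0;
      return (uint8_t)ROUND_POWER_OF_TWO(sum, 3);
    }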
 
diff --git a/vpx_dsp/mips/loopfilter_filters_dspr2.c b/vpx_dsp/mips/loopfilter_filters_dspr2.c
index 99a96d89b94edb37e950128163cc3267085e1572..7d17165a166e2a85e5efa98f4d08180f84108f90 100644
--- a/vpx_dsp/mips/loopfilter_filters_dspr2.c
+++ b/vpx_dsp/mips/loopfilter_filters_dspr2.c
@@ -19,34 +19,30 @@
 #include "vpx_mem/vpx_mem.h"
 
 #if HAVE_DSPR2
-void vpx_lpf_horizontal_4_dspr2(unsigned char *s,
-                                int pitch,
-                                const uint8_t *blimit,
-                                const uint8_t *limit,
-                                const uint8_t *thresh,
-                                int count) {
-  uint8_t   i;
-  uint32_t  mask;
-  uint32_t  hev;
-  uint32_t  pm1, p0, p1, p2, p3, p4, p5, p6;
-  uint8_t   *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
-  uint32_t  thresh_vec, flimit_vec, limit_vec;
-  uint32_t  uflimit, ulimit, uthresh;
+void vpx_lpf_horizontal_4_dspr2(unsigned char *s, int pitch,
+                                const uint8_t *blimit, const uint8_t *limit,
+                                const uint8_t *thresh, int count) {
+  uint8_t i;
+  uint32_t mask;
+  uint32_t hev;
+  uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+  uint8_t *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+  uint32_t thresh_vec, flimit_vec, limit_vec;
+  uint32_t uflimit, ulimit, uthresh;
 
   uflimit = *blimit;
   ulimit = *limit;
   uthresh = *thresh;
 
   /* create quad-byte */
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "replv.qb       %[thresh_vec],    %[uthresh]    \n\t"
       "replv.qb       %[flimit_vec],    %[uflimit]    \n\t"
       "replv.qb       %[limit_vec],     %[ulimit]     \n\t"
 
-      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
-        [limit_vec] "=r" (limit_vec)
-      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
-  );
+      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+        [limit_vec] "=r"(limit_vec)
+      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
 
   /* prefetch data for store */
   prefetch_store(s);
@@ -63,49 +59,44 @@ void vpx_lpf_horizontal_4_dspr2(unsigned char *s,
     s5 = s4 + pitch;
     s6 = s5 + pitch;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lw     %[p1],  (%[s1])    \n\t"
         "lw     %[p2],  (%[s2])    \n\t"
         "lw     %[p3],  (%[s3])    \n\t"
         "lw     %[p4],  (%[s4])    \n\t"
 
-        : [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4)
-        : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4)
-    );
+        : [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4)
+        : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
 
    /* if (p1 - p4 == 0) and (p2 - p3 == 0),
       mask will be zero, so filtering is not needed */
     if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "lw       %[pm1], (%[sm1])   \n\t"
           "lw       %[p0],  (%[s0])    \n\t"
           "lw       %[p5],  (%[s5])    \n\t"
           "lw       %[p6],  (%[s6])    \n\t"
 
-          : [pm1] "=&r" (pm1), [p0] "=&r" (p0), [p5] "=&r" (p5),
-            [p6] "=&r" (p6)
-          : [sm1] "r" (sm1), [s0] "r" (s0), [s5] "r" (s5), [s6] "r" (s6)
-      );
+          : [pm1] "=&r"(pm1), [p0] "=&r"(p0), [p5] "=&r"(p5), [p6] "=&r"(p6)
+          : [sm1] "r"(sm1), [s0] "r"(s0), [s5] "r"(s5), [s6] "r"(s6));
 
-      filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2,
-                            pm1, p0, p3, p4, p5, p6,
-                            thresh_vec, &hev, &mask);
+      filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, pm1, p0, p3, p4, p5,
+                            p6, thresh_vec, &hev, &mask);
 
      /* if mask == 0, filtering is not needed */
       if (mask) {
         /* filtering */
         filter_dspr2(mask, hev, &p1, &p2, &p3, &p4);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sw     %[p1],  (%[s1])    \n\t"
             "sw     %[p2],  (%[s2])    \n\t"
             "sw     %[p3],  (%[s3])    \n\t"
             "sw     %[p4],  (%[s4])    \n\t"
 
             :
-            : [p1] "r" (p1), [p2] "r" (p2), [p3] "r" (p3), [p4] "r" (p4),
-              [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4)
-        );
+            : [p1] "r"(p1), [p2] "r"(p2), [p3] "r"(p3), [p4] "r"(p4),
+              [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
       }
     }
 
@@ -113,34 +104,30 @@ void vpx_lpf_horizontal_4_dspr2(unsigned char *s,
   }
 }
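The dspr2 kernel above filters four columns per iteration: replv.qb replicates each one-byte threshold across all four lanes of a 32-bit word, and every lw loads four adjacent pixels from one row, so the quad-byte arithmetic handles four edge positions in parallel. The early (p1 - p4 == 0) && (p2 - p3 == 0) test then skips the filter entirely when the rows around the edge are identical. A scalar equivalent of the replv.qb step (replicate_byte is a hypothetical name):

    #include <stdint.h>

    /* Scalar view of "replv.qb rd, rs": copy the low byte of rs into
     * every byte lane of a 32-bit quad-byte vector. */
    static uint32_t replicate_byte(uint8_t b) {
      return (uint32_t)b * 0x01010101u;
    }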
 
-void vpx_lpf_vertical_4_dspr2(unsigned char *s,
-                              int pitch,
-                              const uint8_t *blimit,
-                              const uint8_t *limit,
-                              const uint8_t *thresh,
-                              int count) {
-  uint8_t   i;
-  uint32_t  mask, hev;
-  uint32_t  pm1, p0, p1, p2, p3, p4, p5, p6;
-  uint8_t   *s1, *s2, *s3, *s4;
-  uint32_t  prim1, prim2, sec3, sec4, prim3, prim4;
-  uint32_t  thresh_vec, flimit_vec, limit_vec;
-  uint32_t  uflimit, ulimit, uthresh;
+void vpx_lpf_vertical_4_dspr2(unsigned char *s, int pitch,
+                              const uint8_t *blimit, const uint8_t *limit,
+                              const uint8_t *thresh, int count) {
+  uint8_t i;
+  uint32_t mask, hev;
+  uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+  uint8_t *s1, *s2, *s3, *s4;
+  uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+  uint32_t thresh_vec, flimit_vec, limit_vec;
+  uint32_t uflimit, ulimit, uthresh;
 
   uflimit = *blimit;
   ulimit = *limit;
   uthresh = *thresh;
 
   /* create quad-byte */
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "replv.qb       %[thresh_vec],    %[uthresh]    \n\t"
       "replv.qb       %[flimit_vec],    %[uflimit]    \n\t"
       "replv.qb       %[limit_vec],     %[ulimit]     \n\t"
 
-      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
-        [limit_vec] "=r" (limit_vec)
-      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
-  );
+      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+        [limit_vec] "=r"(limit_vec)
+      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
 
   /* prefetch data for store */
   prefetch_store(s + pitch);
@@ -150,22 +137,22 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s,
     s2 = s + pitch;
     s3 = s2 + pitch;
     s4 = s3 + pitch;
-    s  = s4 + pitch;
+    s = s4 + pitch;
 
     /* load quad-byte vectors
      * memory is 4 byte aligned
      */
-    p2  = *((uint32_t *)(s1 - 4));
-    p6  = *((uint32_t *)(s1));
-    p1  = *((uint32_t *)(s2 - 4));
-    p5  = *((uint32_t *)(s2));
-    p0  = *((uint32_t *)(s3 - 4));
-    p4  = *((uint32_t *)(s3));
+    p2 = *((uint32_t *)(s1 - 4));
+    p6 = *((uint32_t *)(s1));
+    p1 = *((uint32_t *)(s2 - 4));
+    p5 = *((uint32_t *)(s2));
+    p0 = *((uint32_t *)(s3 - 4));
+    p4 = *((uint32_t *)(s3));
     pm1 = *((uint32_t *)(s4 - 4));
-    p3  = *((uint32_t *)(s4));
+    p3 = *((uint32_t *)(s4));
 
     /* transpose pm1, p0, p1, p2 */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
         "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
         "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
@@ -181,15 +168,13 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s,
         "append         %[p1],      %[sec3],    16          \n\t"
         "append         %[pm1],     %[sec4],    16          \n\t"
 
-        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
-          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
-          [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),
-          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
-        :
-    );
+        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+          [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
+          [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+        :);
 
     /* transpose p3, p4, p5, p6 */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
         "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
         "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
@@ -205,20 +190,17 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s,
         "append         %[p5],      %[sec3],    16          \n\t"
         "append         %[p3],      %[sec4],    16          \n\t"
 
-        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
-          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
-          [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
-          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
-        :
-    );
+        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+          [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
+          [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+        :);
 
    /* if (p1 - p4 == 0) and (p2 - p3 == 0),
     * mask will be zero, so filtering is not needed
     */
     if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
-      filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, pm1,
-                            p0, p3, p4, p5, p6, thresh_vec,
-                            &hev, &mask);
+      filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, pm1, p0, p3, p4, p5,
+                            p6, thresh_vec, &hev, &mask);
 
      /* if mask == 0, filtering is not needed */
       if (mask) {
@@ -229,107 +211,93 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s,
          * don't use transpose on output data
          * because memory isn't aligned
          */
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p4],   1(%[s4])    \n\t"
             "sb     %[p3],   0(%[s4])    \n\t"
             "sb     %[p2],  -1(%[s4])    \n\t"
             "sb     %[p1],  -2(%[s4])    \n\t"
 
             :
-            : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1),
-              [s4] "r" (s4)
-        );
+            : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
+              [s4] "r"(s4));
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "srl    %[p4],  %[p4],  8     \n\t"
             "srl    %[p3],  %[p3],  8     \n\t"
             "srl    %[p2],  %[p2],  8     \n\t"
             "srl    %[p1],  %[p1],  8     \n\t"
 
-            : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
-            :
-        );
+            : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+            :);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p4],   1(%[s3])    \n\t"
             "sb     %[p3],   0(%[s3])    \n\t"
             "sb     %[p2],  -1(%[s3])    \n\t"
             "sb     %[p1],  -2(%[s3])    \n\t"
 
-            : [p1] "+r" (p1)
-            : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2), [s3] "r" (s3)
-        );
+            : [p1] "+r"(p1)
+            : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [s3] "r"(s3));
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "srl    %[p4],  %[p4],  8     \n\t"
             "srl    %[p3],  %[p3],  8     \n\t"
             "srl    %[p2],  %[p2],  8     \n\t"
             "srl    %[p1],  %[p1],  8     \n\t"
 
-            : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
-            :
-        );
+            : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+            :);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p4],   1(%[s2])    \n\t"
             "sb     %[p3],   0(%[s2])    \n\t"
             "sb     %[p2],  -1(%[s2])    \n\t"
             "sb     %[p1],  -2(%[s2])    \n\t"
 
             :
-            : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1),
-              [s2] "r" (s2)
-        );
+            : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
+              [s2] "r"(s2));
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "srl    %[p4],  %[p4],  8     \n\t"
             "srl    %[p3],  %[p3],  8     \n\t"
             "srl    %[p2],  %[p2],  8     \n\t"
             "srl    %[p1],  %[p1],  8     \n\t"
 
-            : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
-            :
-        );
+            : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+            :);
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p4],   1(%[s1])    \n\t"
             "sb     %[p3],   0(%[s1])    \n\t"
             "sb     %[p2],  -1(%[s1])    \n\t"
             "sb     %[p1],  -2(%[s1])    \n\t"
 
             :
-            : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1),
-              [s1] "r" (s1)
-        );
+            : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
+              [s1] "r"(s1));
       }
     }
   }
 }
 
-void vpx_lpf_horizontal_4_dual_dspr2(uint8_t *s, int p /* pitch */,
-                                     const uint8_t *blimit0,
-                                     const uint8_t *limit0,
-                                     const uint8_t *thresh0,
-                                     const uint8_t *blimit1,
-                                     const uint8_t *limit1,
-                                     const uint8_t *thresh1) {
+void vpx_lpf_horizontal_4_dual_dspr2(
+    uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
+    const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
+    const uint8_t *limit1, const uint8_t *thresh1) {
   vpx_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0, 1);
   vpx_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1, 1);
 }
 
-void vpx_lpf_horizontal_8_dual_dspr2(uint8_t *s, int p /* pitch */,
-                                     const uint8_t *blimit0,
-                                     const uint8_t *limit0,
-                                     const uint8_t *thresh0,
-                                     const uint8_t *blimit1,
-                                     const uint8_t *limit1,
-                                     const uint8_t *thresh1) {
+void vpx_lpf_horizontal_8_dual_dspr2(
+    uint8_t *s, int p /* pitch */, const uint8_t *blimit0,
+    const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1,
+    const uint8_t *limit1, const uint8_t *thresh1) {
   vpx_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0, 1);
   vpx_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1, 1);
 }
 
-void vpx_lpf_vertical_4_dual_dspr2(uint8_t *s, int p,
-                                   const uint8_t *blimit0,
+void vpx_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
                                    const uint8_t *limit0,
                                    const uint8_t *thresh0,
                                    const uint8_t *blimit1,
@@ -339,20 +307,17 @@ void vpx_lpf_vertical_4_dual_dspr2(uint8_t *s, int p,
   vpx_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, 1);
 }
 
-void vpx_lpf_vertical_8_dual_dspr2(uint8_t *s, int p,
-                                   const uint8_t *blimit0,
+void vpx_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0,
                                    const uint8_t *limit0,
                                    const uint8_t *thresh0,
                                    const uint8_t *blimit1,
                                    const uint8_t *limit1,
                                    const uint8_t *thresh1) {
   vpx_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0, 1);
-  vpx_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1,
-                                       1);
+  vpx_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, 1);
 }
 
-void vpx_lpf_vertical_16_dual_dspr2(uint8_t *s, int p,
-                                    const uint8_t *blimit,
+void vpx_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit,
                                     const uint8_t *limit,
                                     const uint8_t *thresh) {
   vpx_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh);
diff --git a/vpx_dsp/mips/loopfilter_filters_dspr2.h b/vpx_dsp/mips/loopfilter_filters_dspr2.h
index db39854368cdcaa595df1ecd080d8db657917db1..11f286d281beb6f83290abc48f1c09ad7c6d980a 100644
--- a/vpx_dsp/mips/loopfilter_filters_dspr2.h
+++ b/vpx_dsp/mips/loopfilter_filters_dspr2.h
@@ -24,22 +24,21 @@ extern "C" {
 
 #if HAVE_DSPR2
 /* inputs & outputs are quad-byte vectors */
-static INLINE void filter_dspr2(uint32_t mask, uint32_t hev,
-                                uint32_t *ps1, uint32_t *ps0,
-                                uint32_t *qs0, uint32_t *qs1) {
-  int32_t   vpx_filter_l, vpx_filter_r;
-  int32_t   Filter1_l, Filter1_r, Filter2_l, Filter2_r;
-  int32_t   subr_r, subr_l;
-  uint32_t  t1, t2, HWM, t3;
-  uint32_t  hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r;
-  int32_t   vps1, vps0, vqs0, vqs1;
-  int32_t   vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r;
-  uint32_t  N128;
+static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, uint32_t *ps1,
+                                uint32_t *ps0, uint32_t *qs0, uint32_t *qs1) {
+  int32_t vpx_filter_l, vpx_filter_r;
+  int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
+  int32_t subr_r, subr_l;
+  uint32_t t1, t2, HWM, t3;
+  uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r;
+  int32_t vps1, vps0, vqs0, vqs1;
+  int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r;
+  uint32_t N128;
 
   N128 = 0x80808080;
-  t1  = 0x03000300;
-  t2  = 0x04000400;
-  t3  = 0x01000100;
+  t1 = 0x03000300;
+  t2 = 0x04000400;
+  t3 = 0x01000100;
   HWM = 0xFF00FF00;
 
   vps0 = (*ps0) ^ N128;
@@ -72,7 +71,7 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev,
   hev_r = hev << 8;
   hev_r = hev_r & HWM;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */
       "subq_s.ph    %[vpx_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
       "subq_s.ph    %[vpx_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
@@ -99,20 +98,17 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev,
       "and          %[vpx_filter_l], %[vpx_filter_l], %[mask_l]       \n\t"
       "and          %[vpx_filter_r], %[vpx_filter_r], %[mask_r]       \n\t"
 
-      : [vpx_filter_l] "=&r" (vpx_filter_l),
-        [vpx_filter_r] "=&r" (vpx_filter_r),
-        [subr_l] "=&r" (subr_l), [subr_r] "=&r" (subr_r),
-        [invhev_l] "=&r" (invhev_l), [invhev_r] "=&r" (invhev_r)
-      : [vps0_l] "r" (vps0_l), [vps0_r] "r" (vps0_r), [vps1_l] "r" (vps1_l),
-        [vps1_r] "r" (vps1_r), [vqs0_l] "r" (vqs0_l), [vqs0_r] "r" (vqs0_r),
-        [vqs1_l] "r" (vqs1_l), [vqs1_r] "r" (vqs1_r),
-        [mask_l] "r" (mask_l), [mask_r] "r" (mask_r),
-        [hev_l] "r" (hev_l), [hev_r] "r" (hev_r),
-        [HWM] "r" (HWM)
-  );
+      : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r),
+        [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
+        [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
+      : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
+        [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r),
+        [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r), [mask_l] "r"(mask_l),
+        [mask_r] "r"(mask_r), [hev_l] "r"(hev_l), [hev_r] "r"(hev_r),
+        [HWM] "r"(HWM));
 
   /* save bottom 3 bits so that we round one side +4 and the other +3 */
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */
       "addq_s.ph    %[Filter1_l],    %[vpx_filter_l], %[t2]           \n\t"
       "addq_s.ph    %[Filter1_r],    %[vpx_filter_r], %[t2]           \n\t"
@@ -137,15 +133,14 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev,
       "subq_s.ph    %[vqs0_l],       %[vqs0_l],       %[Filter1_l]    \n\t"
       "subq_s.ph    %[vqs0_r],       %[vqs0_r],       %[Filter1_r]    \n\t"
 
-      : [Filter1_l] "=&r" (Filter1_l), [Filter1_r] "=&r" (Filter1_r),
-        [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r),
-        [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r),
-        [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r)
-      : [t1] "r" (t1), [t2] "r" (t2), [HWM] "r" (HWM),
-        [vpx_filter_l] "r" (vpx_filter_l), [vpx_filter_r] "r" (vpx_filter_r)
-  );
+      : [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r),
+        [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r),
+        [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
+        [vqs0_r] "+r"(vqs0_r)
+      : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM),
+        [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r));
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* (vpx_filter += 1) >>= 1 */
       "addqh.ph    %[Filter1_l],    %[Filter1_l],     %[t3]           \n\t"
       "addqh.ph    %[Filter1_r],    %[Filter1_r],     %[t3]           \n\t"
@@ -162,11 +157,10 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev,
       "subq_s.ph    %[vqs1_l],       %[vqs1_l],       %[Filter1_l]    \n\t"
       "subq_s.ph    %[vqs1_r],       %[vqs1_r],       %[Filter1_r]    \n\t"
 
-      : [Filter1_l] "+r" (Filter1_l), [Filter1_r] "+r" (Filter1_r),
-        [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r),
-        [vqs1_l] "+r" (vqs1_l), [vqs1_r] "+r" (vqs1_r)
-      : [t3] "r" (t3), [invhev_l] "r" (invhev_l), [invhev_r] "r" (invhev_r)
-  );
+      : [Filter1_l] "+r"(Filter1_l), [Filter1_r] "+r"(Filter1_r),
+        [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l),
+        [vqs1_r] "+r"(vqs1_r)
+      : [t3] "r"(t3), [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r));
 
   /* Create quad-bytes from halfword pairs */
   vqs0_l = vqs0_l & HWM;
@@ -174,16 +168,15 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev,
   vps0_l = vps0_l & HWM;
   vps1_l = vps1_l & HWM;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "shrl.ph      %[vqs0_r],       %[vqs0_r],       8   \n\t"
       "shrl.ph      %[vps0_r],       %[vps0_r],       8   \n\t"
       "shrl.ph      %[vqs1_r],       %[vqs1_r],       8   \n\t"
       "shrl.ph      %[vps1_r],       %[vps1_r],       8   \n\t"
 
-      : [vps1_r] "+r" (vps1_r), [vqs1_r] "+r" (vqs1_r),
-        [vps0_r] "+r" (vps0_r), [vqs0_r] "+r" (vqs0_r)
-      :
-  );
+      : [vps1_r] "+r"(vps1_r), [vqs1_r] "+r"(vqs1_r), [vps0_r] "+r"(vps0_r),
+        [vqs0_r] "+r"(vqs0_r)
+      :);
 
   vqs0 = vqs0_l | vqs0_r;
   vqs1 = vqs1_l | vqs1_r;
@@ -196,24 +189,23 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev,
   *qs1 = vqs1 ^ N128;
 }
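filter_dspr2 is the quad-byte form of the scalar filter4 that its inline comments quote. The ^ N128 (0x80808080) bias maps unsigned pixels into signed range, so the saturating subq_s.ph/addq_s.ph instructions provide vp8_signed_char_clamp for free, and the results are un-biased the same way on output. A one-pixel sketch of the same arithmetic, offered as an illustration rather than the exact halfword-lane code; mask and hev are taken as 0 or -1 per pixel (filter4_pixel is a hypothetical name):

    #include <stdint.h>

    static int8_t clamp8(int v) {
      return (int8_t)(v < -128 ? -128 : v > 127 ? 127 : v);
    }

    /* One-pixel filter4: inputs are already biased by ^0x80. */
    static void filter4_pixel(int mask, int hev, int8_t *ps1, int8_t *ps0,
                              int8_t *qs0, int8_t *qs1) {
      int filter = clamp8(*ps1 - *qs1) & hev;
      int filter1, filter2;

      filter = clamp8(filter + 3 * (*qs0 - *ps0)) & mask;
      filter1 = clamp8(filter + 4) >> 3; /* round one side +4 ... */
      filter2 = clamp8(filter + 3) >> 3; /* ... and the other +3 */
      *qs0 = clamp8(*qs0 - filter1);
      *ps0 = clamp8(*ps0 + filter2);
      filter = ((filter1 + 1) >> 1) & ~hev; /* outer taps only if !hev */
      *qs1 = clamp8(*qs1 - filter);
      *ps1 = clamp8(*ps1 + filter);
    }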
 
-static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev,
-                                 uint32_t ps1, uint32_t ps0,
-                                 uint32_t qs0, uint32_t qs1,
+static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, uint32_t ps1,
+                                 uint32_t ps0, uint32_t qs0, uint32_t qs1,
                                  uint32_t *p1_f0, uint32_t *p0_f0,
                                  uint32_t *q0_f0, uint32_t *q1_f0) {
-  int32_t   vpx_filter_l, vpx_filter_r;
-  int32_t   Filter1_l, Filter1_r, Filter2_l, Filter2_r;
-  int32_t   subr_r, subr_l;
-  uint32_t  t1, t2, HWM, t3;
-  uint32_t  hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r;
-  int32_t   vps1, vps0, vqs0, vqs1;
-  int32_t   vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r;
-  uint32_t  N128;
+  int32_t vpx_filter_l, vpx_filter_r;
+  int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
+  int32_t subr_r, subr_l;
+  uint32_t t1, t2, HWM, t3;
+  uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r;
+  int32_t vps1, vps0, vqs0, vqs1;
+  int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r;
+  uint32_t N128;
 
   N128 = 0x80808080;
-  t1  = 0x03000300;
-  t2  = 0x04000400;
-  t3  = 0x01000100;
+  t1 = 0x03000300;
+  t2 = 0x04000400;
+  t3 = 0x01000100;
   HWM = 0xFF00FF00;
 
   vps0 = (ps0) ^ N128;
@@ -246,7 +238,7 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev,
   hev_r = hev << 8;
   hev_r = hev_r & HWM;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */
       "subq_s.ph    %[vpx_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
       "subq_s.ph    %[vpx_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
@@ -273,19 +265,17 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev,
       "and          %[vpx_filter_l], %[vpx_filter_l], %[mask_l]       \n\t"
       "and          %[vpx_filter_r], %[vpx_filter_r], %[mask_r]       \n\t"
 
-      : [vpx_filter_l] "=&r" (vpx_filter_l),
-        [vpx_filter_r] "=&r" (vpx_filter_r),
-        [subr_l] "=&r" (subr_l), [subr_r] "=&r" (subr_r),
-        [invhev_l] "=&r" (invhev_l), [invhev_r] "=&r" (invhev_r)
-      : [vps0_l] "r" (vps0_l), [vps0_r] "r" (vps0_r), [vps1_l] "r" (vps1_l),
-        [vps1_r] "r" (vps1_r), [vqs0_l] "r" (vqs0_l), [vqs0_r] "r" (vqs0_r),
-        [vqs1_l] "r" (vqs1_l), [vqs1_r] "r" (vqs1_r),
-        [mask_l] "r" (mask_l), [mask_r] "r" (mask_r),
-        [hev_l] "r" (hev_l), [hev_r] "r" (hev_r), [HWM] "r" (HWM)
-  );
+      : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r),
+        [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
+        [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
+      : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
+        [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r),
+        [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r), [mask_l] "r"(mask_l),
+        [mask_r] "r"(mask_r), [hev_l] "r"(hev_l), [hev_r] "r"(hev_r),
+        [HWM] "r"(HWM));
 
   /* save bottom 3 bits so that we round one side +4 and the other +3 */
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */
       "addq_s.ph    %[Filter1_l],    %[vpx_filter_l], %[t2]           \n\t"
       "addq_s.ph    %[Filter1_r],    %[vpx_filter_r], %[t2]           \n\t"
@@ -310,15 +300,14 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev,
       "subq_s.ph    %[vqs0_l],       %[vqs0_l],       %[Filter1_l]    \n\t"
       "subq_s.ph    %[vqs0_r],       %[vqs0_r],       %[Filter1_r]    \n\t"
 
-      : [Filter1_l] "=&r" (Filter1_l), [Filter1_r] "=&r" (Filter1_r),
-        [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r),
-        [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r),
-        [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r)
-      : [t1] "r" (t1), [t2] "r" (t2), [HWM] "r" (HWM),
-        [vpx_filter_l] "r" (vpx_filter_l), [vpx_filter_r] "r" (vpx_filter_r)
-  );
+      : [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r),
+        [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r),
+        [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
+        [vqs0_r] "+r"(vqs0_r)
+      : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM),
+        [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r));
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* (vpx_filter += 1) >>= 1 */
       "addqh.ph    %[Filter1_l],    %[Filter1_l],     %[t3]           \n\t"
       "addqh.ph    %[Filter1_r],    %[Filter1_r],     %[t3]           \n\t"
@@ -335,11 +324,10 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev,
       "subq_s.ph    %[vqs1_l],       %[vqs1_l],       %[Filter1_l]    \n\t"
       "subq_s.ph    %[vqs1_r],       %[vqs1_r],       %[Filter1_r]    \n\t"
 
-      : [Filter1_l] "+r" (Filter1_l), [Filter1_r] "+r" (Filter1_r),
-        [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r),
-        [vqs1_l] "+r" (vqs1_l), [vqs1_r] "+r" (vqs1_r)
-      : [t3] "r" (t3), [invhev_l] "r" (invhev_l), [invhev_r] "r" (invhev_r)
-  );
+      : [Filter1_l] "+r"(Filter1_l), [Filter1_r] "+r"(Filter1_r),
+        [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l),
+        [vqs1_r] "+r"(vqs1_r)
+      : [t3] "r"(t3), [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r));
 
   /* Create quad-bytes from halfword pairs */
   vqs0_l = vqs0_l & HWM;
@@ -347,16 +335,15 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev,
   vps0_l = vps0_l & HWM;
   vps1_l = vps1_l & HWM;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "shrl.ph      %[vqs0_r],       %[vqs0_r],       8   \n\t"
       "shrl.ph      %[vps0_r],       %[vps0_r],       8   \n\t"
       "shrl.ph      %[vqs1_r],       %[vqs1_r],       8   \n\t"
       "shrl.ph      %[vps1_r],       %[vps1_r],       8   \n\t"
 
-      : [vps1_r] "+r" (vps1_r), [vqs1_r] "+r" (vqs1_r),
-        [vps0_r] "+r" (vps0_r), [vqs0_r] "+r" (vqs0_r)
-      :
-  );
+      : [vps1_r] "+r"(vps1_r), [vqs1_r] "+r"(vqs1_r), [vps0_r] "+r"(vps0_r),
+        [vqs0_r] "+r"(vqs0_r)
+      :);
 
   vqs0 = vqs0_l | vqs0_r;
   vqs1 = vqs1_l | vqs1_r;
@@ -369,18 +356,17 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev,
   *q1_f0 = vqs1 ^ N128;
 }
 
-static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2,
-                                  uint32_t *op1, uint32_t *op0,
-                                  uint32_t *oq0, uint32_t *oq1,
+static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2, uint32_t *op1,
+                                  uint32_t *op0, uint32_t *oq0, uint32_t *oq1,
                                   uint32_t *oq2, uint32_t *oq3) {
   /* use a 7 tap filter [1, 1, 1, 2, 1, 1, 1] for flat line */
   const uint32_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
   const uint32_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3;
-  uint32_t       res_op2, res_op1, res_op0;
-  uint32_t       res_oq0, res_oq1, res_oq2;
-  uint32_t       tmp;
-  uint32_t       add_p210_q012;
-  uint32_t       u32Four = 0x00040004;
+  uint32_t res_op2, res_op1, res_op0;
+  uint32_t res_oq0, res_oq1, res_oq2;
+  uint32_t tmp;
+  uint32_t add_p210_q012;
+  uint32_t u32Four = 0x00040004;
 
   /* *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0, 3)  1 */
   /* *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + p1 + p1 + p0 + q0 + q1, 3)  2 */
@@ -389,7 +375,7 @@ static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2,
   /* *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q1 + q2 + q3 + q3, 3)  5 */
   /* *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q2 + q3 + q3 + q3, 3)  6 */
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "addu.ph    %[add_p210_q012],  %[p2],             %[p1]            \n\t"
       "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[p0]            \n\t"
       "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[q0]            \n\t"
@@ -428,15 +414,12 @@ static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2,
       "shrl.ph    %[res_op0],        %[res_op0],        3                \n\t"
       "shrl.ph    %[res_oq2],        %[res_oq2],        3                \n\t"
 
-      : [add_p210_q012] "=&r" (add_p210_q012),
-        [tmp] "=&r" (tmp), [res_op2] "=&r" (res_op2),
-        [res_op1] "=&r" (res_op1), [res_op0] "=&r" (res_op0),
-        [res_oq0] "=&r" (res_oq0), [res_oq1] "=&r" (res_oq1),
-        [res_oq2] "=&r" (res_oq2)
-      : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [q1] "r" (q1),
-        [p2] "r" (p2), [q2] "r" (q2), [p3] "r" (p3), [q3] "r" (q3),
-        [u32Four] "r" (u32Four)
-  );
+      : [add_p210_q012] "=&r"(add_p210_q012), [tmp] "=&r"(tmp),
+        [res_op2] "=&r"(res_op2), [res_op1] "=&r"(res_op1),
+        [res_op0] "=&r"(res_op0), [res_oq0] "=&r"(res_oq0),
+        [res_oq1] "=&r"(res_oq1), [res_oq2] "=&r"(res_oq2)
+      : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [q1] "r"(q1), [p2] "r"(p2),
+        [q2] "r"(q2), [p3] "r"(p3), [q3] "r"(q3), [u32Four] "r"(u32Four));
 
   *op2 = res_op2;
   *op1 = res_op1;
@@ -446,20 +429,18 @@ static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2,
   *oq2 = res_oq2;
 }
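The numbered comments above spell out the 7-tap [1, 1, 1, 2, 1, 1, 1] flat filter that mbfilter_dspr2 evaluates two pixels at a time in halfword lanes. For reference, the complete set in scalar form; the *op0 and *oq0 rows, which the hunk boundaries here happen to cut off, are filled in from the same sliding pattern, so treat this as a sketch rather than the canonical source:

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    static void mbfilter_scalar(int p3, int p2, int p1, int p0, int q0,
                                int q1, int q2, int q3, int *op2, int *op1,
                                int *op0, int *oq0, int *oq1, int *oq2) {
      *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0, 3);
      *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + p1 + p1 + p0 + q0 + q1, 3);
      *op0 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 + p0 + q0 + q1 + q2, 3);
      *oq0 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + q0 + q0 + q1 + q2 + q3, 3);
      *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q1 + q2 + q3 + q3, 3);
      *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q2 + q3 + q3 + q3, 3);
    }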
 
-static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2,
-                                   uint32_t p1, uint32_t p0,
-                                   uint32_t q0, uint32_t q1,
-                                   uint32_t q2, uint32_t q3,
-                                   uint32_t *op2_f1,
+static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2, uint32_t p1,
+                                   uint32_t p0, uint32_t q0, uint32_t q1,
+                                   uint32_t q2, uint32_t q3, uint32_t *op2_f1,
                                    uint32_t *op1_f1, uint32_t *op0_f1,
                                    uint32_t *oq0_f1, uint32_t *oq1_f1,
                                    uint32_t *oq2_f1) {
   /* use a 7 tap filter [1, 1, 1, 2, 1, 1, 1] for flat line */
-  uint32_t  res_op2, res_op1, res_op0;
-  uint32_t  res_oq0, res_oq1, res_oq2;
-  uint32_t  tmp;
-  uint32_t  add_p210_q012;
-  uint32_t  u32Four = 0x00040004;
+  uint32_t res_op2, res_op1, res_op0;
+  uint32_t res_oq0, res_oq1, res_oq2;
+  uint32_t tmp;
+  uint32_t add_p210_q012;
+  uint32_t u32Four = 0x00040004;
 
   /* *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0, 3)   1 */
   /* *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + p1 + p1 + p0 + q0 + q1, 3)   2 */
@@ -468,7 +449,7 @@ static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2,
   /* *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q1 + q2 + q3 + q3, 3)   5 */
   /* *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q2 + q3 + q3 + q3, 3)   6 */
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "addu.ph    %[add_p210_q012],  %[p2],             %[p1]             \n\t"
       "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[p0]             \n\t"
       "addu.ph    %[add_p210_q012],  %[add_p210_q012],  %[q0]             \n\t"
@@ -507,14 +488,12 @@ static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2,
       "shrl.ph    %[res_op0],        %[res_op0],        3                 \n\t"
       "shrl.ph    %[res_oq2],        %[res_oq2],        3                 \n\t"
 
-      : [add_p210_q012] "=&r" (add_p210_q012), [tmp] "=&r" (tmp),
-        [res_op2] "=&r" (res_op2), [res_op1] "=&r" (res_op1),
-        [res_op0] "=&r" (res_op0), [res_oq0] "=&r" (res_oq0),
-        [res_oq1] "=&r" (res_oq1), [res_oq2] "=&r" (res_oq2)
-      : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [q1] "r" (q1),
-        [p2] "r" (p2), [q2] "r" (q2), [p3] "r" (p3), [q3] "r" (q3),
-        [u32Four] "r" (u32Four)
-  );
+      : [add_p210_q012] "=&r"(add_p210_q012), [tmp] "=&r"(tmp),
+        [res_op2] "=&r"(res_op2), [res_op1] "=&r"(res_op1),
+        [res_op0] "=&r"(res_op0), [res_oq0] "=&r"(res_oq0),
+        [res_oq1] "=&r"(res_oq1), [res_oq2] "=&r"(res_oq2)
+      : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [q1] "r"(q1), [p2] "r"(p2),
+        [q2] "r"(q2), [p3] "r"(p3), [q3] "r"(q3), [u32Four] "r"(u32Four));
 
   *op2_f1 = res_op2;
   *op1_f1 = res_op1;
@@ -524,25 +503,22 @@ static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2,
   *oq2_f1 = res_oq2;
 }
 
-static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6,
-                                       uint32_t *op5, uint32_t *op4,
-                                       uint32_t *op3, uint32_t *op2,
-                                       uint32_t *op1, uint32_t *op0,
-                                       uint32_t *oq0, uint32_t *oq1,
-                                       uint32_t *oq2, uint32_t *oq3,
-                                       uint32_t *oq4, uint32_t *oq5,
-                                       uint32_t *oq6, uint32_t *oq7) {
+static INLINE void wide_mbfilter_dspr2(
+    uint32_t *op7, uint32_t *op6, uint32_t *op5, uint32_t *op4, uint32_t *op3,
+    uint32_t *op2, uint32_t *op1, uint32_t *op0, uint32_t *oq0, uint32_t *oq1,
+    uint32_t *oq2, uint32_t *oq3, uint32_t *oq4, uint32_t *oq5, uint32_t *oq6,
+    uint32_t *oq7) {
   const uint32_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4;
   const uint32_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
   const uint32_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3;
   const uint32_t q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7;
-  uint32_t       res_op6, res_op5, res_op4, res_op3, res_op2, res_op1, res_op0;
-  uint32_t       res_oq0, res_oq1, res_oq2, res_oq3, res_oq4, res_oq5, res_oq6;
-  uint32_t       tmp;
-  uint32_t       add_p6toq6;
-  uint32_t       u32Eight = 0x00080008;
+  uint32_t res_op6, res_op5, res_op4, res_op3, res_op2, res_op1, res_op0;
+  uint32_t res_oq0, res_oq1, res_oq2, res_oq3, res_oq4, res_oq5, res_oq6;
+  uint32_t tmp;
+  uint32_t add_p6toq6;
+  uint32_t u32Eight = 0x00080008;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
      /* addition of p6,p5,p4,p3,p2,p1,p0,q0,q1,q2,q3,q4,q5,q6,
         which every output below reuses */
       "addu.ph      %[add_p6toq6],     %[p6],              %[p5]         \n\t"
@@ -560,15 +536,13 @@ static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6,
       "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[q6]         \n\t"
       "addu.ph      %[add_p6toq6],     %[add_p6toq6],      %[u32Eight]   \n\t"
 
-      : [add_p6toq6] "=&r" (add_p6toq6)
-      : [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4),
-        [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),
-        [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2), [q3] "r" (q3),
-        [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6),
-        [u32Eight] "r" (u32Eight)
-  );
+      : [add_p6toq6] "=&r"(add_p6toq6)
+      : [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2),
+        [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2),
+        [q3] "r"(q3), [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6),
+        [u32Eight] "r"(u32Eight));
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 +
                                    p3 + p2 + p1 + p0 + q0, 4) */
       "shll.ph       %[tmp],            %[p7],            3               \n\t"
@@ -643,16 +617,14 @@ static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6,
       "addu.ph       %[res_op0],        %[res_op0],       %[add_p6toq6]   \n\t"
       "shrl.ph       %[res_op0],        %[res_op0],       4               \n\t"
 
-      : [res_op6] "=&r" (res_op6), [res_op5] "=&r" (res_op5),
-        [res_op4] "=&r" (res_op4), [res_op3] "=&r" (res_op3),
-        [res_op2] "=&r" (res_op2), [res_op1] "=&r" (res_op1),
-        [res_op0] "=&r" (res_op0), [tmp] "=&r" (tmp)
-      : [p7] "r" (p7), [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4),
-        [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),
-        [q2] "r" (q2), [q1] "r" (q1),
-        [q3] "r" (q3), [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6),
-        [add_p6toq6] "r" (add_p6toq6)
-  );
+      : [res_op6] "=&r"(res_op6), [res_op5] "=&r"(res_op5),
+        [res_op4] "=&r"(res_op4), [res_op3] "=&r"(res_op3),
+        [res_op2] "=&r"(res_op2), [res_op1] "=&r"(res_op1),
+        [res_op0] "=&r"(res_op0), [tmp] "=&r"(tmp)
+      : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3),
+        [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q2] "r"(q2), [q1] "r"(q1),
+        [q3] "r"(q3), [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6),
+        [add_p6toq6] "r"(add_p6toq6));
 
   *op6 = res_op6;
   *op5 = res_op5;
@@ -662,7 +634,7 @@ static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6,
   *op1 = res_op1;
   *op0 = res_op0;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 +
                                    q1 + q2 + q3 + q4 + q5 + q6 + q7, 4); */
       "addu.ph       %[res_oq0],        %[q7],            %[q0]           \n\t"
@@ -737,16 +709,14 @@ static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6,
       "subu.ph       %[res_oq6],        %[res_oq6],       %[p6]           \n\t"
       "shrl.ph       %[res_oq6],        %[res_oq6],       4               \n\t"
 
-      : [res_oq6] "=&r" (res_oq6), [res_oq5] "=&r" (res_oq5),
-        [res_oq4] "=&r" (res_oq4), [res_oq3] "=&r" (res_oq3),
-        [res_oq2] "=&r" (res_oq2), [res_oq1] "=&r" (res_oq1),
-        [res_oq0] "=&r" (res_oq0), [tmp] "=&r" (tmp)
-      : [q7] "r" (q7), [q6] "r" (q6), [q5] "r" (q5), [q4] "r" (q4),
-        [q3] "r" (q3), [q2] "r" (q2), [q1] "r" (q1), [q0] "r" (q0),
-        [p1] "r" (p1), [p2] "r" (p2),
-        [p3] "r" (p3), [p4] "r" (p4), [p5] "r" (p5), [p6] "r" (p6),
-        [add_p6toq6] "r" (add_p6toq6)
-  );
+      : [res_oq6] "=&r"(res_oq6), [res_oq5] "=&r"(res_oq5),
+        [res_oq4] "=&r"(res_oq4), [res_oq3] "=&r"(res_oq3),
+        [res_oq2] "=&r"(res_oq2), [res_oq1] "=&r"(res_oq1),
+        [res_oq0] "=&r"(res_oq0), [tmp] "=&r"(tmp)
+      : [q7] "r"(q7), [q6] "r"(q6), [q5] "r"(q5), [q4] "r"(q4), [q3] "r"(q3),
+        [q2] "r"(q2), [q1] "r"(q1), [q0] "r"(q0), [p1] "r"(p1), [p2] "r"(p2),
+        [p3] "r"(p3), [p4] "r"(p4), [p5] "r"(p5), [p6] "r"(p6),
+        [add_p6toq6] "r"(add_p6toq6));
 
   *oq0 = res_oq0;
   *oq1 = res_oq1;
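wide_mbfilter_dspr2 amortizes its fourteen 15-tap outputs by computing add_p6toq6, the sum of p6 through q6 plus the u32Eight rounding bias, exactly once; each output then only adds or subtracts the few taps that differ before shifting right by 4. A scalar sketch of the two outputs whose equations appear in the comments above (wide_taps_sketch is a hypothetical name):

    /* "sum" plays the role of add_p6toq6 (p6..q6 plus the rounding 8). */
    static void wide_taps_sketch(int p7, int p6, int p5, int p4, int p3,
                                 int p2, int p1, int p0, int q0, int q1,
                                 int q2, int q3, int q4, int q5, int q6,
                                 int q7, int *op6, int *oq0) {
      const int sum = p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 +
                      q3 + q4 + q5 + q6 + 8;
      /* op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 +
                                  p1 + p0 + q0, 4) */
      *op6 = (sum + 7 * p7 + p6 - (q1 + q2 + q3 + q4 + q5 + q6)) >> 4;
      /* oq0 = ROUND_POWER_OF_TWO(p6 + .. + p0 + q0 * 2 + q1 + .. + q7, 4) */
      *oq0 = (sum + q0 + q7) >> 4;
    }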
diff --git a/vpx_dsp/mips/loopfilter_macros_dspr2.h b/vpx_dsp/mips/loopfilter_macros_dspr2.h
index a990b4061bab30490803a13b6a7f4d0aff37fa5c..769371dff8aa2e4f8ee23af309a523a69bb7d2c1 100644
--- a/vpx_dsp/mips/loopfilter_macros_dspr2.h
+++ b/vpx_dsp/mips/loopfilter_macros_dspr2.h
@@ -22,453 +22,410 @@ extern "C" {
 #endif
 
 #if HAVE_DSPR2
-#define STORE_F0() {                                                    \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q1_f0],    1(%[s4])           \n\t"                   \
-        "sb     %[q0_f0],    0(%[s4])           \n\t"                   \
-        "sb     %[p0_f0],   -1(%[s4])           \n\t"                   \
-        "sb     %[p1_f0],   -2(%[s4])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q1_f0] "r" (q1_f0), [q0_f0] "r" (q0_f0),                     \
-          [p0_f0] "r" (p0_f0), [p1_f0] "r" (p1_f0),                     \
-          [s4] "r" (s4)                                                 \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                   \
-        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                   \
-        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                   \
-        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                   \
-                                                                        \
-        : [q1_f0] "+r" (q1_f0), [q0_f0] "+r" (q0_f0),                   \
-          [p0_f0] "+r" (p0_f0), [p1_f0] "+r" (p1_f0)                    \
-        :                                                               \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q1_f0],    1(%[s3])           \n\t"                   \
-        "sb     %[q0_f0],    0(%[s3])           \n\t"                   \
-        "sb     %[p0_f0],   -1(%[s3])           \n\t"                   \
-        "sb     %[p1_f0],   -2(%[s3])           \n\t"                   \
-                                                                        \
-        : [p1_f0] "+r" (p1_f0)                                          \
-        : [q1_f0] "r" (q1_f0), [q0_f0] "r" (q0_f0),                     \
-          [s3] "r" (s3), [p0_f0] "r" (p0_f0)                            \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                   \
-        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                   \
-        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                   \
-        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                   \
-                                                                        \
-        : [q1_f0] "+r" (q1_f0), [q0_f0] "+r" (q0_f0),                   \
-          [p0_f0] "+r" (p0_f0), [p1_f0] "+r" (p1_f0)                    \
-        :                                                               \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q1_f0],    1(%[s2])           \n\t"                   \
-        "sb     %[q0_f0],    0(%[s2])           \n\t"                   \
-        "sb     %[p0_f0],   -1(%[s2])           \n\t"                   \
-        "sb     %[p1_f0],   -2(%[s2])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q1_f0] "r" (q1_f0), [q0_f0] "r" (q0_f0),                     \
-          [p0_f0] "r" (p0_f0), [p1_f0] "r" (p1_f0),                     \
-          [s2] "r" (s2)                                                 \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                   \
-        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                   \
-        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                   \
-        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                   \
-                                                                        \
-        : [q1_f0] "+r" (q1_f0), [q0_f0] "+r" (q0_f0),                   \
-          [p0_f0] "+r" (p0_f0), [p1_f0] "+r" (p1_f0)                    \
-        :                                                               \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q1_f0],    1(%[s1])           \n\t"                   \
-        "sb     %[q0_f0],    0(%[s1])           \n\t"                   \
-        "sb     %[p0_f0],   -1(%[s1])           \n\t"                   \
-        "sb     %[p1_f0],   -2(%[s1])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q1_f0] "r" (q1_f0), [q0_f0] "r" (q0_f0),                     \
-          [p0_f0] "r" (p0_f0), [p1_f0] "r" (p1_f0),                     \
-          [s1] "r" (s1)                                                 \
-    );                                                                  \
-}
+#define STORE_F0()                                                       \
+  {                                                                      \
+    __asm__ __volatile__(                                                \
+        "sb     %[q1_f0],    1(%[s4])           \n\t"                    \
+        "sb     %[q0_f0],    0(%[s4])           \n\t"                    \
+        "sb     %[p0_f0],   -1(%[s4])           \n\t"                    \
+        "sb     %[p1_f0],   -2(%[s4])           \n\t"                    \
+                                                                         \
+        :                                                                \
+        : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0),    \
+          [p1_f0] "r"(p1_f0), [s4] "r"(s4));                             \
+                                                                         \
+    __asm__ __volatile__(                                                \
+        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                    \
+        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                    \
+        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                    \
+        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                    \
+                                                                         \
+        : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \
+          [p1_f0] "+r"(p1_f0)                                            \
+        :);                                                              \
+                                                                         \
+    __asm__ __volatile__(                                                \
+        "sb     %[q1_f0],    1(%[s3])           \n\t"                    \
+        "sb     %[q0_f0],    0(%[s3])           \n\t"                    \
+        "sb     %[p0_f0],   -1(%[s3])           \n\t"                    \
+        "sb     %[p1_f0],   -2(%[s3])           \n\t"                    \
+                                                                         \
+        : [p1_f0] "+r"(p1_f0)                                            \
+        : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [s3] "r"(s3),          \
+          [p0_f0] "r"(p0_f0));                                           \
+                                                                         \
+    __asm__ __volatile__(                                                \
+        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                    \
+        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                    \
+        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                    \
+        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                    \
+                                                                         \
+        : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \
+          [p1_f0] "+r"(p1_f0)                                            \
+        :);                                                              \
+                                                                         \
+    __asm__ __volatile__(                                                \
+        "sb     %[q1_f0],    1(%[s2])           \n\t"                    \
+        "sb     %[q0_f0],    0(%[s2])           \n\t"                    \
+        "sb     %[p0_f0],   -1(%[s2])           \n\t"                    \
+        "sb     %[p1_f0],   -2(%[s2])           \n\t"                    \
+                                                                         \
+        :                                                                \
+        : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0),    \
+          [p1_f0] "r"(p1_f0), [s2] "r"(s2));                             \
+                                                                         \
+    __asm__ __volatile__(                                                \
+        "srl    %[q1_f0],   %[q1_f0],   8       \n\t"                    \
+        "srl    %[q0_f0],   %[q0_f0],   8       \n\t"                    \
+        "srl    %[p0_f0],   %[p0_f0],   8       \n\t"                    \
+        "srl    %[p1_f0],   %[p1_f0],   8       \n\t"                    \
+                                                                         \
+        : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \
+          [p1_f0] "+r"(p1_f0)                                            \
+        :);                                                              \
+                                                                         \
+    __asm__ __volatile__(                                                \
+        "sb     %[q1_f0],    1(%[s1])           \n\t"                    \
+        "sb     %[q0_f0],    0(%[s1])           \n\t"                    \
+        "sb     %[p0_f0],   -1(%[s1])           \n\t"                    \
+        "sb     %[p1_f0],   -2(%[s1])           \n\t"                    \
+                                                                         \
+        :                                                                \
+        : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0),    \
+          [p1_f0] "r"(p1_f0), [s1] "r"(s1));                             \
+  }
 
-#define STORE_F1() {                                                    \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q2_r],     2(%[s4])           \n\t"                   \
-        "sb     %[q1_r],     1(%[s4])           \n\t"                   \
-        "sb     %[q0_r],     0(%[s4])           \n\t"                   \
-        "sb     %[p0_r],    -1(%[s4])           \n\t"                   \
-        "sb     %[p1_r],    -2(%[s4])           \n\t"                   \
-        "sb     %[p2_r],    -3(%[s4])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q2_r] "r" (q2_r), [q1_r] "r" (q1_r), [q0_r] "r" (q0_r),      \
-          [p0_r] "r" (p0_r), [p1_r] "r" (p1_r), [p2_r] "r" (p2_r),      \
-          [s4] "r" (s4)                                                 \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "srl    %[q2_r],    %[q2_r],    16      \n\t"                   \
-        "srl    %[q1_r],    %[q1_r],    16      \n\t"                   \
-        "srl    %[q0_r],    %[q0_r],    16      \n\t"                   \
-        "srl    %[p0_r],    %[p0_r],    16      \n\t"                   \
-        "srl    %[p1_r],    %[p1_r],    16      \n\t"                   \
-        "srl    %[p2_r],    %[p2_r],    16      \n\t"                   \
-                                                                        \
-        : [q2_r] "+r" (q2_r), [q1_r] "+r" (q1_r), [q0_r] "+r" (q0_r),   \
-          [p0_r] "+r" (p0_r), [p1_r] "+r" (p1_r), [p2_r] "+r" (p2_r)    \
-        :                                                               \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q2_r],     2(%[s3])           \n\t"                   \
-        "sb     %[q1_r],     1(%[s3])           \n\t"                   \
-        "sb     %[q0_r],     0(%[s3])           \n\t"                   \
-        "sb     %[p0_r],    -1(%[s3])           \n\t"                   \
-        "sb     %[p1_r],    -2(%[s3])           \n\t"                   \
-        "sb     %[p2_r],    -3(%[s3])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q2_r] "r" (q2_r), [q1_r] "r" (q1_r), [q0_r] "r" (q0_r),      \
-          [p0_r] "r" (p0_r), [p1_r] "r" (p1_r), [p2_r] "r" (p2_r),      \
-          [s3] "r" (s3)                                                 \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q2_l],     2(%[s2])           \n\t"                   \
-        "sb     %[q1_l],     1(%[s2])           \n\t"                   \
-        "sb     %[q0_l],     0(%[s2])           \n\t"                   \
-        "sb     %[p0_l],    -1(%[s2])           \n\t"                   \
-        "sb     %[p1_l],    -2(%[s2])           \n\t"                   \
-        "sb     %[p2_l],    -3(%[s2])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q2_l] "r" (q2_l), [q1_l] "r" (q1_l), [q0_l] "r" (q0_l),      \
-          [p0_l] "r" (p0_l), [p1_l] "r" (p1_l), [p2_l] "r" (p2_l),      \
-          [s2] "r" (s2)                                                 \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "srl    %[q2_l],    %[q2_l],    16      \n\t"                   \
-        "srl    %[q1_l],    %[q1_l],    16      \n\t"                   \
-        "srl    %[q0_l],    %[q0_l],    16      \n\t"                   \
-        "srl    %[p0_l],    %[p0_l],    16      \n\t"                   \
-        "srl    %[p1_l],    %[p1_l],    16      \n\t"                   \
-        "srl    %[p2_l],    %[p2_l],    16      \n\t"                   \
-                                                                        \
-        : [q2_l] "+r" (q2_l), [q1_l] "+r" (q1_l), [q0_l] "+r" (q0_l),   \
-          [p0_l] "+r" (p0_l), [p1_l] "+r" (p1_l), [p2_l] "+r" (p2_l)    \
-        :                                                               \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q2_l],     2(%[s1])           \n\t"                   \
-        "sb     %[q1_l],     1(%[s1])           \n\t"                   \
-        "sb     %[q0_l],     0(%[s1])           \n\t"                   \
-        "sb     %[p0_l],    -1(%[s1])           \n\t"                   \
-        "sb     %[p1_l],    -2(%[s1])           \n\t"                   \
-        "sb     %[p2_l],    -3(%[s1])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q2_l] "r" (q2_l), [q1_l] "r" (q1_l), [q0_l] "r" (q0_l),      \
-          [p0_l] "r" (p0_l), [p1_l] "r" (p1_l), [p2_l] "r" (p2_l),      \
-          [s1] "r" (s1)                                                 \
-    );                                                                  \
-}
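+/* Store the six filtered outputs (p2..q2) for the four pixel lines addressed
+ * by s1..s4.  Each *_r register carries two results as zero-extended
+ * halfwords (the s4 line in bits 7:0, the s3 line in bits 23:16), so a
+ * single srl by 16 exposes the second byte; the *_l registers likewise
+ * cover the s2 and s1 lines. */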
+#define STORE_F1()                                                             \
+  {                                                                            \
+    __asm__ __volatile__(                                                      \
+        "sb     %[q2_r],     2(%[s4])           \n\t"                          \
+        "sb     %[q1_r],     1(%[s4])           \n\t"                          \
+        "sb     %[q0_r],     0(%[s4])           \n\t"                          \
+        "sb     %[p0_r],    -1(%[s4])           \n\t"                          \
+        "sb     %[p1_r],    -2(%[s4])           \n\t"                          \
+        "sb     %[p2_r],    -3(%[s4])           \n\t"                          \
+                                                                               \
+        :                                                                      \
+        : [q2_r] "r"(q2_r), [q1_r] "r"(q1_r), [q0_r] "r"(q0_r),                \
+          [p0_r] "r"(p0_r), [p1_r] "r"(p1_r), [p2_r] "r"(p2_r), [s4] "r"(s4)); \
+                                                                               \
+    __asm__ __volatile__(                                                      \
+        "srl    %[q2_r],    %[q2_r],    16      \n\t"                          \
+        "srl    %[q1_r],    %[q1_r],    16      \n\t"                          \
+        "srl    %[q0_r],    %[q0_r],    16      \n\t"                          \
+        "srl    %[p0_r],    %[p0_r],    16      \n\t"                          \
+        "srl    %[p1_r],    %[p1_r],    16      \n\t"                          \
+        "srl    %[p2_r],    %[p2_r],    16      \n\t"                          \
+                                                                               \
+        : [q2_r] "+r"(q2_r), [q1_r] "+r"(q1_r), [q0_r] "+r"(q0_r),             \
+          [p0_r] "+r"(p0_r), [p1_r] "+r"(p1_r), [p2_r] "+r"(p2_r)              \
+        :);                                                                    \
+                                                                               \
+    __asm__ __volatile__(                                                      \
+        "sb     %[q2_r],     2(%[s3])           \n\t"                          \
+        "sb     %[q1_r],     1(%[s3])           \n\t"                          \
+        "sb     %[q0_r],     0(%[s3])           \n\t"                          \
+        "sb     %[p0_r],    -1(%[s3])           \n\t"                          \
+        "sb     %[p1_r],    -2(%[s3])           \n\t"                          \
+        "sb     %[p2_r],    -3(%[s3])           \n\t"                          \
+                                                                               \
+        :                                                                      \
+        : [q2_r] "r"(q2_r), [q1_r] "r"(q1_r), [q0_r] "r"(q0_r),                \
+          [p0_r] "r"(p0_r), [p1_r] "r"(p1_r), [p2_r] "r"(p2_r), [s3] "r"(s3)); \
+                                                                               \
+    __asm__ __volatile__(                                                      \
+        "sb     %[q2_l],     2(%[s2])           \n\t"                          \
+        "sb     %[q1_l],     1(%[s2])           \n\t"                          \
+        "sb     %[q0_l],     0(%[s2])           \n\t"                          \
+        "sb     %[p0_l],    -1(%[s2])           \n\t"                          \
+        "sb     %[p1_l],    -2(%[s2])           \n\t"                          \
+        "sb     %[p2_l],    -3(%[s2])           \n\t"                          \
+                                                                               \
+        :                                                                      \
+        : [q2_l] "r"(q2_l), [q1_l] "r"(q1_l), [q0_l] "r"(q0_l),                \
+          [p0_l] "r"(p0_l), [p1_l] "r"(p1_l), [p2_l] "r"(p2_l), [s2] "r"(s2)); \
+                                                                               \
+    __asm__ __volatile__(                                                      \
+        "srl    %[q2_l],    %[q2_l],    16      \n\t"                          \
+        "srl    %[q1_l],    %[q1_l],    16      \n\t"                          \
+        "srl    %[q0_l],    %[q0_l],    16      \n\t"                          \
+        "srl    %[p0_l],    %[p0_l],    16      \n\t"                          \
+        "srl    %[p1_l],    %[p1_l],    16      \n\t"                          \
+        "srl    %[p2_l],    %[p2_l],    16      \n\t"                          \
+                                                                               \
+        : [q2_l] "+r"(q2_l), [q1_l] "+r"(q1_l), [q0_l] "+r"(q0_l),             \
+          [p0_l] "+r"(p0_l), [p1_l] "+r"(p1_l), [p2_l] "+r"(p2_l)              \
+        :);                                                                    \
+                                                                               \
+    __asm__ __volatile__(                                                      \
+        "sb     %[q2_l],     2(%[s1])           \n\t"                          \
+        "sb     %[q1_l],     1(%[s1])           \n\t"                          \
+        "sb     %[q0_l],     0(%[s1])           \n\t"                          \
+        "sb     %[p0_l],    -1(%[s1])           \n\t"                          \
+        "sb     %[p1_l],    -2(%[s1])           \n\t"                          \
+        "sb     %[p2_l],    -3(%[s1])           \n\t"                          \
+                                                                               \
+        :                                                                      \
+        : [q2_l] "r"(q2_l), [q1_l] "r"(q1_l), [q0_l] "r"(q0_l),                \
+          [p0_l] "r"(p0_l), [p1_l] "r"(p1_l), [p2_l] "r"(p2_l), [s1] "r"(s1)); \
+  }
 
-#define STORE_F2() {                                                    \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q6_r],     6(%[s4])           \n\t"                   \
-        "sb     %[q5_r],     5(%[s4])           \n\t"                   \
-        "sb     %[q4_r],     4(%[s4])           \n\t"                   \
-        "sb     %[q3_r],     3(%[s4])           \n\t"                   \
-        "sb     %[q2_r],     2(%[s4])           \n\t"                   \
-        "sb     %[q1_r],     1(%[s4])           \n\t"                   \
-        "sb     %[q0_r],     0(%[s4])           \n\t"                   \
-        "sb     %[p0_r],    -1(%[s4])           \n\t"                   \
-        "sb     %[p1_r],    -2(%[s4])           \n\t"                   \
-        "sb     %[p2_r],    -3(%[s4])           \n\t"                   \
-        "sb     %[p3_r],    -4(%[s4])           \n\t"                   \
-        "sb     %[p4_r],    -5(%[s4])           \n\t"                   \
-        "sb     %[p5_r],    -6(%[s4])           \n\t"                   \
-        "sb     %[p6_r],    -7(%[s4])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q6_r] "r" (q6_r), [q5_r] "r" (q5_r), [q4_r] "r" (q4_r),      \
-          [q3_r] "r" (q3_r), [q2_r] "r" (q2_r), [q1_r] "r" (q1_r),      \
-          [q0_r] "r" (q0_r),                                            \
-          [p0_r] "r" (p0_r), [p1_r] "r" (p1_r), [p2_r] "r" (p2_r),      \
-          [p3_r] "r" (p3_r), [p4_r] "r" (p4_r), [p5_r] "r" (p5_r),      \
-          [p6_r] "r" (p6_r),                                            \
-          [s4] "r" (s4)                                                 \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "srl    %[q6_r],    %[q6_r],    16      \n\t"                   \
-        "srl    %[q5_r],    %[q5_r],    16      \n\t"                   \
-        "srl    %[q4_r],    %[q4_r],    16      \n\t"                   \
-        "srl    %[q3_r],    %[q3_r],    16      \n\t"                   \
-        "srl    %[q2_r],    %[q2_r],    16      \n\t"                   \
-        "srl    %[q1_r],    %[q1_r],    16      \n\t"                   \
-        "srl    %[q0_r],    %[q0_r],    16      \n\t"                   \
-        "srl    %[p0_r],    %[p0_r],    16      \n\t"                   \
-        "srl    %[p1_r],    %[p1_r],    16      \n\t"                   \
-        "srl    %[p2_r],    %[p2_r],    16      \n\t"                   \
-        "srl    %[p3_r],    %[p3_r],    16      \n\t"                   \
-        "srl    %[p4_r],    %[p4_r],    16      \n\t"                   \
-        "srl    %[p5_r],    %[p5_r],    16      \n\t"                   \
-        "srl    %[p6_r],    %[p6_r],    16      \n\t"                   \
-                                                                        \
-        : [q6_r] "+r" (q6_r), [q5_r] "+r" (q5_r), [q4_r] "+r" (q4_r),   \
-          [q3_r] "+r" (q3_r), [q2_r] "+r" (q2_r), [q1_r] "+r" (q1_r),   \
-          [q0_r] "+r" (q0_r),                                           \
-          [p0_r] "+r" (p0_r), [p1_r] "+r" (p1_r), [p2_r] "+r" (p2_r),   \
-          [p3_r] "+r" (p3_r), [p4_r] "+r" (p4_r), [p5_r] "+r" (p5_r),   \
-          [p6_r] "+r" (p6_r)                                            \
-        :                                                               \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q6_r],     6(%[s3])           \n\t"                   \
-        "sb     %[q5_r],     5(%[s3])           \n\t"                   \
-        "sb     %[q4_r],     4(%[s3])           \n\t"                   \
-        "sb     %[q3_r],     3(%[s3])           \n\t"                   \
-        "sb     %[q2_r],     2(%[s3])           \n\t"                   \
-        "sb     %[q1_r],     1(%[s3])           \n\t"                   \
-        "sb     %[q0_r],     0(%[s3])           \n\t"                   \
-        "sb     %[p0_r],    -1(%[s3])           \n\t"                   \
-        "sb     %[p1_r],    -2(%[s3])           \n\t"                   \
-        "sb     %[p2_r],    -3(%[s3])           \n\t"                   \
-        "sb     %[p3_r],    -4(%[s3])           \n\t"                   \
-        "sb     %[p4_r],    -5(%[s3])           \n\t"                   \
-        "sb     %[p5_r],    -6(%[s3])           \n\t"                   \
-        "sb     %[p6_r],    -7(%[s3])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q6_r] "r" (q6_r), [q5_r] "r" (q5_r), [q4_r] "r" (q4_r),      \
-          [q3_r] "r" (q3_r), [q2_r] "r" (q2_r), [q1_r] "r" (q1_r),      \
-          [q0_r] "r" (q0_r),                                            \
-          [p0_r] "r" (p0_r), [p1_r] "r" (p1_r), [p2_r] "r" (p2_r),      \
-          [p3_r] "r" (p3_r), [p4_r] "r" (p4_r), [p5_r] "r" (p5_r),      \
-          [p6_r] "r" (p6_r),                                            \
-          [s3] "r" (s3)                                                 \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q6_l],     6(%[s2])           \n\t"                   \
-        "sb     %[q5_l],     5(%[s2])           \n\t"                   \
-        "sb     %[q4_l],     4(%[s2])           \n\t"                   \
-        "sb     %[q3_l],     3(%[s2])           \n\t"                   \
-        "sb     %[q2_l],     2(%[s2])           \n\t"                   \
-        "sb     %[q1_l],     1(%[s2])           \n\t"                   \
-        "sb     %[q0_l],     0(%[s2])           \n\t"                   \
-        "sb     %[p0_l],    -1(%[s2])           \n\t"                   \
-        "sb     %[p1_l],    -2(%[s2])           \n\t"                   \
-        "sb     %[p2_l],    -3(%[s2])           \n\t"                   \
-        "sb     %[p3_l],    -4(%[s2])           \n\t"                   \
-        "sb     %[p4_l],    -5(%[s2])           \n\t"                   \
-        "sb     %[p5_l],    -6(%[s2])           \n\t"                   \
-        "sb     %[p6_l],    -7(%[s2])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q6_l] "r" (q6_l), [q5_l] "r" (q5_l), [q4_l] "r" (q4_l),      \
-          [q3_l] "r" (q3_l), [q2_l] "r" (q2_l), [q1_l] "r" (q1_l),      \
-          [q0_l] "r" (q0_l),                                            \
-          [p0_l] "r" (p0_l), [p1_l] "r" (p1_l), [p2_l] "r" (p2_l),      \
-          [p3_l] "r" (p3_l), [p4_l] "r" (p4_l), [p5_l] "r" (p5_l),      \
-          [p6_l] "r" (p6_l),                                            \
-          [s2] "r" (s2)                                                 \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "srl    %[q6_l],    %[q6_l],    16     \n\t"                    \
-        "srl    %[q5_l],    %[q5_l],    16     \n\t"                    \
-        "srl    %[q4_l],    %[q4_l],    16     \n\t"                    \
-        "srl    %[q3_l],    %[q3_l],    16     \n\t"                    \
-        "srl    %[q2_l],    %[q2_l],    16     \n\t"                    \
-        "srl    %[q1_l],    %[q1_l],    16     \n\t"                    \
-        "srl    %[q0_l],    %[q0_l],    16     \n\t"                    \
-        "srl    %[p0_l],    %[p0_l],    16     \n\t"                    \
-        "srl    %[p1_l],    %[p1_l],    16     \n\t"                    \
-        "srl    %[p2_l],    %[p2_l],    16     \n\t"                    \
-        "srl    %[p3_l],    %[p3_l],    16     \n\t"                    \
-        "srl    %[p4_l],    %[p4_l],    16     \n\t"                    \
-        "srl    %[p5_l],    %[p5_l],    16     \n\t"                    \
-        "srl    %[p6_l],    %[p6_l],    16     \n\t"                    \
-                                                                        \
-        : [q6_l] "+r" (q6_l), [q5_l] "+r" (q5_l), [q4_l] "+r" (q4_l),   \
-          [q3_l] "+r" (q3_l), [q2_l] "+r" (q2_l), [q1_l] "+r" (q1_l),   \
-          [q0_l] "+r" (q0_l),                                           \
-          [p0_l] "+r" (p0_l), [p1_l] "+r" (p1_l), [p2_l] "+r" (p2_l),   \
-          [p3_l] "+r" (p3_l), [p4_l] "+r" (p4_l), [p5_l] "+r" (p5_l),   \
-          [p6_l] "+r" (p6_l)                                            \
-        :                                                               \
-    );                                                                  \
-                                                                        \
-    __asm__ __volatile__ (                                              \
-        "sb     %[q6_l],     6(%[s1])           \n\t"                   \
-        "sb     %[q5_l],     5(%[s1])           \n\t"                   \
-        "sb     %[q4_l],     4(%[s1])           \n\t"                   \
-        "sb     %[q3_l],     3(%[s1])           \n\t"                   \
-        "sb     %[q2_l],     2(%[s1])           \n\t"                   \
-        "sb     %[q1_l],     1(%[s1])           \n\t"                   \
-        "sb     %[q0_l],     0(%[s1])           \n\t"                   \
-        "sb     %[p0_l],    -1(%[s1])           \n\t"                   \
-        "sb     %[p1_l],    -2(%[s1])           \n\t"                   \
-        "sb     %[p2_l],    -3(%[s1])           \n\t"                   \
-        "sb     %[p3_l],    -4(%[s1])           \n\t"                   \
-        "sb     %[p4_l],    -5(%[s1])           \n\t"                   \
-        "sb     %[p5_l],    -6(%[s1])           \n\t"                   \
-        "sb     %[p6_l],    -7(%[s1])           \n\t"                   \
-                                                                        \
-        :                                                               \
-        : [q6_l] "r" (q6_l), [q5_l] "r" (q5_l), [q4_l] "r" (q4_l),      \
-          [q3_l] "r" (q3_l), [q2_l] "r" (q2_l), [q1_l] "r" (q1_l),      \
-          [q0_l] "r" (q0_l),                                            \
-          [p0_l] "r" (p0_l), [p1_l] "r" (p1_l), [p2_l] "r" (p2_l),      \
-          [p3_l] "r" (p3_l), [p4_l] "r" (p4_l), [p5_l] "r" (p5_l),      \
-          [p6_l] "r" (p6_l),                                            \
-          [s1] "r" (s1)                                                 \
-    );                                                                  \
-}
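+/* As STORE_F1(), but for the widest filter: stores all fourteen outputs
+ * (p6..q6) per line, again packing two lines per register. */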
+#define STORE_F2()                                                 \
+  {                                                                \
+    __asm__ __volatile__(                                          \
+        "sb     %[q6_r],     6(%[s4])           \n\t"              \
+        "sb     %[q5_r],     5(%[s4])           \n\t"              \
+        "sb     %[q4_r],     4(%[s4])           \n\t"              \
+        "sb     %[q3_r],     3(%[s4])           \n\t"              \
+        "sb     %[q2_r],     2(%[s4])           \n\t"              \
+        "sb     %[q1_r],     1(%[s4])           \n\t"              \
+        "sb     %[q0_r],     0(%[s4])           \n\t"              \
+        "sb     %[p0_r],    -1(%[s4])           \n\t"              \
+        "sb     %[p1_r],    -2(%[s4])           \n\t"              \
+        "sb     %[p2_r],    -3(%[s4])           \n\t"              \
+        "sb     %[p3_r],    -4(%[s4])           \n\t"              \
+        "sb     %[p4_r],    -5(%[s4])           \n\t"              \
+        "sb     %[p5_r],    -6(%[s4])           \n\t"              \
+        "sb     %[p6_r],    -7(%[s4])           \n\t"              \
+                                                                   \
+        :                                                          \
+        : [q6_r] "r"(q6_r), [q5_r] "r"(q5_r), [q4_r] "r"(q4_r),    \
+          [q3_r] "r"(q3_r), [q2_r] "r"(q2_r), [q1_r] "r"(q1_r),    \
+          [q0_r] "r"(q0_r), [p0_r] "r"(p0_r), [p1_r] "r"(p1_r),    \
+          [p2_r] "r"(p2_r), [p3_r] "r"(p3_r), [p4_r] "r"(p4_r),    \
+          [p5_r] "r"(p5_r), [p6_r] "r"(p6_r), [s4] "r"(s4));       \
+                                                                   \
+    __asm__ __volatile__(                                          \
+        "srl    %[q6_r],    %[q6_r],    16      \n\t"              \
+        "srl    %[q5_r],    %[q5_r],    16      \n\t"              \
+        "srl    %[q4_r],    %[q4_r],    16      \n\t"              \
+        "srl    %[q3_r],    %[q3_r],    16      \n\t"              \
+        "srl    %[q2_r],    %[q2_r],    16      \n\t"              \
+        "srl    %[q1_r],    %[q1_r],    16      \n\t"              \
+        "srl    %[q0_r],    %[q0_r],    16      \n\t"              \
+        "srl    %[p0_r],    %[p0_r],    16      \n\t"              \
+        "srl    %[p1_r],    %[p1_r],    16      \n\t"              \
+        "srl    %[p2_r],    %[p2_r],    16      \n\t"              \
+        "srl    %[p3_r],    %[p3_r],    16      \n\t"              \
+        "srl    %[p4_r],    %[p4_r],    16      \n\t"              \
+        "srl    %[p5_r],    %[p5_r],    16      \n\t"              \
+        "srl    %[p6_r],    %[p6_r],    16      \n\t"              \
+                                                                   \
+        : [q6_r] "+r"(q6_r), [q5_r] "+r"(q5_r), [q4_r] "+r"(q4_r), \
+          [q3_r] "+r"(q3_r), [q2_r] "+r"(q2_r), [q1_r] "+r"(q1_r), \
+          [q0_r] "+r"(q0_r), [p0_r] "+r"(p0_r), [p1_r] "+r"(p1_r), \
+          [p2_r] "+r"(p2_r), [p3_r] "+r"(p3_r), [p4_r] "+r"(p4_r), \
+          [p5_r] "+r"(p5_r), [p6_r] "+r"(p6_r)                     \
+        :);                                                        \
+                                                                   \
+    __asm__ __volatile__(                                          \
+        "sb     %[q6_r],     6(%[s3])           \n\t"              \
+        "sb     %[q5_r],     5(%[s3])           \n\t"              \
+        "sb     %[q4_r],     4(%[s3])           \n\t"              \
+        "sb     %[q3_r],     3(%[s3])           \n\t"              \
+        "sb     %[q2_r],     2(%[s3])           \n\t"              \
+        "sb     %[q1_r],     1(%[s3])           \n\t"              \
+        "sb     %[q0_r],     0(%[s3])           \n\t"              \
+        "sb     %[p0_r],    -1(%[s3])           \n\t"              \
+        "sb     %[p1_r],    -2(%[s3])           \n\t"              \
+        "sb     %[p2_r],    -3(%[s3])           \n\t"              \
+        "sb     %[p3_r],    -4(%[s3])           \n\t"              \
+        "sb     %[p4_r],    -5(%[s3])           \n\t"              \
+        "sb     %[p5_r],    -6(%[s3])           \n\t"              \
+        "sb     %[p6_r],    -7(%[s3])           \n\t"              \
+                                                                   \
+        :                                                          \
+        : [q6_r] "r"(q6_r), [q5_r] "r"(q5_r), [q4_r] "r"(q4_r),    \
+          [q3_r] "r"(q3_r), [q2_r] "r"(q2_r), [q1_r] "r"(q1_r),    \
+          [q0_r] "r"(q0_r), [p0_r] "r"(p0_r), [p1_r] "r"(p1_r),    \
+          [p2_r] "r"(p2_r), [p3_r] "r"(p3_r), [p4_r] "r"(p4_r),    \
+          [p5_r] "r"(p5_r), [p6_r] "r"(p6_r), [s3] "r"(s3));       \
+                                                                   \
+    __asm__ __volatile__(                                          \
+        "sb     %[q6_l],     6(%[s2])           \n\t"              \
+        "sb     %[q5_l],     5(%[s2])           \n\t"              \
+        "sb     %[q4_l],     4(%[s2])           \n\t"              \
+        "sb     %[q3_l],     3(%[s2])           \n\t"              \
+        "sb     %[q2_l],     2(%[s2])           \n\t"              \
+        "sb     %[q1_l],     1(%[s2])           \n\t"              \
+        "sb     %[q0_l],     0(%[s2])           \n\t"              \
+        "sb     %[p0_l],    -1(%[s2])           \n\t"              \
+        "sb     %[p1_l],    -2(%[s2])           \n\t"              \
+        "sb     %[p2_l],    -3(%[s2])           \n\t"              \
+        "sb     %[p3_l],    -4(%[s2])           \n\t"              \
+        "sb     %[p4_l],    -5(%[s2])           \n\t"              \
+        "sb     %[p5_l],    -6(%[s2])           \n\t"              \
+        "sb     %[p6_l],    -7(%[s2])           \n\t"              \
+                                                                   \
+        :                                                          \
+        : [q6_l] "r"(q6_l), [q5_l] "r"(q5_l), [q4_l] "r"(q4_l),    \
+          [q3_l] "r"(q3_l), [q2_l] "r"(q2_l), [q1_l] "r"(q1_l),    \
+          [q0_l] "r"(q0_l), [p0_l] "r"(p0_l), [p1_l] "r"(p1_l),    \
+          [p2_l] "r"(p2_l), [p3_l] "r"(p3_l), [p4_l] "r"(p4_l),    \
+          [p5_l] "r"(p5_l), [p6_l] "r"(p6_l), [s2] "r"(s2));       \
+                                                                   \
+    __asm__ __volatile__(                                          \
+        "srl    %[q6_l],    %[q6_l],    16     \n\t"               \
+        "srl    %[q5_l],    %[q5_l],    16     \n\t"               \
+        "srl    %[q4_l],    %[q4_l],    16     \n\t"               \
+        "srl    %[q3_l],    %[q3_l],    16     \n\t"               \
+        "srl    %[q2_l],    %[q2_l],    16     \n\t"               \
+        "srl    %[q1_l],    %[q1_l],    16     \n\t"               \
+        "srl    %[q0_l],    %[q0_l],    16     \n\t"               \
+        "srl    %[p0_l],    %[p0_l],    16     \n\t"               \
+        "srl    %[p1_l],    %[p1_l],    16     \n\t"               \
+        "srl    %[p2_l],    %[p2_l],    16     \n\t"               \
+        "srl    %[p3_l],    %[p3_l],    16     \n\t"               \
+        "srl    %[p4_l],    %[p4_l],    16     \n\t"               \
+        "srl    %[p5_l],    %[p5_l],    16     \n\t"               \
+        "srl    %[p6_l],    %[p6_l],    16     \n\t"               \
+                                                                   \
+        : [q6_l] "+r"(q6_l), [q5_l] "+r"(q5_l), [q4_l] "+r"(q4_l), \
+          [q3_l] "+r"(q3_l), [q2_l] "+r"(q2_l), [q1_l] "+r"(q1_l), \
+          [q0_l] "+r"(q0_l), [p0_l] "+r"(p0_l), [p1_l] "+r"(p1_l), \
+          [p2_l] "+r"(p2_l), [p3_l] "+r"(p3_l), [p4_l] "+r"(p4_l), \
+          [p5_l] "+r"(p5_l), [p6_l] "+r"(p6_l)                     \
+        :);                                                        \
+                                                                   \
+    __asm__ __volatile__(                                          \
+        "sb     %[q6_l],     6(%[s1])           \n\t"              \
+        "sb     %[q5_l],     5(%[s1])           \n\t"              \
+        "sb     %[q4_l],     4(%[s1])           \n\t"              \
+        "sb     %[q3_l],     3(%[s1])           \n\t"              \
+        "sb     %[q2_l],     2(%[s1])           \n\t"              \
+        "sb     %[q1_l],     1(%[s1])           \n\t"              \
+        "sb     %[q0_l],     0(%[s1])           \n\t"              \
+        "sb     %[p0_l],    -1(%[s1])           \n\t"              \
+        "sb     %[p1_l],    -2(%[s1])           \n\t"              \
+        "sb     %[p2_l],    -3(%[s1])           \n\t"              \
+        "sb     %[p3_l],    -4(%[s1])           \n\t"              \
+        "sb     %[p4_l],    -5(%[s1])           \n\t"              \
+        "sb     %[p5_l],    -6(%[s1])           \n\t"              \
+        "sb     %[p6_l],    -7(%[s1])           \n\t"              \
+                                                                   \
+        :                                                          \
+        : [q6_l] "r"(q6_l), [q5_l] "r"(q5_l), [q4_l] "r"(q4_l),    \
+          [q3_l] "r"(q3_l), [q2_l] "r"(q2_l), [q1_l] "r"(q1_l),    \
+          [q0_l] "r"(q0_l), [p0_l] "r"(p0_l), [p1_l] "r"(p1_l),    \
+          [p2_l] "r"(p2_l), [p3_l] "r"(p3_l), [p4_l] "r"(p4_l),    \
+          [p5_l] "r"(p5_l), [p6_l] "r"(p6_l), [s1] "r"(s1));       \
+  }
 
-#define PACK_LEFT_0TO3() {                                              \
-    __asm__ __volatile__ (                                              \
-        "preceu.ph.qbl   %[p3_l],   %[p3]   \n\t"                       \
-        "preceu.ph.qbl   %[p2_l],   %[p2]   \n\t"                       \
-        "preceu.ph.qbl   %[p1_l],   %[p1]   \n\t"                       \
-        "preceu.ph.qbl   %[p0_l],   %[p0]   \n\t"                       \
-        "preceu.ph.qbl   %[q0_l],   %[q0]   \n\t"                       \
-        "preceu.ph.qbl   %[q1_l],   %[q1]   \n\t"                       \
-        "preceu.ph.qbl   %[q2_l],   %[q2]   \n\t"                       \
-        "preceu.ph.qbl   %[q3_l],   %[q3]   \n\t"                       \
-                                                                        \
-        : [p3_l] "=&r" (p3_l), [p2_l] "=&r" (p2_l),                     \
-          [p1_l] "=&r" (p1_l), [p0_l] "=&r" (p0_l),                     \
-          [q0_l] "=&r" (q0_l), [q1_l] "=&r" (q1_l),                     \
-          [q2_l] "=&r" (q2_l), [q3_l] "=&r" (q3_l)                      \
-        : [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),   \
-          [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2), [q3] "r" (q3)    \
-    );                                                                  \
-}
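+/* The PACK_* macros expand packed bytes into zero-extended halfwords so the
+ * filter arithmetic has 16-bit headroom: preceu.ph.qbl takes the two upper
+ * bytes of a word, preceu.ph.qbr the two lower ones.  Byte-lane sketch for
+ * one register p0 = [a3 a2 a1 a0] (a0 in bits 7:0):
+ *
+ *   preceu.ph.qbl  p0_l, p0   ->  p0_l = [00 a3 00 a2]
+ *   preceu.ph.qbr  p0_r, p0   ->  p0_r = [00 a1 00 a0]
+ *
+ * precr.qb.ph p0, p0_l, p0_r (see the COMBINE_* macros below) then packs the
+ * low byte of each halfword back into [a3 a2 a1 a0]. */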
+#define PACK_LEFT_0TO3()                                              \
+  {                                                                   \
+    __asm__ __volatile__(                                             \
+        "preceu.ph.qbl   %[p3_l],   %[p3]   \n\t"                     \
+        "preceu.ph.qbl   %[p2_l],   %[p2]   \n\t"                     \
+        "preceu.ph.qbl   %[p1_l],   %[p1]   \n\t"                     \
+        "preceu.ph.qbl   %[p0_l],   %[p0]   \n\t"                     \
+        "preceu.ph.qbl   %[q0_l],   %[q0]   \n\t"                     \
+        "preceu.ph.qbl   %[q1_l],   %[q1]   \n\t"                     \
+        "preceu.ph.qbl   %[q2_l],   %[q2]   \n\t"                     \
+        "preceu.ph.qbl   %[q3_l],   %[q3]   \n\t"                     \
+                                                                      \
+        : [p3_l] "=&r"(p3_l), [p2_l] "=&r"(p2_l), [p1_l] "=&r"(p1_l), \
+          [p0_l] "=&r"(p0_l), [q0_l] "=&r"(q0_l), [q1_l] "=&r"(q1_l), \
+          [q2_l] "=&r"(q2_l), [q3_l] "=&r"(q3_l)                      \
+        : [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0),     \
+          [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3));    \
+  }
 
-#define PACK_LEFT_4TO7() {                                              \
-    __asm__ __volatile__ (                                              \
-        "preceu.ph.qbl   %[p7_l],   %[p7]   \n\t"                       \
-        "preceu.ph.qbl   %[p6_l],   %[p6]   \n\t"                       \
-        "preceu.ph.qbl   %[p5_l],   %[p5]   \n\t"                       \
-        "preceu.ph.qbl   %[p4_l],   %[p4]   \n\t"                       \
-        "preceu.ph.qbl   %[q4_l],   %[q4]   \n\t"                       \
-        "preceu.ph.qbl   %[q5_l],   %[q5]   \n\t"                       \
-        "preceu.ph.qbl   %[q6_l],   %[q6]   \n\t"                       \
-        "preceu.ph.qbl   %[q7_l],   %[q7]   \n\t"                       \
-                                                                        \
-        : [p7_l] "=&r" (p7_l), [p6_l] "=&r" (p6_l),                     \
-          [p5_l] "=&r" (p5_l), [p4_l] "=&r" (p4_l),                     \
-          [q4_l] "=&r" (q4_l), [q5_l] "=&r" (q5_l),                     \
-          [q6_l] "=&r" (q6_l), [q7_l] "=&r" (q7_l)                      \
-        : [p7] "r" (p7), [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4),   \
-          [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6), [q7] "r" (q7)    \
-    );                                                                  \
-}
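+/* Same upper-byte expansion for the outer taps p4..p7 and q4..q7. */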
+#define PACK_LEFT_4TO7()                                              \
+  {                                                                   \
+    __asm__ __volatile__(                                             \
+        "preceu.ph.qbl   %[p7_l],   %[p7]   \n\t"                     \
+        "preceu.ph.qbl   %[p6_l],   %[p6]   \n\t"                     \
+        "preceu.ph.qbl   %[p5_l],   %[p5]   \n\t"                     \
+        "preceu.ph.qbl   %[p4_l],   %[p4]   \n\t"                     \
+        "preceu.ph.qbl   %[q4_l],   %[q4]   \n\t"                     \
+        "preceu.ph.qbl   %[q5_l],   %[q5]   \n\t"                     \
+        "preceu.ph.qbl   %[q6_l],   %[q6]   \n\t"                     \
+        "preceu.ph.qbl   %[q7_l],   %[q7]   \n\t"                     \
+                                                                      \
+        : [p7_l] "=&r"(p7_l), [p6_l] "=&r"(p6_l), [p5_l] "=&r"(p5_l), \
+          [p4_l] "=&r"(p4_l), [q4_l] "=&r"(q4_l), [q5_l] "=&r"(q5_l), \
+          [q6_l] "=&r"(q6_l), [q7_l] "=&r"(q7_l)                      \
+        : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4),     \
+          [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), [q7] "r"(q7));    \
+  }
 
-#define PACK_RIGHT_0TO3() {                                             \
-    __asm__ __volatile__ (                                              \
-        "preceu.ph.qbr   %[p3_r],   %[p3]  \n\t"                        \
-        "preceu.ph.qbr   %[p2_r],   %[p2]   \n\t"                       \
-        "preceu.ph.qbr   %[p1_r],   %[p1]   \n\t"                       \
-        "preceu.ph.qbr   %[p0_r],   %[p0]   \n\t"                       \
-        "preceu.ph.qbr   %[q0_r],   %[q0]   \n\t"                       \
-        "preceu.ph.qbr   %[q1_r],   %[q1]   \n\t"                       \
-        "preceu.ph.qbr   %[q2_r],   %[q2]   \n\t"                       \
-        "preceu.ph.qbr   %[q3_r],   %[q3]   \n\t"                       \
-                                                                        \
-        : [p3_r] "=&r" (p3_r), [p2_r] "=&r" (p2_r),                     \
-          [p1_r] "=&r" (p1_r), [p0_r] "=&r" (p0_r),                     \
-          [q0_r] "=&r" (q0_r), [q1_r] "=&r" (q1_r),                     \
-          [q2_r] "=&r" (q2_r), [q3_r] "=&r" (q3_r)                      \
-        : [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),   \
-          [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2), [q3] "r" (q3)    \
-    );                                                                  \
-}
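+/* Lower-byte (right-half) expansion of p0..p3 and q0..q3. */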
+#define PACK_RIGHT_0TO3()                                             \
+  {                                                                   \
+    __asm__ __volatile__(                                             \
+        "preceu.ph.qbr   %[p3_r],   %[p3]  \n\t"                      \
+        "preceu.ph.qbr   %[p2_r],   %[p2]   \n\t"                     \
+        "preceu.ph.qbr   %[p1_r],   %[p1]   \n\t"                     \
+        "preceu.ph.qbr   %[p0_r],   %[p0]   \n\t"                     \
+        "preceu.ph.qbr   %[q0_r],   %[q0]   \n\t"                     \
+        "preceu.ph.qbr   %[q1_r],   %[q1]   \n\t"                     \
+        "preceu.ph.qbr   %[q2_r],   %[q2]   \n\t"                     \
+        "preceu.ph.qbr   %[q3_r],   %[q3]   \n\t"                     \
+                                                                      \
+        : [p3_r] "=&r"(p3_r), [p2_r] "=&r"(p2_r), [p1_r] "=&r"(p1_r), \
+          [p0_r] "=&r"(p0_r), [q0_r] "=&r"(q0_r), [q1_r] "=&r"(q1_r), \
+          [q2_r] "=&r"(q2_r), [q3_r] "=&r"(q3_r)                      \
+        : [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0),     \
+          [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3));    \
+  }
 
-#define PACK_RIGHT_4TO7() {                                             \
-    __asm__ __volatile__ (                                              \
-        "preceu.ph.qbr   %[p7_r],   %[p7]   \n\t"                       \
-        "preceu.ph.qbr   %[p6_r],   %[p6]   \n\t"                       \
-        "preceu.ph.qbr   %[p5_r],   %[p5]   \n\t"                       \
-        "preceu.ph.qbr   %[p4_r],   %[p4]   \n\t"                       \
-        "preceu.ph.qbr   %[q4_r],   %[q4]   \n\t"                       \
-        "preceu.ph.qbr   %[q5_r],   %[q5]   \n\t"                       \
-        "preceu.ph.qbr   %[q6_r],   %[q6]   \n\t"                       \
-        "preceu.ph.qbr   %[q7_r],   %[q7]   \n\t"                       \
-                                                                        \
-        : [p7_r] "=&r" (p7_r), [p6_r] "=&r" (p6_r),                     \
-          [p5_r] "=&r" (p5_r), [p4_r] "=&r" (p4_r),                     \
-          [q4_r] "=&r" (q4_r), [q5_r] "=&r" (q5_r),                     \
-          [q6_r] "=&r" (q6_r), [q7_r] "=&r" (q7_r)                      \
-        : [p7] "r" (p7), [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4),   \
-          [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6), [q7] "r" (q7)    \
-    );                                                                  \
-}
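+/* Lower-byte expansion for the outer taps p4..p7 and q4..q7. */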
+#define PACK_RIGHT_4TO7()                                             \
+  {                                                                   \
+    __asm__ __volatile__(                                             \
+        "preceu.ph.qbr   %[p7_r],   %[p7]   \n\t"                     \
+        "preceu.ph.qbr   %[p6_r],   %[p6]   \n\t"                     \
+        "preceu.ph.qbr   %[p5_r],   %[p5]   \n\t"                     \
+        "preceu.ph.qbr   %[p4_r],   %[p4]   \n\t"                     \
+        "preceu.ph.qbr   %[q4_r],   %[q4]   \n\t"                     \
+        "preceu.ph.qbr   %[q5_r],   %[q5]   \n\t"                     \
+        "preceu.ph.qbr   %[q6_r],   %[q6]   \n\t"                     \
+        "preceu.ph.qbr   %[q7_r],   %[q7]   \n\t"                     \
+                                                                      \
+        : [p7_r] "=&r"(p7_r), [p6_r] "=&r"(p6_r), [p5_r] "=&r"(p5_r), \
+          [p4_r] "=&r"(p4_r), [q4_r] "=&r"(q4_r), [q5_r] "=&r"(q5_r), \
+          [q6_r] "=&r"(q6_r), [q7_r] "=&r"(q7_r)                      \
+        : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4),     \
+          [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), [q7] "r"(q7));    \
+  }
 
-#define COMBINE_LEFT_RIGHT_0TO2() {                                     \
-    __asm__ __volatile__ (                                              \
-        "precr.qb.ph    %[p2],  %[p2_l],    %[p2_r]    \n\t"            \
-        "precr.qb.ph    %[p1],  %[p1_l],    %[p1_r]    \n\t"            \
-        "precr.qb.ph    %[p0],  %[p0_l],    %[p0_r]    \n\t"            \
-        "precr.qb.ph    %[q0],  %[q0_l],    %[q0_r]    \n\t"            \
-        "precr.qb.ph    %[q1],  %[q1_l],    %[q1_r]    \n\t"            \
-        "precr.qb.ph    %[q2],  %[q2_l],    %[q2_r]    \n\t"            \
-                                                                        \
-        : [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),            \
-          [q0] "=&r" (q0), [q1] "=&r" (q1), [q2] "=&r" (q2)             \
-        : [p2_l] "r" (p2_l), [p2_r] "r" (p2_r),                         \
-          [p1_l] "r" (p1_l), [p1_r] "r" (p1_r),                         \
-          [p0_l] "r" (p0_l), [p0_r] "r" (p0_r),                         \
-          [q0_l] "r" (q0_l), [q0_r] "r" (q0_r),                         \
-          [q1_l] "r" (q1_l), [q1_r] "r" (q1_r),                         \
-          [q2_l] "r" (q2_l), [q2_r] "r" (q2_r)                          \
-    );                                                                  \
-}
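+/* Inverse of the PACK_* step: precr.qb.ph keeps the low byte of each
+ * halfword, the upper byte pair coming from *_l and the lower pair from
+ * *_r, repacking four filtered pixels into each 32-bit register. */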
+#define COMBINE_LEFT_RIGHT_0TO2()                                         \
+  {                                                                       \
+    __asm__ __volatile__(                                                 \
+        "precr.qb.ph    %[p2],  %[p2_l],    %[p2_r]    \n\t"              \
+        "precr.qb.ph    %[p1],  %[p1_l],    %[p1_r]    \n\t"              \
+        "precr.qb.ph    %[p0],  %[p0_l],    %[p0_r]    \n\t"              \
+        "precr.qb.ph    %[q0],  %[q0_l],    %[q0_r]    \n\t"              \
+        "precr.qb.ph    %[q1],  %[q1_l],    %[q1_r]    \n\t"              \
+        "precr.qb.ph    %[q2],  %[q2_l],    %[q2_r]    \n\t"              \
+                                                                          \
+        : [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0), [q0] "=&r"(q0), \
+          [q1] "=&r"(q1), [q2] "=&r"(q2)                                  \
+        : [p2_l] "r"(p2_l), [p2_r] "r"(p2_r), [p1_l] "r"(p1_l),           \
+          [p1_r] "r"(p1_r), [p0_l] "r"(p0_l), [p0_r] "r"(p0_r),           \
+          [q0_l] "r"(q0_l), [q0_r] "r"(q0_r), [q1_l] "r"(q1_l),           \
+          [q1_r] "r"(q1_r), [q2_l] "r"(q2_l), [q2_r] "r"(q2_r));          \
+  }
 
-#define COMBINE_LEFT_RIGHT_3TO6() {                                     \
-    __asm__ __volatile__ (                                              \
-        "precr.qb.ph    %[p6],  %[p6_l],    %[p6_r]    \n\t"            \
-        "precr.qb.ph    %[p5],  %[p5_l],    %[p5_r]    \n\t"            \
-        "precr.qb.ph    %[p4],  %[p4_l],    %[p4_r]    \n\t"            \
-        "precr.qb.ph    %[p3],  %[p3_l],    %[p3_r]    \n\t"            \
-        "precr.qb.ph    %[q3],  %[q3_l],    %[q3_r]    \n\t"            \
-        "precr.qb.ph    %[q4],  %[q4_l],    %[q4_r]    \n\t"            \
-        "precr.qb.ph    %[q5],  %[q5_l],    %[q5_r]    \n\t"            \
-        "precr.qb.ph    %[q6],  %[q6_l],    %[q6_r]    \n\t"            \
-                                                                        \
-        : [p6] "=&r" (p6),[p5] "=&r" (p5),                              \
-          [p4] "=&r" (p4),[p3] "=&r" (p3),                              \
-          [q3] "=&r" (q3),[q4] "=&r" (q4),                              \
-          [q5] "=&r" (q5),[q6] "=&r" (q6)                               \
-        : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l),                         \
-          [p4_l] "r" (p4_l), [p3_l] "r" (p3_l),                         \
-          [p6_r] "r" (p6_r), [p5_r] "r" (p5_r),                         \
-          [p4_r] "r" (p4_r), [p3_r] "r" (p3_r),                         \
-          [q3_l] "r" (q3_l), [q4_l] "r" (q4_l),                         \
-          [q5_l] "r" (q5_l), [q6_l] "r" (q6_l),                         \
-          [q3_r] "r" (q3_r), [q4_r] "r" (q4_r),                         \
-          [q5_r] "r" (q5_r), [q6_r] "r" (q6_r)                          \
-    );                                                                  \
-}
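+/* Same repacking for the outer taps p3..p6 and q3..q6. */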
+#define COMBINE_LEFT_RIGHT_3TO6()                                         \
+  {                                                                       \
+    __asm__ __volatile__(                                                 \
+        "precr.qb.ph    %[p6],  %[p6_l],    %[p6_r]    \n\t"              \
+        "precr.qb.ph    %[p5],  %[p5_l],    %[p5_r]    \n\t"              \
+        "precr.qb.ph    %[p4],  %[p4_l],    %[p4_r]    \n\t"              \
+        "precr.qb.ph    %[p3],  %[p3_l],    %[p3_r]    \n\t"              \
+        "precr.qb.ph    %[q3],  %[q3_l],    %[q3_r]    \n\t"              \
+        "precr.qb.ph    %[q4],  %[q4_l],    %[q4_r]    \n\t"              \
+        "precr.qb.ph    %[q5],  %[q5_l],    %[q5_r]    \n\t"              \
+        "precr.qb.ph    %[q6],  %[q6_l],    %[q6_r]    \n\t"              \
+                                                                          \
+        : [p6] "=&r"(p6), [p5] "=&r"(p5), [p4] "=&r"(p4), [p3] "=&r"(p3), \
+          [q3] "=&r"(q3), [q4] "=&r"(q4), [q5] "=&r"(q5), [q6] "=&r"(q6)  \
+        : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l),           \
+          [p3_l] "r"(p3_l), [p6_r] "r"(p6_r), [p5_r] "r"(p5_r),           \
+          [p4_r] "r"(p4_r), [p3_r] "r"(p3_r), [q3_l] "r"(q3_l),           \
+          [q4_l] "r"(q4_l), [q5_l] "r"(q5_l), [q6_l] "r"(q6_l),           \
+          [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r),           \
+          [q6_r] "r"(q6_r));                                              \
+  }
 
 #endif  // #if HAVE_DSPR2
 #ifdef __cplusplus
diff --git a/vpx_dsp/mips/loopfilter_masks_dspr2.h b/vpx_dsp/mips/loopfilter_masks_dspr2.h
index 9bf292705a62d50b2d112cec0c5592f7ec6e1c68..0a0cf577e332ead7d31826ef8f226ab339db7467 100644
--- a/vpx_dsp/mips/loopfilter_masks_dspr2.h
+++ b/vpx_dsp/mips/loopfilter_masks_dspr2.h
@@ -25,18 +25,17 @@ extern "C" {
 /* processing 4 pixels at the same time
  * compute hev and mask in the same function */
 static INLINE void filter_hev_mask_dspr2(uint32_t limit, uint32_t flimit,
-                                         uint32_t p1, uint32_t p0,
-                                         uint32_t p3, uint32_t p2,
-                                         uint32_t q0, uint32_t q1,
+                                         uint32_t p1, uint32_t p0, uint32_t p3,
+                                         uint32_t p2, uint32_t q0, uint32_t q1,
                                          uint32_t q2, uint32_t q3,
                                          uint32_t thresh, uint32_t *hev,
                                          uint32_t *mask) {
-  uint32_t  c, r, r3, r_k;
-  uint32_t  s1, s2, s3;
-  uint32_t  ones = 0xFFFFFFFF;
-  uint32_t  hev1;
+  uint32_t c, r, r3, r_k;
+  uint32_t s1, s2, s3;
+  uint32_t ones = 0xFFFFFFFF;
+  uint32_t hev1;
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* mask |= (abs(p3 - p2) > limit) */
       "subu_s.qb      %[c],   %[p3],     %[p2]        \n\t"
       "subu_s.qb      %[r_k], %[p2],     %[p3]        \n\t"
@@ -88,14 +87,12 @@ static INLINE void filter_hev_mask_dspr2(uint32_t limit, uint32_t flimit,
       "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
       "or             %[r],   %[r],      %[c]         \n\t"
 
-      : [c] "=&r" (c), [r_k] "=&r" (r_k),
-        [r] "=&r" (r), [r3] "=&r" (r3)
-      : [limit] "r" (limit), [p3] "r" (p3), [p2] "r" (p2),
-        [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0),
-        [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh)
-  );
+      : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r3] "=&r"(r3)
+      : [limit] "r"(limit), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
+        [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3),
+        [thresh] "r"(thresh));
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* abs(p0 - q0) */
       "subu_s.qb      %[c],   %[p0],     %[q0]        \n\t"
       "subu_s.qb      %[r_k], %[q0],     %[p0]        \n\t"
@@ -119,34 +116,27 @@ static INLINE void filter_hev_mask_dspr2(uint32_t limit, uint32_t flimit,
       "wrdsp          %[r]                            \n\t"
       "pick.qb        %[s2],  $0,         %[ones]     \n\t"
 
-      : [c] "=&r" (c), [r_k] "=&r" (r_k), [s1] "=&r" (s1), [hev1] "=&r" (hev1),
-        [s2] "=&r" (s2), [r] "+r" (r), [s3] "=&r" (s3)
-      : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3),
-        [q1] "r" (q1), [ones] "r" (ones), [flimit] "r" (flimit)
-  );
+      : [c] "=&r"(c), [r_k] "=&r"(r_k), [s1] "=&r"(s1), [hev1] "=&r"(hev1),
+        [s2] "=&r"(s2), [r] "+r"(r), [s3] "=&r"(s3)
+      : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [r3] "r"(r3), [q1] "r"(q1),
+        [ones] "r"(ones), [flimit] "r"(flimit));
 
   *hev = hev1;
   *mask = s2;
 }
 
-static INLINE void filter_hev_mask_flatmask4_dspr2(uint32_t limit,
-                                                   uint32_t flimit,
-                                                   uint32_t thresh,
-                                                   uint32_t p1, uint32_t p0,
-                                                   uint32_t p3, uint32_t p2,
-                                                   uint32_t q0, uint32_t q1,
-                                                   uint32_t q2, uint32_t q3,
-                                                   uint32_t *hev,
-                                                   uint32_t *mask,
-                                                   uint32_t *flat) {
-  uint32_t  c, r, r3, r_k, r_flat;
-  uint32_t  s1, s2, s3;
-  uint32_t  ones = 0xFFFFFFFF;
-  uint32_t  flat_thresh = 0x01010101;
-  uint32_t  hev1;
-  uint32_t  flat1;
-
-  __asm__ __volatile__ (
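+/* Same hev/mask computation as above, fused with the flatmask4 decision so
+ * that all three masks come out of a single pass over p3..q3. */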
+static INLINE void filter_hev_mask_flatmask4_dspr2(
+    uint32_t limit, uint32_t flimit, uint32_t thresh, uint32_t p1, uint32_t p0,
+    uint32_t p3, uint32_t p2, uint32_t q0, uint32_t q1, uint32_t q2,
+    uint32_t q3, uint32_t *hev, uint32_t *mask, uint32_t *flat) {
+  uint32_t c, r, r3, r_k, r_flat;
+  uint32_t s1, s2, s3;
+  uint32_t ones = 0xFFFFFFFF;
+  uint32_t flat_thresh = 0x01010101;
+  uint32_t hev1;
+  uint32_t flat1;
+
+  __asm__ __volatile__(
       /* mask |= (abs(p3 - p2) > limit) */
       "subu_s.qb      %[c],       %[p3],          %[p2]        \n\t"
       "subu_s.qb      %[r_k],     %[p2],          %[p3]        \n\t"
@@ -236,15 +226,13 @@ static INLINE void filter_hev_mask_flatmask4_dspr2(uint32_t limit,
       "cmpgu.lt.qb    %[c],       %[limit],       %[r_k]       \n\t"
       "or             %[r],       %[r],           %[c]         \n\t"
 
-      : [c] "=&r" (c), [r_k] "=&r" (r_k), [r] "=&r" (r), [r3] "=&r" (r3),
-        [r_flat] "=&r" (r_flat), [flat1] "=&r" (flat1)
-      : [limit] "r" (limit), [p3] "r" (p3), [p2] "r" (p2),
-        [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0),
-        [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh),
-        [flat_thresh] "r" (flat_thresh), [ones] "r" (ones)
-  );
+      : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r3] "=&r"(r3),
+        [r_flat] "=&r"(r_flat), [flat1] "=&r"(flat1)
+      : [limit] "r"(limit), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
+        [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3),
+        [thresh] "r"(thresh), [flat_thresh] "r"(flat_thresh), [ones] "r"(ones));
 
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       /* abs(p0 - q0) */
       "subu_s.qb      %[c],   %[p0],     %[q0]        \n\t"
       "subu_s.qb      %[r_k], %[q0],     %[p0]        \n\t"
@@ -268,29 +256,25 @@ static INLINE void filter_hev_mask_flatmask4_dspr2(uint32_t limit,
       "wrdsp          %[r]                            \n\t"
       "pick.qb        %[s2],   $0,        %[ones]     \n\t"
 
-      : [c] "=&r" (c), [r_k] "=&r" (r_k), [s1] "=&r" (s1), [hev1] "=&r" (hev1),
-        [s2] "=&r" (s2), [r] "+r" (r), [s3] "=&r" (s3)
-      : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3),
-        [q1] "r" (q1), [ones] "r" (ones), [flimit] "r" (flimit)
-  );
+      : [c] "=&r"(c), [r_k] "=&r"(r_k), [s1] "=&r"(s1), [hev1] "=&r"(hev1),
+        [s2] "=&r"(s2), [r] "+r"(r), [s3] "=&r"(s3)
+      : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [r3] "r"(r3), [q1] "r"(q1),
+        [ones] "r"(ones), [flimit] "r"(flimit));
 
   *hev = hev1;
   *mask = s2;
   *flat = flat1;
 }
 
-static INLINE void flatmask5(uint32_t p4, uint32_t p3,
-                             uint32_t p2, uint32_t p1,
-                             uint32_t p0, uint32_t q0,
-                             uint32_t q1, uint32_t q2,
-                             uint32_t q3, uint32_t q4,
-                             uint32_t *flat2) {
-  uint32_t  c, r, r_k, r_flat;
-  uint32_t  ones = 0xFFFFFFFF;
-  uint32_t  flat_thresh = 0x01010101;
-  uint32_t  flat1, flat3;
-
-  __asm__ __volatile__ (
+static INLINE void flatmask5(uint32_t p4, uint32_t p3, uint32_t p2, uint32_t p1,
+                             uint32_t p0, uint32_t q0, uint32_t q1, uint32_t q2,
+                             uint32_t q3, uint32_t q4, uint32_t *flat2) {
+  uint32_t c, r, r_k, r_flat;
+  uint32_t ones = 0xFFFFFFFF;
+  uint32_t flat_thresh = 0x01010101;
+  uint32_t flat1, flat3;
+
+  __asm__ __volatile__(
       /* flat |= (abs(p4 - p0) > thresh) */
       "subu_s.qb      %[c],   %[p4],           %[p0]        \n\t"
       "subu_s.qb      %[r_k], %[p0],           %[p4]        \n\t"
@@ -355,13 +339,11 @@ static INLINE void flatmask5(uint32_t p4, uint32_t p3,
       /* flat & flatmask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3) */
       "and            %[flat1],  %[flat3],        %[flat1]     \n\t"
 
-      : [c] "=&r" (c), [r_k] "=&r" (r_k), [r] "=&r" (r),
-        [r_flat] "=&r" (r_flat), [flat1] "=&r" (flat1), [flat3] "=&r" (flat3)
-      : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2),
-        [p1] "r" (p1), [p0] "r" (p0), [q0] "r" (q0), [q1] "r" (q1),
-        [q2] "r" (q2), [q3] "r" (q3), [q4] "r" (q4),
-        [flat_thresh] "r" (flat_thresh), [ones] "r" (ones)
-  );
+      : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r_flat] "=&r"(r_flat),
+        [flat1] "=&r"(flat1), [flat3] "=&r"(flat3)
+      : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0),
+        [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3), [q4] "r"(q4),
+        [flat_thresh] "r"(flat_thresh), [ones] "r"(ones));
 
   *flat2 = flat1;
 }
diff --git a/vpx_dsp/mips/loopfilter_mb_dspr2.c b/vpx_dsp/mips/loopfilter_mb_dspr2.c
index 4138f569782ee61a5484accee3225423fc2c2784..0b2fcfd7e1b49216d20db1a81a578dcf8eff446b 100644
--- a/vpx_dsp/mips/loopfilter_mb_dspr2.c
+++ b/vpx_dsp/mips/loopfilter_mb_dspr2.c
@@ -19,37 +19,33 @@
 #include "vpx_mem/vpx_mem.h"
 
 #if HAVE_DSPR2
-void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
-                                int pitch,
-                                const uint8_t *blimit,
-                                const uint8_t *limit,
-                                const uint8_t *thresh,
-                                int count) {
-  uint32_t  mask;
-  uint32_t  hev, flat;
-  uint8_t   i;
-  uint8_t   *sp3, *sp2, *sp1, *sp0, *sq0, *sq1, *sq2, *sq3;
-  uint32_t  thresh_vec, flimit_vec, limit_vec;
-  uint32_t  uflimit, ulimit, uthresh;
-  uint32_t  p1_f0, p0_f0, q0_f0, q1_f0;
-  uint32_t  p3, p2, p1, p0, q0, q1, q2, q3;
-  uint32_t  p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l;
-  uint32_t  p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r;
+void vpx_lpf_horizontal_8_dspr2(unsigned char *s, int pitch,
+                                const uint8_t *blimit, const uint8_t *limit,
+                                const uint8_t *thresh, int count) {
+  uint32_t mask;
+  uint32_t hev, flat;
+  uint8_t i;
+  uint8_t *sp3, *sp2, *sp1, *sp0, *sq0, *sq1, *sq2, *sq3;
+  uint32_t thresh_vec, flimit_vec, limit_vec;
+  uint32_t uflimit, ulimit, uthresh;
+  uint32_t p1_f0, p0_f0, q0_f0, q1_f0;
+  uint32_t p3, p2, p1, p0, q0, q1, q2, q3;
+  uint32_t p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l;
+  uint32_t p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r;
 
   uflimit = *blimit;
-  ulimit  = *limit;
+  ulimit = *limit;
   uthresh = *thresh;
 
   /* create quad-byte */
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "replv.qb       %[thresh_vec],    %[uthresh]    \n\t"
       "replv.qb       %[flimit_vec],    %[uflimit]    \n\t"
       "replv.qb       %[limit_vec],     %[ulimit]     \n\t"
 
-      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
-        [limit_vec] "=r" (limit_vec)
-      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
-  );
+      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+        [limit_vec] "=r"(limit_vec)
+      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
 
   /* prefetch data for store */
   prefetch_store(s);
@@ -64,7 +60,7 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
     sq2 = sq1 + pitch;
     sq3 = sq2 + pitch;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lw     %[p3],      (%[sp3])    \n\t"
         "lw     %[p2],      (%[sp2])    \n\t"
         "lw     %[p1],      (%[sp1])    \n\t"
@@ -74,46 +70,39 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
         "lw     %[q2],      (%[sq2])    \n\t"
         "lw     %[q3],      (%[sq3])    \n\t"
 
-        : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),
-          [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1), [q0] "=&r" (q0)
-        : [sp3] "r" (sp3), [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-          [sq3] "r" (sq3), [sq2] "r" (sq2), [sq1] "r" (sq1), [sq0] "r" (sq0)
-    );
+        : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0),
+          [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0)
+        : [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0),
+          [sq3] "r"(sq3), [sq2] "r"(sq2), [sq1] "r"(sq1), [sq0] "r"(sq0));
 
-    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec,
-                                    p1, p0, p3, p2, q0, q1, q2, q3,
-                                    &hev, &mask, &flat);
+    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0,
+                                    p3, p2, q0, q1, q2, q3, &hev, &mask, &flat);
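+    /* hev/mask/flat carry one 0x00/0xFF decision byte per pixel: flat == 0
+       with mask != 0 takes the narrow filter only, an all-ones mask & flat
+       takes the mbfilter path for the whole word, and mixed lanes fall
+       through to the per-byte stores */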
 
     if ((flat == 0) && (mask != 0)) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "sw       %[p1_f0],   (%[sp1])    \n\t"
           "sw       %[p0_f0],   (%[sp0])    \n\t"
           "sw       %[q0_f0],   (%[sq0])    \n\t"
           "sw       %[q1_f0],   (%[sq1])    \n\t"
 
           :
-          : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-            [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-            [sp1] "r" (sp1), [sp0] "r" (sp0),
-            [sq0] "r" (sq0), [sq1] "r" (sq1)
-      );
+          : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+            [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+            [sq1] "r"(sq1));
     } else if ((mask & flat) == 0xFFFFFFFF) {
       /* left 2 element operation */
       PACK_LEFT_0TO3()
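+      /* the PACK_* macros split each packed word into left/right halves
+         widened to halfword lanes, giving the filter 16-bit headroom */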
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
-                     &q0_l, &q1_l, &q2_l, &q3_l);
+      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
 
       /* right 2 element operation */
       PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
-                     &q0_r, &q1_r, &q2_r, &q3_r);
+      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
 
       COMBINE_LEFT_RIGHT_0TO2()
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "sw       %[p2],      (%[sp2])    \n\t"
           "sw       %[p1],      (%[sp1])    \n\t"
           "sw       %[p0],      (%[sp0])    \n\t"
@@ -122,28 +111,23 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
           "sw       %[q2],      (%[sq2])    \n\t"
 
           :
-          : [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),
-            [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2),
-            [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-            [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-      );
+          : [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0),
+            [q1] "r"(q1), [q2] "r"(q2), [sp2] "r"(sp2), [sp1] "r"(sp1),
+            [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2));
     } else if ((flat != 0) && (mask != 0)) {
       /* filtering */
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
 
       /* left 2 element operation */
       PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
-                     &q0_l, &q1_l, &q2_l, &q3_l);
+      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
 
       /* right 2 element operation */
       PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
-                     &q0_r, &q1_r, &q2_r, &q3_r);
+      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
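+      /* lanes disagree: per byte, store the mbfilter result where flat is
+         set and the narrow-filter result where only mask is set */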
 
       if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_r],    (%[sp2])    \n\t"
             "sb     %[p1_r],    (%[sp1])    \n\t"
             "sb     %[p0_r],    (%[sp0])    \n\t"
@@ -152,27 +136,24 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
             "sb     %[q2_r],    (%[sq2])    \n\t"
 
             :
-            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
-              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
+              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+              [sq1] "r"(sq1), [sq2] "r"(sq2));
       } else if (mask & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  (%[sp1])    \n\t"
             "sb         %[p0_f0],  (%[sp0])    \n\t"
             "sb         %[q0_f0],  (%[sq0])    \n\t"
             "sb         %[q1_f0],  (%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
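+      /* move byte lane 1 into store position: the _r lanes are halfwords
+         (shift by 16) while the packed _f0 lanes are bytes (shift by 8) */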
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_r],    %[p2_r],    16      \n\t"
           "srl      %[p1_r],    %[p1_r],    16      \n\t"
           "srl      %[p0_r],    %[p0_r],    16      \n\t"
@@ -184,15 +165,14 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
           "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
 
-          : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r),
-            [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r),
+            [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_r],    +1(%[sp2])    \n\t"
             "sb     %[p1_r],    +1(%[sp1])    \n\t"
             "sb     %[p0_r],    +1(%[sp0])    \n\t"
@@ -201,41 +181,36 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
             "sb     %[q2_r],    +1(%[sq2])    \n\t"
 
             :
-            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
-              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
+              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+              [sq1] "r"(sq1), [sq2] "r"(sq2));
       } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   +1(%[sp1])    \n\t"
             "sb     %[p0_f0],   +1(%[sp0])    \n\t"
             "sb     %[q0_f0],   +1(%[sq0])    \n\t"
             "sb     %[q1_f0],   +1(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
           "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
           "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8     \n\t"
 
-          : [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0),
-            [q0] "+r" (q0), [q1] "+r" (q1), [q2] "+r" (q2),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), [q0] "+r"(q0),
+            [q1] "+r"(q1), [q2] "+r"(q2), [p1_f0] "+r"(p1_f0),
+            [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_l],    +2(%[sp2])    \n\t"
             "sb     %[p1_l],    +2(%[sp1])    \n\t"
             "sb     %[p0_l],    +2(%[sp0])    \n\t"
@@ -244,27 +219,24 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
             "sb     %[q2_l],    +2(%[sq2])    \n\t"
 
             :
-            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
-              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
+              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+              [sq1] "r"(sq1), [sq2] "r"(sq2));
       } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   +2(%[sp1])    \n\t"
             "sb     %[p0_f0],   +2(%[sp0])    \n\t"
             "sb     %[q0_f0],   +2(%[sq0])    \n\t"
             "sb     %[q1_f0],   +2(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_l],    %[p2_l],    16      \n\t"
           "srl      %[p1_l],    %[p1_l],    16      \n\t"
           "srl      %[p0_l],    %[p0_l],    16      \n\t"
@@ -276,15 +248,14 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
           "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
 
-          : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l),
-            [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l),
+            [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_l],    +3(%[sp2])    \n\t"
             "sb     %[p1_l],    +3(%[sp1])    \n\t"
             "sb     %[p0_l],    +3(%[sp0])    \n\t"
@@ -293,24 +264,21 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
             "sb     %[q2_l],    +3(%[sq2])    \n\t"
 
             :
-            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
-              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
+              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+              [sq1] "r"(sq1), [sq2] "r"(sq2));
       } else if (mask & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   +3(%[sp1])    \n\t"
             "sb     %[p0_f0],   +3(%[sp0])    \n\t"
             "sb     %[q0_f0],   +3(%[sq0])    \n\t"
             "sb     %[q1_f0],   +3(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
     }
 
@@ -318,37 +286,33 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s,
   }
 }
 
-void vpx_lpf_vertical_8_dspr2(unsigned char *s,
-                              int pitch,
-                              const uint8_t *blimit,
-                              const uint8_t *limit,
-                              const uint8_t *thresh,
-                              int count) {
-  uint8_t   i;
-  uint32_t  mask, hev, flat;
-  uint8_t   *s1, *s2, *s3, *s4;
-  uint32_t  prim1, prim2, sec3, sec4, prim3, prim4;
-  uint32_t  thresh_vec, flimit_vec, limit_vec;
-  uint32_t  uflimit, ulimit, uthresh;
-  uint32_t  p3, p2, p1, p0, q3, q2, q1, q0;
-  uint32_t  p1_f0, p0_f0, q0_f0, q1_f0;
-  uint32_t  p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l;
-  uint32_t  p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r;
+void vpx_lpf_vertical_8_dspr2(unsigned char *s, int pitch,
+                              const uint8_t *blimit, const uint8_t *limit,
+                              const uint8_t *thresh, int count) {
+  uint8_t i;
+  uint32_t mask, hev, flat;
+  uint8_t *s1, *s2, *s3, *s4;
+  uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+  uint32_t thresh_vec, flimit_vec, limit_vec;
+  uint32_t uflimit, ulimit, uthresh;
+  uint32_t p3, p2, p1, p0, q3, q2, q1, q0;
+  uint32_t p1_f0, p0_f0, q0_f0, q1_f0;
+  uint32_t p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l;
+  uint32_t p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r;
 
   uflimit = *blimit;
-  ulimit  = *limit;
+  ulimit = *limit;
   uthresh = *thresh;
 
   /* create quad-byte */
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "replv.qb     %[thresh_vec],  %[uthresh]    \n\t"
       "replv.qb     %[flimit_vec],  %[uflimit]    \n\t"
       "replv.qb     %[limit_vec],   %[ulimit]     \n\t"
 
-      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
-        [limit_vec] "=r" (limit_vec)
-      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
-  );
+      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+        [limit_vec] "=r"(limit_vec)
+      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
 
   prefetch_store(s + pitch);
 
@@ -357,9 +321,9 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
     s2 = s + pitch;
     s3 = s2 + pitch;
     s4 = s3 + pitch;
-    s  = s4 + pitch;
+    s = s4 + pitch;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lw     %[p0],  -4(%[s1])    \n\t"
         "lw     %[p1],  -4(%[s2])    \n\t"
         "lw     %[p2],  -4(%[s3])    \n\t"
@@ -369,10 +333,9 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
         "lw     %[q1],    (%[s3])    \n\t"
         "lw     %[q0],    (%[s4])    \n\t"
 
-        : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),
-          [q0] "=&r" (q0), [q1] "=&r" (q1), [q2] "=&r" (q2), [q3] "=&r" (q3)
-        : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4)
-    );
+        : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0),
+          [q0] "=&r"(q0), [q1] "=&r"(q1), [q2] "=&r"(q2), [q3] "=&r"(q3)
+        : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
 
     /* transpose p3, p2, p1, p0
        original (when loaded from memory)
@@ -389,7 +352,7 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
          p2         p3_1  p2_1  p1_1  p0_1
          p3         p3_0  p2_0  p1_0  p0_0
     */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "precrq.qb.ph   %[prim1],   %[p0],      %[p1]       \n\t"
         "precr.qb.ph    %[prim2],   %[p0],      %[p1]       \n\t"
         "precrq.qb.ph   %[prim3],   %[p2],      %[p3]       \n\t"
@@ -405,12 +368,10 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
         "append         %[p1],      %[sec3],    16          \n\t"
         "append         %[p3],      %[sec4],    16          \n\t"
 
-        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
-          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
-          [p0] "+r" (p0), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3),
-          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
-        :
-    );
+        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+          [prim4] "=&r"(prim4), [p0] "+r"(p0), [p1] "+r"(p1), [p2] "+r"(p2),
+          [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+        :);
 
     /* transpose q0, q1, q2, q3
        original (when loaded from memory)
@@ -427,7 +388,7 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
          q1         q0_1  q1_1  q2_1  q3_1
          q0         q0_0  q1_0  q2_0  q3_0
     */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "precrq.qb.ph   %[prim1],   %[q3],      %[q2]       \n\t"
         "precr.qb.ph    %[prim2],   %[q3],      %[q2]       \n\t"
         "precrq.qb.ph   %[prim3],   %[q1],      %[q0]       \n\t"
@@ -443,49 +404,40 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
         "append         %[q2],      %[sec3],    16          \n\t"
         "append         %[q0],      %[sec4],    16          \n\t"
 
-        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
-          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
-          [q3] "+r" (q3), [q2] "+r" (q2), [q1] "+r" (q1), [q0] "+r" (q0),
-          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
-        :
-    );
+        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+          [prim4] "=&r"(prim4), [q3] "+r"(q3), [q2] "+r"(q2), [q1] "+r"(q1),
+          [q0] "+r"(q0), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+        :);
 
-    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec,
-                                    p1, p0, p3, p2, q0, q1, q2, q3,
-                                    &hev, &mask, &flat);
+    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0,
+                                    p3, p2, q0, q1, q2, q3, &hev, &mask, &flat);
 
     if ((flat == 0) && (mask != 0)) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
       STORE_F0()
     } else if ((mask & flat) == 0xFFFFFFFF) {
       /* left 2 element operation */
       PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
-                     &q0_l, &q1_l, &q2_l, &q3_l);
+      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
 
       /* right 2 element operation */
       PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
-                     &q0_r, &q1_r, &q2_r, &q3_r);
+      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
 
       STORE_F1()
     } else if ((flat != 0) && (mask != 0)) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
 
       /* left 2 element operation */
       PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
-                     &q0_l, &q1_l, &q2_l, &q3_l);
+      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
 
       /* right 2 element operation */
       PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
-                     &q0_r, &q1_r, &q2_r, &q3_r);
+      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
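+      /* vertical edge: lane n of the packed results belongs to row s(4-n),
+         so write back with sb at offsets -3..+2 around each row pointer */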
 
       if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_r],  -3(%[s4])    \n\t"
             "sb         %[p1_r],  -2(%[s4])    \n\t"
             "sb         %[p0_r],  -1(%[s4])    \n\t"
@@ -494,25 +446,22 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
             "sb         %[q2_r],  +2(%[s4])    \n\t"
 
             :
-            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
-              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [s4] "r" (s4)
-        );
+            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
+              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [s4] "r"(s4));
       } else if (mask & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  -2(%[s4])    \n\t"
             "sb         %[p0_f0],  -1(%[s4])    \n\t"
             "sb         %[q0_f0],    (%[s4])    \n\t"
             "sb         %[q1_f0],  +1(%[s4])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s4] "r" (s4)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s4] "r"(s4));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_r],    %[p2_r],    16      \n\t"
           "srl      %[p1_r],    %[p1_r],    16      \n\t"
           "srl      %[p0_r],    %[p0_r],    16      \n\t"
@@ -524,15 +473,14 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
           "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
 
-          : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r),
-            [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r),
+            [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_r],  -3(%[s3])    \n\t"
             "sb         %[p1_r],  -2(%[s3])    \n\t"
             "sb         %[p0_r],  -1(%[s3])    \n\t"
@@ -541,66 +489,58 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
             "sb         %[q2_r],  +2(%[s3])    \n\t"
 
             :
-            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
-              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [s3] "r" (s3)
-        );
+            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
+              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [s3] "r"(s3));
       } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  -2(%[s3])    \n\t"
             "sb         %[p0_f0],  -1(%[s3])    \n\t"
             "sb         %[q0_f0],    (%[s3])    \n\t"
             "sb         %[q1_f0],  +1(%[s3])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s3] "r" (s3)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s3] "r"(s3));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
           "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
           "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8     \n\t"
 
-          : [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0),
-            [q0] "+r" (q0), [q1] "+r" (q1), [q2] "+r" (q2),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), [q0] "+r"(q0),
+            [q1] "+r"(q1), [q2] "+r"(q2), [p1_f0] "+r"(p1_f0),
+            [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__ (
-          "sb         %[p2_l],  -3(%[s2])    \n\t"
-          "sb         %[p1_l],  -2(%[s2])    \n\t"
-          "sb         %[p0_l],  -1(%[s2])    \n\t"
-          "sb         %[q0_l],    (%[s2])    \n\t"
-          "sb         %[q1_l],  +1(%[s2])    \n\t"
-          "sb         %[q2_l],  +2(%[s2])    \n\t"
+        __asm__ __volatile__(
+            "sb         %[p2_l],  -3(%[s2])    \n\t"
+            "sb         %[p1_l],  -2(%[s2])    \n\t"
+            "sb         %[p0_l],  -1(%[s2])    \n\t"
+            "sb         %[q0_l],    (%[s2])    \n\t"
+            "sb         %[q1_l],  +1(%[s2])    \n\t"
+            "sb         %[q2_l],  +2(%[s2])    \n\t"
 
-          :
-          : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
-            [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-            [s2] "r" (s2)
-        );
+            :
+            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
+              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [s2] "r"(s2));
       } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  -2(%[s2])    \n\t"
             "sb         %[p0_f0],  -1(%[s2])    \n\t"
             "sb         %[q0_f0],    (%[s2])    \n\t"
             "sb         %[q1_f0],  +1(%[s2])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s2] "r" (s2)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s2] "r"(s2));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_l],    %[p2_l],    16      \n\t"
           "srl      %[p1_l],    %[p1_l],    16      \n\t"
           "srl      %[p0_l],    %[p0_l],    16      \n\t"
@@ -612,15 +552,14 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
           "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
 
-          : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l),
-            [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l),
+            [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_l],  -3(%[s1])    \n\t"
             "sb         %[p1_l],  -2(%[s1])    \n\t"
             "sb         %[p0_l],  -1(%[s1])    \n\t"
@@ -629,21 +568,19 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s,
             "sb         %[q2_l],  +2(%[s1])    \n\t"
 
             :
-            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
-              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [s1] "r" (s1)
-        );
+            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
+              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [s1] "r"(s1));
       } else if (mask & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  -2(%[s1])    \n\t"
             "sb         %[p0_f0],  -1(%[s1])    \n\t"
             "sb         %[q0_f0],    (%[s1])    \n\t"
             "sb         %[q1_f0],  +1(%[s1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0),
-              [q1_f0] "r" (q1_f0), [s1] "r" (s1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s1] "r"(s1));
       }
     }
   }
diff --git a/vpx_dsp/mips/loopfilter_mb_horiz_dspr2.c b/vpx_dsp/mips/loopfilter_mb_horiz_dspr2.c
index 8a48650738b3d97b132b640f6d503c8bc3fde283..b6aa2a097ffeaf0f68fc63db9f9ce4a465fa311e 100644
--- a/vpx_dsp/mips/loopfilter_mb_horiz_dspr2.c
+++ b/vpx_dsp/mips/loopfilter_mb_horiz_dspr2.c
@@ -19,42 +19,38 @@
 #include "vpx_mem/vpx_mem.h"
 
 #if HAVE_DSPR2
-void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
-                                 int pitch,
-                                 const uint8_t *blimit,
-                                 const uint8_t *limit,
-                                 const uint8_t *thresh,
-                                 int count) {
-  uint32_t  mask;
-  uint32_t  hev, flat, flat2;
-  uint8_t   i;
-  uint8_t   *sp7, *sp6, *sp5, *sp4, *sp3, *sp2, *sp1, *sp0;
-  uint8_t   *sq0, *sq1, *sq2, *sq3, *sq4, *sq5, *sq6, *sq7;
-  uint32_t  thresh_vec, flimit_vec, limit_vec;
-  uint32_t  uflimit, ulimit, uthresh;
-  uint32_t  p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-  uint32_t  p1_f0, p0_f0, q0_f0, q1_f0;
-  uint32_t  p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l;
-  uint32_t  q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l;
-  uint32_t  p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r;
-  uint32_t  q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r;
-  uint32_t  p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1;
-  uint32_t  q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1;
+void vpx_lpf_horizontal_16_dspr2(unsigned char *s, int pitch,
+                                 const uint8_t *blimit, const uint8_t *limit,
+                                 const uint8_t *thresh, int count) {
+  uint32_t mask;
+  uint32_t hev, flat, flat2;
+  uint8_t i;
+  uint8_t *sp7, *sp6, *sp5, *sp4, *sp3, *sp2, *sp1, *sp0;
+  uint8_t *sq0, *sq1, *sq2, *sq3, *sq4, *sq5, *sq6, *sq7;
+  uint32_t thresh_vec, flimit_vec, limit_vec;
+  uint32_t uflimit, ulimit, uthresh;
+  uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
+  uint32_t p1_f0, p0_f0, q0_f0, q1_f0;
+  uint32_t p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l;
+  uint32_t q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l;
+  uint32_t p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r;
+  uint32_t q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r;
+  uint32_t p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1;
+  uint32_t q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1;
 
   uflimit = *blimit;
-  ulimit  = *limit;
+  ulimit = *limit;
   uthresh = *thresh;
 
   /* create quad-byte */
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "replv.qb       %[thresh_vec],    %[uthresh]      \n\t"
       "replv.qb       %[flimit_vec],    %[uflimit]      \n\t"
       "replv.qb       %[limit_vec],     %[ulimit]       \n\t"
 
-      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
-        [limit_vec] "=r" (limit_vec)
-      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
-  );
+      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+        [limit_vec] "=r"(limit_vec)
+      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
 
   /* prefetch data for store */
   prefetch_store(s);
@@ -77,7 +73,7 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
     sq6 = sq5 + pitch;
     sq7 = sq6 + pitch;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lw     %[p7],      (%[sp7])            \n\t"
         "lw     %[p6],      (%[sp6])            \n\t"
         "lw     %[p5],      (%[sp5])            \n\t"
@@ -87,13 +83,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
         "lw     %[p1],      (%[sp1])            \n\t"
         "lw     %[p0],      (%[sp0])            \n\t"
 
-        : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),
-          [p7] "=&r" (p7), [p6] "=&r" (p6), [p5] "=&r" (p5), [p4] "=&r" (p4)
-        : [sp3] "r" (sp3), [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-          [sp4] "r" (sp4), [sp5] "r" (sp5), [sp6] "r" (sp6), [sp7] "r" (sp7)
-    );
+        : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0),
+          [p7] "=&r"(p7), [p6] "=&r"(p6), [p5] "=&r"(p5), [p4] "=&r"(p4)
+        : [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0),
+          [sp4] "r"(sp4), [sp5] "r"(sp5), [sp6] "r"(sp6), [sp7] "r"(sp7));
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lw     %[q0],      (%[sq0])            \n\t"
         "lw     %[q1],      (%[sq1])            \n\t"
         "lw     %[q2],      (%[sq2])            \n\t"
@@ -103,57 +98,50 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
         "lw     %[q6],      (%[sq6])            \n\t"
         "lw     %[q7],      (%[sq7])            \n\t"
 
-        : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1), [q0] "=&r" (q0),
-          [q7] "=&r" (q7), [q6] "=&r" (q6), [q5] "=&r" (q5), [q4] "=&r" (q4)
-        : [sq3] "r" (sq3), [sq2] "r" (sq2), [sq1] "r" (sq1), [sq0] "r" (sq0),
-          [sq4] "r" (sq4), [sq5] "r" (sq5), [sq6] "r" (sq6), [sq7] "r" (sq7)
-    );
+        : [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0),
+          [q7] "=&r"(q7), [q6] "=&r"(q6), [q5] "=&r"(q5), [q4] "=&r"(q4)
+        : [sq3] "r"(sq3), [sq2] "r"(sq2), [sq1] "r"(sq1), [sq0] "r"(sq0),
+          [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6), [sq7] "r"(sq7));
 
-    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec,
-                                    p1, p0, p3, p2, q0, q1, q2, q3,
-                                    &hev, &mask, &flat);
+    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0,
+                                    p3, p2, q0, q1, q2, q3, &hev, &mask, &flat);
 
     flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2);
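+    /* flat2 extends the decision to the wide filter: f0 is the narrow
+       filter (p1..q1), f1 the mbfilter (p3..q3), f2 the wide filter
+       (p7..q7), selected per word or per byte lane below */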
 
     /* f0 */
     if (((flat2 == 0) && (flat == 0) && (mask != 0)) ||
         ((flat2 != 0) && (flat == 0) && (mask != 0))) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "sw       %[p1_f0],   (%[sp1])            \n\t"
           "sw       %[p0_f0],   (%[sp0])            \n\t"
           "sw       %[q0_f0],   (%[sq0])            \n\t"
           "sw       %[q1_f0],   (%[sq1])            \n\t"
 
           :
-          : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-            [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-            [sp1] "r" (sp1), [sp0] "r" (sp0),
-            [sq0] "r" (sq0), [sq1] "r" (sq1)
-      );
+          : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+            [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+            [sq1] "r"(sq1));
     } else if ((flat2 == 0xFFFFFFFF) && (flat == 0xFFFFFFFF) &&
                (mask == 0xFFFFFFFF)) {
       /* f2 */
       PACK_LEFT_0TO3()
       PACK_LEFT_4TO7()
-      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l,
-                          &p3_l, &p2_l, &p1_l, &p0_l,
-                          &q0_l, &q1_l, &q2_l, &q3_l,
-                          &q4_l, &q5_l, &q6_l, &q7_l);
+      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l,
+                          &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l,
+                          &q6_l, &q7_l);
 
       PACK_RIGHT_0TO3()
       PACK_RIGHT_4TO7()
-      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r,
-                          &p3_r, &p2_r, &p1_r, &p0_r,
-                          &q0_r, &q1_r, &q2_r, &q3_r,
-                          &q4_r, &q5_r, &q6_r, &q7_r);
+      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r,
+                          &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r,
+                          &q6_r, &q7_r);
 
       COMBINE_LEFT_RIGHT_0TO2()
       COMBINE_LEFT_RIGHT_3TO6()
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "sw         %[p6], (%[sp6])    \n\t"
           "sw         %[p5], (%[sp5])    \n\t"
           "sw         %[p4], (%[sp4])    \n\t"
@@ -163,13 +151,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "sw         %[p0], (%[sp0])    \n\t"
 
           :
-          : [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3),
-            [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),
-            [sp6] "r" (sp6), [sp5] "r" (sp5), [sp4] "r" (sp4), [sp3] "r" (sp3),
-            [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0)
-      );
+          : [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3),
+            [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [sp6] "r"(sp6),
+            [sp5] "r"(sp5), [sp4] "r"(sp4), [sp3] "r"(sp3), [sp2] "r"(sp2),
+            [sp1] "r"(sp1), [sp0] "r"(sp0));
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "sw         %[q6], (%[sq6])    \n\t"
           "sw         %[q5], (%[sq5])    \n\t"
           "sw         %[q4], (%[sq4])    \n\t"
@@ -179,26 +166,23 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "sw         %[q0], (%[sq0])    \n\t"
 
           :
-          : [q6] "r" (q6), [q5] "r" (q5), [q4] "r" (q4), [q3] "r" (q3),
-            [q2] "r" (q2), [q1] "r" (q1), [q0] "r" (q0),
-            [sq6] "r" (sq6), [sq5] "r" (sq5), [sq4] "r" (sq4), [sq3] "r" (sq3),
-            [sq2] "r" (sq2), [sq1] "r" (sq1), [sq0] "r" (sq0)
-      );
+          : [q6] "r"(q6), [q5] "r"(q5), [q4] "r"(q4), [q3] "r"(q3),
+            [q2] "r"(q2), [q1] "r"(q1), [q0] "r"(q0), [sq6] "r"(sq6),
+            [sq5] "r"(sq5), [sq4] "r"(sq4), [sq3] "r"(sq3), [sq2] "r"(sq2),
+            [sq1] "r"(sq1), [sq0] "r"(sq0));
     } else if ((flat2 == 0) && (flat == 0xFFFFFFFF) && (mask == 0xFFFFFFFF)) {
       /* f1 */
       /* left 2 element operation */
       PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
-                     &q0_l, &q1_l, &q2_l, &q3_l);
+      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
 
       /* right 2 element operation */
       PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
-                     &q0_r, &q1_r, &q2_r, &q3_r);
+      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
 
       COMBINE_LEFT_RIGHT_0TO2()
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "sw         %[p2], (%[sp2])    \n\t"
           "sw         %[p1], (%[sp1])    \n\t"
           "sw         %[p0], (%[sp0])    \n\t"
@@ -207,28 +191,23 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "sw         %[q2], (%[sq2])    \n\t"
 
           :
-          : [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),
-            [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2),
-            [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-            [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-      );
+          : [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0),
+            [q1] "r"(q1), [q2] "r"(q2), [sp2] "r"(sp2), [sp1] "r"(sp1),
+            [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2));
     } else if ((flat2 == 0) && (flat != 0) && (mask != 0)) {
       /* f0+f1 */
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
 
       /* left 2 element operation */
       PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
-                     &q0_l, &q1_l, &q2_l, &q3_l);
+      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
 
       /* right 2 element operation */
       PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
-                     &q0_r, &q1_r, &q2_r, &q3_r);
+      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
 
       if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_r],  (%[sp2])    \n\t"
             "sb         %[p1_r],  (%[sp1])    \n\t"
             "sb         %[p0_r],  (%[sp0])    \n\t"
@@ -237,27 +216,24 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q2_r],  (%[sq2])    \n\t"
 
             :
-            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
-              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
+              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+              [sq1] "r"(sq1), [sq2] "r"(sq2));
       } else if (mask & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  (%[sp1])    \n\t"
             "sb         %[p0_f0],  (%[sp0])    \n\t"
             "sb         %[q0_f0],  (%[sq0])    \n\t"
             "sb         %[q1_f0],  (%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_r],    %[p2_r],    16      \n\t"
           "srl      %[p1_r],    %[p1_r],    16      \n\t"
           "srl      %[p0_r],    %[p0_r],    16      \n\t"
@@ -269,15 +245,14 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
 
-          : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r),
-            [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r),
+            [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_r],  +1(%[sp2])    \n\t"
             "sb         %[p1_r],  +1(%[sp1])    \n\t"
             "sb         %[p0_r],  +1(%[sp0])    \n\t"
@@ -286,39 +261,35 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q2_r],  +1(%[sq2])    \n\t"
 
             :
-            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
-              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
+              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+              [sq1] "r"(sq1), [sq2] "r"(sq2));
       } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  +1(%[sp1])    \n\t"
             "sb         %[p0_f0],  +1(%[sp0])    \n\t"
             "sb         %[q0_f0],  +1(%[sq0])    \n\t"
             "sb         %[q1_f0],  +1(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
           "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
           "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8     \n\t"
 
-          : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_l],  +2(%[sp2])    \n\t"
             "sb         %[p1_l],  +2(%[sp1])    \n\t"
             "sb         %[p0_l],  +2(%[sp0])    \n\t"
@@ -327,27 +298,24 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q2_l],  +2(%[sq2])    \n\t"
 
             :
-            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
-              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
+              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+              [sq1] "r"(sq1), [sq2] "r"(sq2));
       } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  +2(%[sp1])    \n\t"
             "sb         %[p0_f0],  +2(%[sp0])    \n\t"
             "sb         %[q0_f0],  +2(%[sq0])    \n\t"
             "sb         %[q1_f0],  +2(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_l],    %[p2_l],    16      \n\t"
           "srl      %[p1_l],    %[p1_l],    16      \n\t"
           "srl      %[p0_l],    %[p0_l],    16      \n\t"
@@ -359,15 +327,14 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
 
-          : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l),
-            [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l),
+            [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_l],  +3(%[sp2])    \n\t"
             "sb         %[p1_l],  +3(%[sp1])    \n\t"
             "sb         %[p0_l],  +3(%[sp0])    \n\t"
@@ -376,61 +343,51 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q2_l],  +3(%[sq2])    \n\t"
 
             :
-            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
-              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
+              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0),
+              [sq1] "r"(sq1), [sq2] "r"(sq2));
       } else if (mask & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  +3(%[sp1])    \n\t"
             "sb         %[p0_f0],  +3(%[sp0])    \n\t"
             "sb         %[q0_f0],  +3(%[sq0])    \n\t"
             "sb         %[q1_f0],  +3(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
     } else if ((flat2 != 0) && (flat != 0) && (mask != 0)) {
       /* f0 + f1 + f2 */
       /* f0  function */
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
 
       /* f1  function */
       /* left 2 element operation */
       PACK_LEFT_0TO3()
-      mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l,
-                      q0_l, q1_l, q2_l, q3_l,
-                      &p2_l_f1, &p1_l_f1, &p0_l_f1,
-                      &q0_l_f1, &q1_l_f1, &q2_l_f1);
+      mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, &p2_l_f1,
+                      &p1_l_f1, &p0_l_f1, &q0_l_f1, &q1_l_f1, &q2_l_f1);
 
       /* right 2 element operation */
       PACK_RIGHT_0TO3()
-      mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r,
-                      q0_r, q1_r, q2_r, q3_r,
-                      &p2_r_f1, &p1_r_f1, &p0_r_f1,
-                      &q0_r_f1, &q1_r_f1, &q2_r_f1);
+      mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, &p2_r_f1,
+                      &p1_r_f1, &p0_r_f1, &q0_r_f1, &q1_r_f1, &q2_r_f1);
 
       /* f2  function */
       PACK_LEFT_4TO7()
-      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l,
-                          &p3_l, &p2_l, &p1_l, &p0_l,
-                          &q0_l, &q1_l, &q2_l, &q3_l,
-                          &q4_l, &q5_l, &q6_l, &q7_l);
+      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l,
+                          &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l,
+                          &q6_l, &q7_l);
 
       PACK_RIGHT_4TO7()
-      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r,
-                          &p3_r, &p2_r, &p1_r, &p0_r,
-                          &q0_r, &q1_r, &q2_r, &q3_r,
-                          &q4_r, &q5_r, &q6_r, &q7_r);
+      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r,
+                          &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r,
+                          &q6_r, &q7_r);
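       /* mask, flat and flat2 carry one byte of flags per pixel: each
          store block below writes a single byte lane, and the srl blocks
          in between shift the next lane into place. */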
 
       if (mask & flat & flat2 & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p6_r],  (%[sp6])    \n\t"
             "sb         %[p5_r],  (%[sp5])    \n\t"
             "sb         %[p4_r],  (%[sp4])    \n\t"
@@ -440,14 +397,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[p0_r],  (%[sp0])    \n\t"
 
             :
-            : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), [p4_r] "r" (p4_r),
-              [p3_r] "r" (p3_r), [p2_r] "r" (p2_r), [p1_r] "r" (p1_r),
-              [sp6] "r" (sp6), [sp5] "r" (sp5), [sp4] "r" (sp4),
-              [sp3] "r" (sp3), [sp2] "r" (sp2), [sp1] "r" (sp1),
-              [p0_r] "r" (p0_r), [sp0] "r" (sp0)
-        );
-
-        __asm__ __volatile__ (
+            : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r),
+              [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r),
+              [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4), [sp3] "r"(sp3),
+              [sp2] "r"(sp2), [sp1] "r"(sp1), [p0_r] "r"(p0_r), [sp0] "r"(sp0));
+
+        __asm__ __volatile__(
             "sb         %[q0_r],  (%[sq0])    \n\t"
             "sb         %[q1_r],  (%[sq1])    \n\t"
             "sb         %[q2_r],  (%[sq2])    \n\t"
@@ -457,15 +412,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q6_r],  (%[sq6])    \n\t"
 
             :
-            : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [q3_r] "r" (q3_r), [q4_r] "r" (q4_r), [q5_r] "r" (q5_r),
-              [q6_r] "r" (q6_r),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2),
-              [sq3] "r" (sq3), [sq4] "r" (sq4), [sq5] "r" (sq5),
-              [sq6] "r" (sq6)
-        );
+            : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r),
+              [q6_r] "r"(q6_r), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2),
+              [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6));
       } else if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_r_f1],  (%[sp2])    \n\t"
             "sb         %[p1_r_f1],  (%[sp1])    \n\t"
             "sb         %[p0_r_f1],  (%[sp0])    \n\t"
@@ -474,27 +426,25 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q2_r_f1],  (%[sq2])    \n\t"
 
             :
-            : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1),
-              [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1),
-              [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1),
+              [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1),
+              [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [sp2] "r"(sp2),
+              [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1),
+              [sq2] "r"(sq2));
       } else if (mask & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  (%[sp1])    \n\t"
             "sb         %[p0_f0],  (%[sp0])    \n\t"
             "sb         %[q0_f0],  (%[sq0])    \n\t"
             "sb         %[q1_f0],  (%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0),
-              [q1_f0] "r" (q1_f0), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl        %[p6_r], %[p6_r], 16     \n\t"
           "srl        %[p5_r], %[p5_r], 16     \n\t"
           "srl        %[p4_r], %[p4_r], 16     \n\t"
@@ -510,15 +460,14 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "srl        %[q5_r], %[q5_r], 16     \n\t"
           "srl        %[q6_r], %[q6_r], 16     \n\t"
 
-          : [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r),
-            [q3_r] "+r" (q3_r), [q4_r] "+r" (q4_r), [q5_r] "+r" (q5_r),
-            [p6_r] "+r" (p6_r), [p5_r] "+r" (p5_r), [p4_r] "+r" (p4_r),
-            [p3_r] "+r" (p3_r), [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r),
-            [q6_r] "+r" (q6_r), [p0_r] "+r" (p0_r)
-          :
-      );
+          : [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
+            [q3_r] "+r"(q3_r), [q4_r] "+r"(q4_r), [q5_r] "+r"(q5_r),
+            [p6_r] "+r"(p6_r), [p5_r] "+r"(p5_r), [p4_r] "+r"(p4_r),
+            [p3_r] "+r"(p3_r), [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r),
+            [q6_r] "+r"(q6_r), [p0_r] "+r"(p0_r)
+          :);
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl        %[p2_r_f1], %[p2_r_f1], 16     \n\t"
           "srl        %[p1_r_f1], %[p1_r_f1], 16     \n\t"
           "srl        %[p0_r_f1], %[p0_r_f1], 16     \n\t"
@@ -530,16 +479,15 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "srl        %[q0_f0],   %[q0_f0],   8      \n\t"
           "srl        %[q1_f0],   %[q1_f0],   8      \n\t"
 
-          : [p2_r_f1] "+r" (p2_r_f1), [p1_r_f1] "+r" (p1_r_f1),
-            [p0_r_f1] "+r" (p0_r_f1), [q0_r_f1] "+r" (q0_r_f1),
-            [q1_r_f1] "+r" (q1_r_f1), [q2_r_f1] "+r" (q2_r_f1),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_r_f1] "+r"(p2_r_f1), [p1_r_f1] "+r"(p1_r_f1),
+            [p0_r_f1] "+r"(p0_r_f1), [q0_r_f1] "+r"(q0_r_f1),
+            [q1_r_f1] "+r"(q1_r_f1), [q2_r_f1] "+r"(q2_r_f1),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & flat2 & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p6_r],  +1(%[sp6])    \n\t"
             "sb         %[p5_r],  +1(%[sp5])    \n\t"
             "sb         %[p4_r],  +1(%[sp4])    \n\t"
@@ -549,14 +497,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[p0_r],  +1(%[sp0])    \n\t"
 
             :
-            : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), [p4_r] "r" (p4_r),
-              [p3_r] "r" (p3_r), [p2_r] "r" (p2_r), [p1_r] "r" (p1_r),
-              [p0_r] "r" (p0_r), [sp6] "r" (sp6), [sp5] "r" (sp5),
-              [sp4] "r" (sp4), [sp3] "r" (sp3),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0)
-        );
-
-        __asm__ __volatile__ (
+            : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r),
+              [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r),
+              [p0_r] "r"(p0_r), [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4),
+              [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0));
+
+        __asm__ __volatile__(
             "sb         %[q0_r],  +1(%[sq0])    \n\t"
             "sb         %[q1_r],  +1(%[sq1])    \n\t"
             "sb         %[q2_r],  +1(%[sq2])    \n\t"
@@ -566,14 +512,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q6_r],  +1(%[sq6])    \n\t"
 
             :
-            : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [q3_r] "r" (q3_r), [q4_r] "r" (q4_r), [q5_r] "r" (q5_r),
-              [q6_r] "r" (q6_r), [sq0] "r" (sq0), [sq1] "r" (sq1),
-              [sq2] "r" (sq2), [sq3] "r" (sq3),
-              [sq4] "r" (sq4), [sq5] "r" (sq5), [sq6] "r" (sq6)
-        );
+            : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r),
+              [q6_r] "r"(q6_r), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2),
+              [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6));
       } else if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_r_f1],  +1(%[sp2])    \n\t"
             "sb         %[p1_r_f1],  +1(%[sp1])    \n\t"
             "sb         %[p0_r_f1],  +1(%[sp0])    \n\t"
@@ -582,39 +526,36 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q2_r_f1],  +1(%[sq2])    \n\t"
 
             :
-            : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1),
-              [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1),
-              [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1),
+              [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1),
+              [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [sp2] "r"(sp2),
+              [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1),
+              [sq2] "r"(sq2));
       } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  +1(%[sp1])    \n\t"
             "sb         %[p0_f0],  +1(%[sp0])    \n\t"
             "sb         %[q0_f0],  +1(%[sq0])    \n\t"
             "sb         %[q1_f0],  +1(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0),
-              [q1_f0] "r" (q1_f0), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl        %[p1_f0], %[p1_f0], 8     \n\t"
           "srl        %[p0_f0], %[p0_f0], 8     \n\t"
           "srl        %[q0_f0], %[q0_f0], 8     \n\t"
           "srl        %[q1_f0], %[q1_f0], 8     \n\t"
 
-          : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & flat2 & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p6_l],  +2(%[sp6])    \n\t"
             "sb         %[p5_l],  +2(%[sp5])    \n\t"
             "sb         %[p4_l],  +2(%[sp4])    \n\t"
@@ -624,14 +565,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[p0_l],  +2(%[sp0])    \n\t"
 
             :
-            : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l),
-              [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l),
-              [p0_l] "r" (p0_l), [sp6] "r" (sp6), [sp5] "r" (sp5),
-              [sp4] "r" (sp4), [sp3] "r" (sp3),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0)
-        );
-
-        __asm__ __volatile__ (
+            : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l),
+              [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l),
+              [p0_l] "r"(p0_l), [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4),
+              [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0));
+
+        __asm__ __volatile__(
             "sb         %[q0_l],  +2(%[sq0])    \n\t"
             "sb         %[q1_l],  +2(%[sq1])    \n\t"
             "sb         %[q2_l],  +2(%[sq2])    \n\t"
@@ -641,14 +580,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q6_l],  +2(%[sq6])    \n\t"
 
             :
-            : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), [q5_l] "r" (q5_l),
-              [q6_l] "r" (q6_l), [sq0] "r" (sq0), [sq1] "r" (sq1),
-              [sq2] "r" (sq2), [sq3] "r" (sq3),
-              [sq4] "r" (sq4), [sq5] "r" (sq5), [sq6] "r" (sq6)
-        );
+            : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l),
+              [q6_l] "r"(q6_l), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2),
+              [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6));
       } else if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p2_l_f1],  +2(%[sp2])    \n\t"
             "sb         %[p1_l_f1],  +2(%[sp1])    \n\t"
             "sb         %[p0_l_f1],  +2(%[sp0])    \n\t"
@@ -657,27 +594,25 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb         %[q2_l_f1],  +2(%[sq2])    \n\t"
 
             :
-            : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1),
-              [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1),
-              [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1),
+              [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1),
+              [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [sp2] "r"(sp2),
+              [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1),
+              [sq2] "r"(sq2));
       } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  +2(%[sp1])    \n\t"
             "sb         %[p0_f0],  +2(%[sp0])    \n\t"
             "sb         %[q0_f0],  +2(%[sq0])    \n\t"
             "sb         %[q1_f0],  +2(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0),
-              [q1_f0] "r" (q1_f0), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p6_l],    %[p6_l],    16   \n\t"
           "srl      %[p5_l],    %[p5_l],    16   \n\t"
           "srl      %[p4_l],    %[p4_l],    16   \n\t"
@@ -693,15 +628,14 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "srl      %[q5_l],    %[q5_l],    16   \n\t"
           "srl      %[q6_l],    %[q6_l],    16   \n\t"
 
-          : [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
-            [q3_l] "+r" (q3_l), [q4_l] "+r" (q4_l), [q5_l] "+r" (q5_l),
-            [q6_l] "+r" (q6_l), [p6_l] "+r" (p6_l), [p5_l] "+r" (p5_l),
-            [p4_l] "+r" (p4_l), [p3_l] "+r" (p3_l), [p2_l] "+r" (p2_l),
-            [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l)
-          :
-      );
+          : [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
+            [q3_l] "+r"(q3_l), [q4_l] "+r"(q4_l), [q5_l] "+r"(q5_l),
+            [q6_l] "+r"(q6_l), [p6_l] "+r"(p6_l), [p5_l] "+r"(p5_l),
+            [p4_l] "+r"(p4_l), [p3_l] "+r"(p3_l), [p2_l] "+r"(p2_l),
+            [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l)
+          :);
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_l_f1],   %[p2_l_f1],   16   \n\t"
           "srl      %[p1_l_f1],   %[p1_l_f1],   16   \n\t"
           "srl      %[p0_l_f1],   %[p0_l_f1],   16   \n\t"
@@ -713,16 +647,15 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
           "srl      %[q0_f0],     %[q0_f0],     8    \n\t"
           "srl      %[q1_f0],     %[q1_f0],     8    \n\t"
 
-          : [p2_l_f1] "+r" (p2_l_f1), [p1_l_f1] "+r" (p1_l_f1),
-            [p0_l_f1] "+r" (p0_l_f1), [q0_l_f1] "+r" (q0_l_f1),
-            [q1_l_f1] "+r" (q1_l_f1), [q2_l_f1] "+r" (q2_l_f1),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_l_f1] "+r"(p2_l_f1), [p1_l_f1] "+r"(p1_l_f1),
+            [p0_l_f1] "+r"(p0_l_f1), [q0_l_f1] "+r"(q0_l_f1),
+            [q1_l_f1] "+r"(q1_l_f1), [q2_l_f1] "+r"(q2_l_f1),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & flat2 & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p6_l],    +3(%[sp6])    \n\t"
             "sb     %[p5_l],    +3(%[sp5])    \n\t"
             "sb     %[p4_l],    +3(%[sp4])    \n\t"
@@ -732,14 +665,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb     %[p0_l],    +3(%[sp0])    \n\t"
 
             :
-            : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l),
-              [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l),
-              [p0_l] "r" (p0_l), [sp6] "r" (sp6), [sp5] "r" (sp5),
-              [sp4] "r" (sp4), [sp3] "r" (sp3), [sp2] "r" (sp2),
-              [sp1] "r" (sp1), [sp0] "r" (sp0)
-        );
-
-        __asm__ __volatile__ (
+            : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l),
+              [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l),
+              [p0_l] "r"(p0_l), [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4),
+              [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0));
+
+        __asm__ __volatile__(
             "sb     %[q0_l],    +3(%[sq0])    \n\t"
             "sb     %[q1_l],    +3(%[sq1])    \n\t"
             "sb     %[q2_l],    +3(%[sq2])    \n\t"
@@ -749,15 +680,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb     %[q6_l],    +3(%[sq6])    \n\t"
 
             :
-            : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l),
-              [q2_l] "r" (q2_l), [q3_l] "r" (q3_l),
-              [q4_l] "r" (q4_l), [q5_l] "r" (q5_l),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2),
-              [sq3] "r" (sq3), [sq4] "r" (sq4), [sq5] "r" (sq5),
-              [q6_l] "r" (q6_l), [sq6] "r" (sq6)
-        );
+            : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l),
+              [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2), [sq3] "r"(sq3),
+              [sq4] "r"(sq4), [sq5] "r"(sq5), [q6_l] "r"(q6_l), [sq6] "r"(sq6));
       } else if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_l_f1],     +3(%[sp2])    \n\t"
             "sb     %[p1_l_f1],     +3(%[sp1])    \n\t"
             "sb     %[p0_l_f1],     +3(%[sp0])    \n\t"
@@ -766,25 +694,22 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s,
             "sb     %[q2_l_f1],     +3(%[sq2])    \n\t"
 
             :
-            : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1),
-              [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1),
-              [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1),
-              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
-        );
+            : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1),
+              [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1),
+              [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [sp2] "r"(sp2),
+              [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1),
+              [sq2] "r"(sq2));
       } else if (mask & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   +3(%[sp1])    \n\t"
             "sb     %[p0_f0],   +3(%[sp0])    \n\t"
             "sb     %[q0_f0],   +3(%[sq0])    \n\t"
             "sb     %[q1_f0],   +3(%[sq1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [sp1] "r" (sp1), [sp0] "r" (sp0),
-              [sq0] "r" (sq0), [sq1] "r" (sq1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0),
+              [sq0] "r"(sq0), [sq1] "r"(sq1));
       }
     }
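
Both filters keep their per-pixel decisions in packed 32-bit flag words
(mask, flat, flat2), one byte per pixel, which is why every store sequence
above unrolls into four byte lanes separated by srl shifts of 8 (for the
byte-packed words) or 16 (for the halfword-packed _l/_r words). A rough
scalar sketch of that dispatch, using hypothetical names only (nothing
here is libvpx API):

    #include <stdint.h>

    /* Illustrative sketch. "strong", "medium" and "weak" stand in for the
       packed f2 (wide), f1 (flat) and f0 (narrow) filter outputs covering
       four pixels; all names are hypothetical. */
    static void dispatch_4px(uint8_t *dst, uint32_t mask, uint32_t flat,
                             uint32_t flat2, uint32_t strong,
                             uint32_t medium, uint32_t weak) {
      int lane;
      for (lane = 0; lane < 4; ++lane) {
        const uint32_t lane_bits = 0xFFu << (8 * lane);
        if (mask & flat & flat2 & lane_bits) /* wide filter wins */
          dst[lane] = (uint8_t)(strong >> (8 * lane));
        else if (mask & flat & lane_bits) /* flat filter */
          dst[lane] = (uint8_t)(medium >> (8 * lane));
        else if (mask & lane_bits) /* narrow filter */
          dst[lane] = (uint8_t)(weak >> (8 * lane));
        /* all flags clear: the pixel is left untouched */
      }
    }

The assembly unrolls this loop completely: each if/else ladder stores one
byte lane with sb, then an srl block shifts the next lane into position.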
 
diff --git a/vpx_dsp/mips/loopfilter_mb_vert_dspr2.c b/vpx_dsp/mips/loopfilter_mb_vert_dspr2.c
index e580f014e933aa845f51eabf78cebb11a348c860..96e8d8858a117bdc4c6f3b5ddac48e3cf0759ff9 100644
--- a/vpx_dsp/mips/loopfilter_mb_vert_dspr2.c
+++ b/vpx_dsp/mips/loopfilter_mb_vert_dspr2.c
@@ -19,40 +19,36 @@
 #include "vpx_mem/vpx_mem.h"
 
 #if HAVE_DSPR2
-void vpx_lpf_vertical_16_dspr2(uint8_t *s,
-                               int pitch,
-                               const uint8_t *blimit,
-                               const uint8_t *limit,
-                               const uint8_t *thresh) {
-  uint8_t   i;
-  uint32_t  mask, hev, flat, flat2;
-  uint8_t   *s1, *s2, *s3, *s4;
-  uint32_t  prim1, prim2, sec3, sec4, prim3, prim4;
-  uint32_t  thresh_vec, flimit_vec, limit_vec;
-  uint32_t  uflimit, ulimit, uthresh;
-  uint32_t  p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
-  uint32_t  p1_f0, p0_f0, q0_f0, q1_f0;
-  uint32_t  p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l;
-  uint32_t  q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l;
-  uint32_t  p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r;
-  uint32_t  q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r;
-  uint32_t  p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1;
-  uint32_t  q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1;
+void vpx_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit,
+                               const uint8_t *limit, const uint8_t *thresh) {
+  uint8_t i;
+  uint32_t mask, hev, flat, flat2;
+  uint8_t *s1, *s2, *s3, *s4;
+  uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+  uint32_t thresh_vec, flimit_vec, limit_vec;
+  uint32_t uflimit, ulimit, uthresh;
+  uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
+  uint32_t p1_f0, p0_f0, q0_f0, q1_f0;
+  uint32_t p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l;
+  uint32_t q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l;
+  uint32_t p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r;
+  uint32_t q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r;
+  uint32_t p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1;
+  uint32_t q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1;
 
   uflimit = *blimit;
   ulimit = *limit;
   uthresh = *thresh;
 
   /* create quad-byte */
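   /* (replv.qb below replicates the low byte of each scalar threshold
      into all four byte lanes, so one word-wide compare covers four
      pixels at once) */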
-  __asm__ __volatile__ (
+  __asm__ __volatile__(
       "replv.qb     %[thresh_vec],     %[uthresh]    \n\t"
       "replv.qb     %[flimit_vec],     %[uflimit]    \n\t"
       "replv.qb     %[limit_vec],      %[ulimit]     \n\t"
 
-      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
-        [limit_vec] "=r" (limit_vec)
-      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
-  );
+      : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+        [limit_vec] "=r"(limit_vec)
+      : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit));
 
   prefetch_store(s + pitch);
 
@@ -61,9 +57,9 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
     s2 = s + pitch;
     s3 = s2 + pitch;
     s4 = s3 + pitch;
-    s  = s4 + pitch;
+    s = s4 + pitch;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lw     %[p0],  -4(%[s1])    \n\t"
         "lw     %[p1],  -4(%[s2])    \n\t"
         "lw     %[p2],  -4(%[s3])    \n\t"
@@ -73,13 +69,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
         "lw     %[p6],  -8(%[s3])    \n\t"
         "lw     %[p7],  -8(%[s4])    \n\t"
 
-        : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1),
-          [p0] "=&r" (p0), [p7] "=&r" (p7), [p6] "=&r" (p6),
-          [p5] "=&r" (p5), [p4] "=&r" (p4)
-        : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4)
-    );
+        : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0),
+          [p7] "=&r"(p7), [p6] "=&r"(p6), [p5] "=&r"(p5), [p4] "=&r"(p4)
+        : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lw     %[q3],  (%[s1])     \n\t"
         "lw     %[q2],  (%[s2])     \n\t"
         "lw     %[q1],  (%[s3])     \n\t"
@@ -89,11 +83,9 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
         "lw     %[q5],  +4(%[s3])   \n\t"
         "lw     %[q4],  +4(%[s4])   \n\t"
 
-        : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1),
-          [q0] "=&r" (q0), [q7] "=&r" (q7), [q6] "=&r" (q6),
-          [q5] "=&r" (q5), [q4] "=&r" (q4)
-        : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4)
-    );
+        : [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0),
+          [q7] "=&r"(q7), [q6] "=&r"(q6), [q5] "=&r"(q5), [q4] "=&r"(q4)
+        : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4));
 
     /* transpose p3, p2, p1, p0
        original (when loaded from memory)
@@ -110,7 +102,7 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
          p2         p3_1  p2_1  p1_1  p0_1
          p3         p3_0  p2_0  p1_0  p0_0
     */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "precrq.qb.ph   %[prim1],   %[p0],      %[p1]       \n\t"
         "precr.qb.ph    %[prim2],   %[p0],      %[p1]       \n\t"
         "precrq.qb.ph   %[prim3],   %[p2],      %[p3]       \n\t"
@@ -126,12 +118,10 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
         "append         %[p1],      %[sec3],    16          \n\t"
         "append         %[p3],      %[sec4],    16          \n\t"
 
-        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
-          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
-          [p0] "+r" (p0), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3),
-          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
-        :
-    );
+        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+          [prim4] "=&r"(prim4), [p0] "+r"(p0), [p1] "+r"(p1), [p2] "+r"(p2),
+          [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+        :);
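     /* Conceptually, each of the four transpose blocks in this loop is a
        plain 4x4 byte transpose (scalar form: out[c][r] = in[r][c]); the
        DSPr2 pack and append instructions keep all 16 bytes resident in
        four 32-bit registers while doing it. */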
 
     /* transpose q0, q1, q2, q3
        original (when loaded from memory)
@@ -148,7 +138,7 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
          q1         q0_1  q1_1  q2_1  q3_1
          q0         q0_0  q1_0  q2_0  q3_0
     */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "precrq.qb.ph   %[prim1],   %[q3],      %[q2]       \n\t"
         "precr.qb.ph    %[prim2],   %[q3],      %[q2]       \n\t"
         "precrq.qb.ph   %[prim3],   %[q1],      %[q0]       \n\t"
@@ -164,12 +154,10 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
         "append         %[q2],      %[sec3],    16          \n\t"
         "append         %[q0],      %[sec4],    16          \n\t"
 
-        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
-          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
-          [q3] "+r" (q3), [q2] "+r" (q2), [q1] "+r" (q1), [q0] "+r" (q0),
-          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
-        :
-    );
+        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+          [prim4] "=&r"(prim4), [q3] "+r"(q3), [q2] "+r"(q2), [q1] "+r"(q1),
+          [q0] "+r"(q0), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+        :);
 
     /* transpose p7, p6, p5, p4
        original (when loaded from memory)
@@ -186,7 +174,7 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
          p6         p7_1  p6_1  p5_1  p4_1
          p7         p7_0  p6_0  p5_0  p4_0
     */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "precrq.qb.ph   %[prim1],   %[p4],      %[p5]       \n\t"
         "precr.qb.ph    %[prim2],   %[p4],      %[p5]       \n\t"
         "precrq.qb.ph   %[prim3],   %[p6],      %[p7]       \n\t"
@@ -202,12 +190,10 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
         "append         %[p5],      %[sec3],    16          \n\t"
         "append         %[p7],      %[sec4],    16          \n\t"
 
-        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
-          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
-          [p4] "+r" (p4), [p5] "+r" (p5), [p6] "+r" (p6), [p7] "+r" (p7),
-          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
-        :
-    );
+        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+          [prim4] "=&r"(prim4), [p4] "+r"(p4), [p5] "+r"(p5), [p6] "+r"(p6),
+          [p7] "+r"(p7), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+        :);
 
     /* transpose q4, q5, q6, q7
        original (when loaded from memory)
@@ -224,7 +210,7 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
         q5         q4_1  q5_1  q6_1  q7_1
         q4         q4_0  q5_0  q6_0  q7_0
     */
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "precrq.qb.ph   %[prim1],   %[q7],      %[q6]       \n\t"
         "precr.qb.ph    %[prim2],   %[q7],      %[q6]       \n\t"
         "precrq.qb.ph   %[prim3],   %[q5],      %[q4]       \n\t"
@@ -240,71 +226,60 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
         "append         %[q6],      %[sec3],    16          \n\t"
         "append         %[q4],      %[sec4],    16          \n\t"
 
-        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
-          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
-          [q7] "+r" (q7), [q6] "+r" (q6), [q5] "+r" (q5), [q4] "+r" (q4),
-          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
-        :
-    );
+        : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+          [prim4] "=&r"(prim4), [q7] "+r"(q7), [q6] "+r"(q6), [q5] "+r"(q5),
+          [q4] "+r"(q4), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+        :);
 
-    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec,
-                                    p1, p0, p3, p2, q0, q1, q2, q3,
-                                    &hev, &mask, &flat);
+    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0,
+                                    p3, p2, q0, q1, q2, q3, &hev, &mask, &flat);
 
     flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2);
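     /* Dispatch on the packed flags: flat == 0 (with mask set) takes the
        narrow f0 path, uniformly set flat/mask take f1 (or the wide f2
        when flat2 is also uniform), and mixed flags fall through to the
        per-lane blends. */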
 
     /* f0 */
     if (((flat2 == 0) && (flat == 0) && (mask != 0)) ||
         ((flat2 != 0) && (flat == 0) && (mask != 0))) {
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
       STORE_F0()
     } else if ((flat2 == 0xFFFFFFFF) && (flat == 0xFFFFFFFF) &&
                (mask == 0xFFFFFFFF)) {
       /* f2 */
       PACK_LEFT_0TO3()
       PACK_LEFT_4TO7()
-      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l,
-                          &p3_l, &p2_l, &p1_l, &p0_l,
-                          &q0_l, &q1_l, &q2_l, &q3_l,
-                          &q4_l, &q5_l, &q6_l, &q7_l);
+      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l,
+                          &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l,
+                          &q6_l, &q7_l);
 
       PACK_RIGHT_0TO3()
       PACK_RIGHT_4TO7()
-      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r,
-                          &p3_r, &p2_r, &p1_r, &p0_r,
-                          &q0_r, &q1_r, &q2_r, &q3_r,
-                          &q4_r, &q5_r, &q6_r, &q7_r);
+      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r,
+                          &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r,
+                          &q6_r, &q7_r);
 
       STORE_F2()
     } else if ((flat2 == 0) && (flat == 0xFFFFFFFF) && (mask == 0xFFFFFFFF)) {
       /* f1 */
       PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
-                     &q0_l, &q1_l, &q2_l, &q3_l);
+      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
 
       PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
-                     &q0_r, &q1_r, &q2_r, &q3_r);
+      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
 
       STORE_F1()
     } else if ((flat2 == 0) && (flat != 0) && (mask != 0)) {
       /* f0 + f1 */
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
 
       /* left 2 element operation */
       PACK_LEFT_0TO3()
-      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
-                     &q0_l, &q1_l, &q2_l, &q3_l);
+      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l);
 
       /* right 2 element operation */
       PACK_RIGHT_0TO3()
-      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
-                     &q0_r, &q1_r, &q2_r, &q3_r);
+      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r);
 
       if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_r],    -3(%[s4])    \n\t"
             "sb     %[p1_r],    -2(%[s4])    \n\t"
             "sb     %[p0_r],    -1(%[s4])    \n\t"
@@ -313,25 +288,22 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q2_r],    +2(%[s4])    \n\t"
 
             :
-            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
-              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [s4] "r" (s4)
-        );
+            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
+              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [s4] "r"(s4));
       } else if (mask & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb         %[p1_f0],  -2(%[s4])    \n\t"
             "sb         %[p0_f0],  -1(%[s4])    \n\t"
             "sb         %[q0_f0],    (%[s4])    \n\t"
             "sb         %[q1_f0],  +1(%[s4])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s4] "r" (s4)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s4] "r"(s4));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_r],    %[p2_r],    16      \n\t"
           "srl      %[p1_r],    %[p1_r],    16      \n\t"
           "srl      %[p0_r],    %[p0_r],    16      \n\t"
@@ -343,15 +315,14 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
           "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
 
-          : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r),
-            [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r),
+            [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_r],    -3(%[s3])    \n\t"
             "sb     %[p1_r],    -2(%[s3])    \n\t"
             "sb     %[p0_r],    -1(%[s3])    \n\t"
@@ -360,64 +331,57 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q2_r],    +2(%[s3])    \n\t"
 
             :
-            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
-              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
-              [s3] "r" (s3)
-        );
+            : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r),
+              [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [s3] "r"(s3));
       } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   -2(%[s3])    \n\t"
             "sb     %[p0_f0],   -1(%[s3])    \n\t"
             "sb     %[q0_f0],     (%[s3])    \n\t"
             "sb     %[q1_f0],   +1(%[s3])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s3] "r" (s3)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s3] "r"(s3));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
           "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
           "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8     \n\t"
 
-          : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__ (
-          "sb       %[p2_l],    -3(%[s2])    \n\t"
-          "sb       %[p1_l],    -2(%[s2])    \n\t"
-          "sb       %[p0_l],    -1(%[s2])    \n\t"
-          "sb       %[q0_l],      (%[s2])    \n\t"
-          "sb       %[q1_l],    +1(%[s2])    \n\t"
-          "sb       %[q2_l],    +2(%[s2])    \n\t"
-
-          :
-          : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
-            [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-            [s2] "r" (s2)
-        );
+        __asm__ __volatile__(
+            "sb       %[p2_l],    -3(%[s2])    \n\t"
+            "sb       %[p1_l],    -2(%[s2])    \n\t"
+            "sb       %[p0_l],    -1(%[s2])    \n\t"
+            "sb       %[q0_l],      (%[s2])    \n\t"
+            "sb       %[q1_l],    +1(%[s2])    \n\t"
+            "sb       %[q2_l],    +2(%[s2])    \n\t"
+
+            :
+            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
+              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [s2] "r"(s2));
       } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   -2(%[s2])    \n\t"
             "sb     %[p0_f0],   -1(%[s2])    \n\t"
             "sb     %[q0_f0],     (%[s2])    \n\t"
             "sb     %[q1_f0],   +1(%[s2])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s2] "r" (s2)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s2] "r"(s2));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_l],    %[p2_l],    16      \n\t"
           "srl      %[p1_l],    %[p1_l],    16      \n\t"
           "srl      %[p0_l],    %[p0_l],    16      \n\t"
@@ -429,15 +393,14 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
           "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8       \n\t"
 
-          : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l),
-            [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l),
+            [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_l],    -3(%[s1])    \n\t"
             "sb     %[p1_l],    -2(%[s1])    \n\t"
             "sb     %[p0_l],    -1(%[s1])    \n\t"
@@ -446,54 +409,44 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q2_l],    +2(%[s1])    \n\t"
 
             :
-            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
-              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [s1] "r" (s1)
-        );
+            : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l),
+              [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [s1] "r"(s1));
       } else if (mask & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   -2(%[s1])    \n\t"
             "sb     %[p0_f0],   -1(%[s1])    \n\t"
             "sb     %[q0_f0],     (%[s1])    \n\t"
             "sb     %[q1_f0],   +1(%[s1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s1] "r" (s1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s1] "r"(s1));
       }
     } else if ((flat2 != 0) && (flat != 0) && (mask != 0)) {
       /* f0+f1+f2 */
-      filter1_dspr2(mask, hev, p1, p0, q0, q1,
-                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
+      filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0);
 
       PACK_LEFT_0TO3()
-      mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l,
-                      q0_l, q1_l, q2_l, q3_l,
-                      &p2_l_f1, &p1_l_f1, &p0_l_f1,
-                      &q0_l_f1, &q1_l_f1, &q2_l_f1);
+      mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, &p2_l_f1,
+                      &p1_l_f1, &p0_l_f1, &q0_l_f1, &q1_l_f1, &q2_l_f1);
 
       PACK_RIGHT_0TO3()
-      mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r,
-                      q0_r, q1_r, q2_r, q3_r,
-                      &p2_r_f1, &p1_r_f1, &p0_r_f1,
-                      &q0_r_f1, &q1_r_f1, &q2_r_f1);
+      mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, &p2_r_f1,
+                      &p1_r_f1, &p0_r_f1, &q0_r_f1, &q1_r_f1, &q2_r_f1);
 
       PACK_LEFT_4TO7()
-      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l,
-                          &p3_l, &p2_l, &p1_l, &p0_l,
-                          &q0_l, &q1_l, &q2_l, &q3_l,
-                          &q4_l, &q5_l, &q6_l, &q7_l);
+      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l,
+                          &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l,
+                          &q6_l, &q7_l);
 
       PACK_RIGHT_4TO7()
-      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r,
-                          &p3_r, &p2_r, &p1_r, &p0_r,
-                          &q0_r, &q1_r, &q2_r, &q3_r,
-                          &q4_r, &q5_r, &q6_r, &q7_r);
+      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r,
+                          &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r,
+                          &q6_r, &q7_r);
 
       if (mask & flat & flat2 & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p6_r],    -7(%[s4])    \n\t"
             "sb     %[p5_r],    -6(%[s4])    \n\t"
             "sb     %[p4_r],    -5(%[s4])    \n\t"
@@ -503,13 +456,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[p0_r],    -1(%[s4])    \n\t"
 
             :
-            : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r),
-              [p4_r] "r" (p4_r), [p3_r] "r" (p3_r),
-              [p2_r] "r" (p2_r), [p1_r] "r" (p1_r),
-              [p0_r] "r" (p0_r), [s4] "r" (s4)
-        );
+            : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r),
+              [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r),
+              [p0_r] "r"(p0_r), [s4] "r"(s4));
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[q0_r],      (%[s4])    \n\t"
             "sb     %[q1_r],    +1(%[s4])    \n\t"
             "sb     %[q2_r],    +2(%[s4])    \n\t"
@@ -519,13 +470,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q6_r],    +6(%[s4])    \n\t"
 
             :
-            : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r),
-              [q2_r] "r" (q2_r), [q3_r] "r" (q3_r),
-              [q4_r] "r" (q4_r), [q5_r] "r" (q5_r),
-              [q6_r] "r" (q6_r), [s4] "r" (s4)
-        );
+            : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r),
+              [q6_r] "r"(q6_r), [s4] "r"(s4));
       } else if (mask & flat & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_r_f1],     -3(%[s4])    \n\t"
             "sb     %[p1_r_f1],     -2(%[s4])    \n\t"
             "sb     %[p0_r_f1],     -1(%[s4])    \n\t"
@@ -534,26 +483,22 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q2_r_f1],     +2(%[s4])    \n\t"
 
             :
-            : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1),
-              [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1),
-              [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1),
-              [s4] "r" (s4)
-        );
+            : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1),
+              [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1),
+              [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [s4] "r"(s4));
       } else if (mask & 0x000000FF) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   -2(%[s4])    \n\t"
             "sb     %[p0_f0],   -1(%[s4])    \n\t"
             "sb     %[q0_f0],     (%[s4])    \n\t"
             "sb     %[q1_f0],   +1(%[s4])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s4] "r" (s4)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s4] "r"(s4));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p6_r],        %[p6_r],        16     \n\t"
           "srl      %[p5_r],        %[p5_r],        16     \n\t"
           "srl      %[p4_r],        %[p4_r],        16     \n\t"
@@ -569,17 +514,14 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
           "srl      %[q5_r],        %[q5_r],        16     \n\t"
           "srl      %[q6_r],        %[q6_r],        16     \n\t"
 
-          : [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r),
-            [q2_r] "+r" (q2_r), [q3_r] "+r" (q3_r),
-            [q4_r] "+r" (q4_r), [q5_r] "+r" (q5_r),
-            [q6_r] "+r" (q6_r), [p6_r] "+r" (p6_r),
-            [p5_r] "+r" (p5_r), [p4_r] "+r" (p4_r),
-            [p3_r] "+r" (p3_r), [p2_r] "+r" (p2_r),
-            [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r)
-          :
-      );
-
-      __asm__ __volatile__ (
+          : [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r),
+            [q3_r] "+r"(q3_r), [q4_r] "+r"(q4_r), [q5_r] "+r"(q5_r),
+            [q6_r] "+r"(q6_r), [p6_r] "+r"(p6_r), [p5_r] "+r"(p5_r),
+            [p4_r] "+r"(p4_r), [p3_r] "+r"(p3_r), [p2_r] "+r"(p2_r),
+            [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r)
+          :);
+
+      __asm__ __volatile__(
           "srl      %[p2_r_f1],     %[p2_r_f1],     16      \n\t"
           "srl      %[p1_r_f1],     %[p1_r_f1],     16      \n\t"
           "srl      %[p0_r_f1],     %[p0_r_f1],     16      \n\t"
@@ -591,16 +533,15 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
           "srl      %[q0_f0],       %[q0_f0],       8       \n\t"
           "srl      %[q1_f0],       %[q1_f0],       8       \n\t"
 
-          : [p2_r_f1] "+r" (p2_r_f1), [p1_r_f1] "+r" (p1_r_f1),
-            [p0_r_f1] "+r" (p0_r_f1), [q0_r_f1] "+r" (q0_r_f1),
-            [q1_r_f1] "+r" (q1_r_f1), [q2_r_f1] "+r" (q2_r_f1),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_r_f1] "+r"(p2_r_f1), [p1_r_f1] "+r"(p1_r_f1),
+            [p0_r_f1] "+r"(p0_r_f1), [q0_r_f1] "+r"(q0_r_f1),
+            [q1_r_f1] "+r"(q1_r_f1), [q2_r_f1] "+r"(q2_r_f1),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & flat2 & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p6_r],    -7(%[s3])    \n\t"
             "sb     %[p5_r],    -6(%[s3])    \n\t"
             "sb     %[p4_r],    -5(%[s3])    \n\t"
@@ -610,12 +551,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[p0_r],    -1(%[s3])    \n\t"
 
             :
-            : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), [p4_r] "r" (p4_r),
-              [p3_r] "r" (p3_r), [p2_r] "r" (p2_r), [p1_r] "r" (p1_r),
-              [p0_r] "r" (p0_r), [s3] "r" (s3)
-        );
+            : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r),
+              [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r),
+              [p0_r] "r"(p0_r), [s3] "r"(s3));
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[q0_r],      (%[s3])    \n\t"
             "sb     %[q1_r],    +1(%[s3])    \n\t"
             "sb     %[q2_r],    +2(%[s3])    \n\t"
@@ -625,13 +565,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q6_r],    +6(%[s3])    \n\t"
 
             :
-            : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r),
-              [q2_r] "r" (q2_r), [q3_r] "r" (q3_r),
-              [q4_r] "r" (q4_r), [q5_r] "r" (q5_r),
-              [q6_r] "r" (q6_r), [s3] "r" (s3)
-        );
+            : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r),
+              [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r),
+              [q6_r] "r"(q6_r), [s3] "r"(s3));
       } else if (mask & flat & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_r_f1],     -3(%[s3])    \n\t"
             "sb     %[p1_r_f1],     -2(%[s3])    \n\t"
             "sb     %[p0_r_f1],     -1(%[s3])    \n\t"
@@ -640,38 +578,33 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q2_r_f1],     +2(%[s3])    \n\t"
 
             :
-            : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1),
-              [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1),
-              [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1),
-              [s3] "r" (s3)
-        );
+            : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1),
+              [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1),
+              [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [s3] "r"(s3));
       } else if (mask & 0x0000FF00) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   -2(%[s3])    \n\t"
             "sb     %[p0_f0],   -1(%[s3])    \n\t"
             "sb     %[q0_f0],     (%[s3])    \n\t"
             "sb     %[q1_f0],   +1(%[s3])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s3] "r" (s3)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s3] "r"(s3));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
           "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
           "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
           "srl      %[q1_f0],   %[q1_f0],   8     \n\t"
 
-          : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & flat2 & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p6_l],    -7(%[s2])    \n\t"
             "sb     %[p5_l],    -6(%[s2])    \n\t"
             "sb     %[p4_l],    -5(%[s2])    \n\t"
@@ -681,12 +614,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[p0_l],    -1(%[s2])    \n\t"
 
             :
-            : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l),
-              [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l),
-              [p0_l] "r" (p0_l), [s2] "r" (s2)
-        );
+            : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l),
+              [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l),
+              [p0_l] "r"(p0_l), [s2] "r"(s2));
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[q0_l],      (%[s2])    \n\t"
             "sb     %[q1_l],    +1(%[s2])    \n\t"
             "sb     %[q2_l],    +2(%[s2])    \n\t"
@@ -696,12 +628,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q6_l],    +6(%[s2])    \n\t"
 
             :
-            : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), [q5_l] "r" (q5_l),
-              [q6_l] "r" (q6_l), [s2] "r" (s2)
-        );
+            : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l),
+              [q6_l] "r"(q6_l), [s2] "r"(s2));
       } else if (mask & flat & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_l_f1],     -3(%[s2])    \n\t"
             "sb     %[p1_l_f1],     -2(%[s2])    \n\t"
             "sb     %[p0_l_f1],     -1(%[s2])    \n\t"
@@ -710,26 +641,22 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q2_l_f1],     +2(%[s2])    \n\t"
 
             :
-            : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1),
-              [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1),
-              [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1),
-              [s2] "r" (s2)
-        );
+            : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1),
+              [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1),
+              [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [s2] "r"(s2));
       } else if (mask & 0x00FF0000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   -2(%[s2])    \n\t"
             "sb     %[p0_f0],   -1(%[s2])    \n\t"
             "sb     %[q0_f0],     (%[s2])    \n\t"
             "sb     %[q1_f0],   +1(%[s2])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s2] "r" (s2)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s2] "r"(s2));
       }
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p6_l],        %[p6_l],        16     \n\t"
           "srl      %[p5_l],        %[p5_l],        16     \n\t"
           "srl      %[p4_l],        %[p4_l],        16     \n\t"
@@ -745,15 +672,14 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
           "srl      %[q5_l],        %[q5_l],        16     \n\t"
           "srl      %[q6_l],        %[q6_l],        16     \n\t"
 
-          : [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
-            [q3_l] "+r" (q3_l), [q4_l] "+r" (q4_l), [q5_l] "+r" (q5_l),
-            [q6_l] "+r" (q6_l), [p6_l] "+r" (p6_l), [p5_l] "+r" (p5_l),
-            [p4_l] "+r" (p4_l), [p3_l] "+r" (p3_l), [p2_l] "+r" (p2_l),
-            [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l)
-          :
-      );
+          : [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l),
+            [q3_l] "+r"(q3_l), [q4_l] "+r"(q4_l), [q5_l] "+r"(q5_l),
+            [q6_l] "+r"(q6_l), [p6_l] "+r"(p6_l), [p5_l] "+r"(p5_l),
+            [p4_l] "+r"(p4_l), [p3_l] "+r"(p3_l), [p2_l] "+r"(p2_l),
+            [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l)
+          :);
 
-      __asm__ __volatile__ (
+      __asm__ __volatile__(
           "srl      %[p2_l_f1],     %[p2_l_f1],     16      \n\t"
           "srl      %[p1_l_f1],     %[p1_l_f1],     16      \n\t"
           "srl      %[p0_l_f1],     %[p0_l_f1],     16      \n\t"
@@ -765,16 +691,15 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
           "srl      %[q0_f0],       %[q0_f0],       8       \n\t"
           "srl      %[q1_f0],       %[q1_f0],       8       \n\t"
 
-          : [p2_l_f1] "+r" (p2_l_f1), [p1_l_f1] "+r" (p1_l_f1),
-            [p0_l_f1] "+r" (p0_l_f1), [q0_l_f1] "+r" (q0_l_f1),
-            [q1_l_f1] "+r" (q1_l_f1), [q2_l_f1] "+r" (q2_l_f1),
-            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
-            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
-          :
-      );
+          : [p2_l_f1] "+r"(p2_l_f1), [p1_l_f1] "+r"(p1_l_f1),
+            [p0_l_f1] "+r"(p0_l_f1), [q0_l_f1] "+r"(q0_l_f1),
+            [q1_l_f1] "+r"(q1_l_f1), [q2_l_f1] "+r"(q2_l_f1),
+            [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0),
+            [q1_f0] "+r"(q1_f0)
+          :);
 
       if (mask & flat & flat2 & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p6_l],    -7(%[s1])    \n\t"
             "sb     %[p5_l],    -6(%[s1])    \n\t"
             "sb     %[p4_l],    -5(%[s1])    \n\t"
@@ -784,13 +709,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[p0_l],    -1(%[s1])    \n\t"
 
             :
-            : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l),
-              [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l),
-              [p0_l] "r" (p0_l),
-              [s1] "r" (s1)
-        );
+            : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l),
+              [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l),
+              [p0_l] "r"(p0_l), [s1] "r"(s1));
 
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[q0_l],     (%[s1])    \n\t"
             "sb     %[q1_l],    1(%[s1])    \n\t"
             "sb     %[q2_l],    2(%[s1])    \n\t"
@@ -800,13 +723,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q6_l],    6(%[s1])    \n\t"
 
             :
-            : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
-              [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), [q5_l] "r" (q5_l),
-              [q6_l] "r" (q6_l),
-              [s1] "r" (s1)
-        );
+            : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l),
+              [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l),
+              [q6_l] "r"(q6_l), [s1] "r"(s1));
       } else if (mask & flat & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p2_l_f1],     -3(%[s1])    \n\t"
             "sb     %[p1_l_f1],     -2(%[s1])    \n\t"
             "sb     %[p0_l_f1],     -1(%[s1])    \n\t"
@@ -815,23 +736,19 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s,
             "sb     %[q2_l_f1],     +2(%[s1])    \n\t"
 
             :
-            : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1),
-              [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1),
-              [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1),
-              [s1] "r" (s1)
-        );
+            : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1),
+              [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1),
+              [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [s1] "r"(s1));
       } else if (mask & 0xFF000000) {
-        __asm__ __volatile__ (
+        __asm__ __volatile__(
             "sb     %[p1_f0],   -2(%[s1])    \n\t"
             "sb     %[p0_f0],   -1(%[s1])    \n\t"
             "sb     %[q0_f0],     (%[s1])    \n\t"
             "sb     %[q1_f0],   +1(%[s1])    \n\t"
 
             :
-            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
-              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
-              [s1] "r" (s1)
-        );
+            : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0),
+              [q1_f0] "r"(q1_f0), [s1] "r"(s1));
       }
     }
   }
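
(Annotation: the dspr2 path above filters four pixel columns at once, packed into 32-bit words; each 0xFF slice of mask/flat/flat2 gates one column, and the `sb` sequences store exactly one of three precomputed results. A scalar sketch of that store decision — hypothetical helper names, not the library's API:)

#include <stdint.h>

/* Scalar model of the store selection in vpx_lpf_vertical_16_dspr2:
 * for one pixel row `s` of a vertical edge, `lane` is that column's
 * slice of the packed words (0x000000FF .. 0xFF000000). */
static void store_row(uint8_t *s, uint32_t lane, uint32_t mask,
                      uint32_t flat, uint32_t flat2,
                      const uint8_t out16[14], /* p6..q6, wide filter */
                      const uint8_t out8[6],   /* p2..q2, _f1 values  */
                      const uint8_t out4[4]) { /* p1..q1, _f0 values  */
  int i;
  if (mask & flat & flat2 & lane) {
    for (i = 0; i < 14; ++i) s[i - 7] = out16[i]; /* sb ... -7(s)..+6(s) */
  } else if (mask & flat & lane) {
    for (i = 0; i < 6; ++i) s[i - 3] = out8[i];   /* sb ... -3(s)..+2(s) */
  } else if (mask & lane) {
    for (i = 0; i < 4; ++i) s[i - 2] = out4[i];   /* sb ... -2(s)..+1(s) */
  }
}
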
diff --git a/vpx_dsp/mips/loopfilter_msa.h b/vpx_dsp/mips/loopfilter_msa.h
index 9894701bf7b8fa18aa82beaaff8966f78772e508..d3c2bd4edb1230bc33f1874192233ef197006967 100644
--- a/vpx_dsp/mips/loopfilter_msa.h
+++ b/vpx_dsp/mips/loopfilter_msa.h
@@ -13,234 +13,238 @@
 
 #include "vpx_dsp/mips/macros_msa.h"
 
-#define VPX_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in,  \
-                           p1_out, p0_out, q0_out, q1_out) {             \
-  v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign;                    \
-  v16i8 filt, filt1, filt2, cnst4b, cnst3b;                              \
-  v8i16 q0_sub_p0_r, filt_r, cnst3h;                                     \
-                                                                         \
-  p1_m = (v16i8)__msa_xori_b(p1_in, 0x80);                               \
-  p0_m = (v16i8)__msa_xori_b(p0_in, 0x80);                               \
-  q0_m = (v16i8)__msa_xori_b(q0_in, 0x80);                               \
-  q1_m = (v16i8)__msa_xori_b(q1_in, 0x80);                               \
-                                                                         \
-  filt = __msa_subs_s_b(p1_m, q1_m);                                     \
-  filt = filt & (v16i8)hev_in;                                           \
-  q0_sub_p0 = q0_m - p0_m;                                               \
-  filt_sign = __msa_clti_s_b(filt, 0);                                   \
-                                                                         \
-  cnst3h = __msa_ldi_h(3);                                               \
-  q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0);               \
-  q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h);       \
-  filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt);                         \
-  filt_r += q0_sub_p0_r;                                                 \
-  filt_r = __msa_sat_s_h(filt_r, 7);                                     \
-                                                                         \
-  /* combine left and right part */                                      \
-  filt = __msa_pckev_b((v16i8)filt_r, (v16i8)filt_r);                    \
-                                                                         \
-  filt = filt & (v16i8)mask_in;                                          \
-  cnst4b = __msa_ldi_b(4);                                               \
-  filt1 = __msa_adds_s_b(filt, cnst4b);                                  \
-  filt1 >>= 3;                                                           \
-                                                                         \
-  cnst3b = __msa_ldi_b(3);                                               \
-  filt2 = __msa_adds_s_b(filt, cnst3b);                                  \
-  filt2 >>= 3;                                                           \
-                                                                         \
-  q0_m = __msa_subs_s_b(q0_m, filt1);                                    \
-  q0_out = __msa_xori_b((v16u8)q0_m, 0x80);                              \
-  p0_m = __msa_adds_s_b(p0_m, filt2);                                    \
-  p0_out = __msa_xori_b((v16u8)p0_m, 0x80);                              \
-                                                                         \
-  filt = __msa_srari_b(filt1, 1);                                        \
-  hev_in = __msa_xori_b((v16u8)hev_in, 0xff);                            \
-  filt = filt & (v16i8)hev_in;                                           \
-                                                                         \
-  q1_m = __msa_subs_s_b(q1_m, filt);                                     \
-  q1_out = __msa_xori_b((v16u8)q1_m, 0x80);                              \
-  p1_m = __msa_adds_s_b(p1_m, filt);                                     \
-  p1_out = __msa_xori_b((v16u8)p1_m, 0x80);                              \
-}
+#define VPX_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
+                           p1_out, p0_out, q0_out, q1_out)              \
+  {                                                                     \
+    v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign;                 \
+    v16i8 filt, filt1, filt2, cnst4b, cnst3b;                           \
+    v8i16 q0_sub_p0_r, filt_r, cnst3h;                                  \
+                                                                        \
+    p1_m = (v16i8)__msa_xori_b(p1_in, 0x80);                            \
+    p0_m = (v16i8)__msa_xori_b(p0_in, 0x80);                            \
+    q0_m = (v16i8)__msa_xori_b(q0_in, 0x80);                            \
+    q1_m = (v16i8)__msa_xori_b(q1_in, 0x80);                            \
+                                                                        \
+    filt = __msa_subs_s_b(p1_m, q1_m);                                  \
+    filt = filt & (v16i8)hev_in;                                        \
+    q0_sub_p0 = q0_m - p0_m;                                            \
+    filt_sign = __msa_clti_s_b(filt, 0);                                \
+                                                                        \
+    cnst3h = __msa_ldi_h(3);                                            \
+    q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0);            \
+    q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h);    \
+    filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt);                      \
+    filt_r += q0_sub_p0_r;                                              \
+    filt_r = __msa_sat_s_h(filt_r, 7);                                  \
+                                                                        \
+    /* combine left and right parts */                                  \
+    filt = __msa_pckev_b((v16i8)filt_r, (v16i8)filt_r);                 \
+                                                                        \
+    filt = filt & (v16i8)mask_in;                                       \
+    cnst4b = __msa_ldi_b(4);                                            \
+    filt1 = __msa_adds_s_b(filt, cnst4b);                               \
+    filt1 >>= 3;                                                        \
+                                                                        \
+    cnst3b = __msa_ldi_b(3);                                            \
+    filt2 = __msa_adds_s_b(filt, cnst3b);                               \
+    filt2 >>= 3;                                                        \
+                                                                        \
+    q0_m = __msa_subs_s_b(q0_m, filt1);                                 \
+    q0_out = __msa_xori_b((v16u8)q0_m, 0x80);                           \
+    p0_m = __msa_adds_s_b(p0_m, filt2);                                 \
+    p0_out = __msa_xori_b((v16u8)p0_m, 0x80);                           \
+                                                                        \
+    filt = __msa_srari_b(filt1, 1);                                     \
+    hev_in = __msa_xori_b((v16u8)hev_in, 0xff);                         \
+    filt = filt & (v16i8)hev_in;                                        \
+                                                                        \
+    q1_m = __msa_subs_s_b(q1_m, filt);                                  \
+    q1_out = __msa_xori_b((v16u8)q1_m, 0x80);                           \
+    p1_m = __msa_adds_s_b(p1_m, filt);                                  \
+    p1_out = __msa_xori_b((v16u8)p1_m, 0x80);                           \
+  }
 
-#define VPX_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in,  \
-                           p1_out, p0_out, q0_out, q1_out) {             \
-  v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign;                    \
-  v16i8 filt, filt1, filt2, cnst4b, cnst3b;                              \
-  v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h;                \
-                                                                         \
-  p1_m = (v16i8)__msa_xori_b(p1_in, 0x80);                               \
-  p0_m = (v16i8)__msa_xori_b(p0_in, 0x80);                               \
-  q0_m = (v16i8)__msa_xori_b(q0_in, 0x80);                               \
-  q1_m = (v16i8)__msa_xori_b(q1_in, 0x80);                               \
-                                                                         \
-  filt = __msa_subs_s_b(p1_m, q1_m);                                     \
-                                                                         \
-  filt = filt & (v16i8)hev_in;                                           \
-                                                                         \
-  q0_sub_p0 = q0_m - p0_m;                                               \
-  filt_sign = __msa_clti_s_b(filt, 0);                                   \
-                                                                         \
-  cnst3h = __msa_ldi_h(3);                                               \
-  q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0);               \
-  q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h);       \
-  filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt);                         \
-  filt_r += q0_sub_p0_r;                                                 \
-  filt_r = __msa_sat_s_h(filt_r, 7);                                     \
-                                                                         \
-  q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0, q0_sub_p0);               \
-  q0_sub_p0_l = __msa_dotp_s_h((v16i8)q0_sub_p0_l, (v16i8)cnst3h);       \
-  filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt);                         \
-  filt_l += q0_sub_p0_l;                                                 \
-  filt_l = __msa_sat_s_h(filt_l, 7);                                     \
-                                                                         \
-  filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r);                    \
-  filt = filt & (v16i8)mask_in;                                          \
-                                                                         \
-  cnst4b = __msa_ldi_b(4);                                               \
-  filt1 = __msa_adds_s_b(filt, cnst4b);                                  \
-  filt1 >>= 3;                                                           \
-                                                                         \
-  cnst3b = __msa_ldi_b(3);                                               \
-  filt2 = __msa_adds_s_b(filt, cnst3b);                                  \
-  filt2 >>= 3;                                                           \
-                                                                         \
-  q0_m = __msa_subs_s_b(q0_m, filt1);                                    \
-  q0_out = __msa_xori_b((v16u8)q0_m, 0x80);                              \
-  p0_m = __msa_adds_s_b(p0_m, filt2);                                    \
-  p0_out = __msa_xori_b((v16u8)p0_m, 0x80);                              \
-                                                                         \
-  filt = __msa_srari_b(filt1, 1);                                        \
-  hev_in = __msa_xori_b((v16u8)hev_in, 0xff);                            \
-  filt = filt & (v16i8)hev_in;                                           \
-                                                                         \
-  q1_m = __msa_subs_s_b(q1_m, filt);                                     \
-  q1_out = __msa_xori_b((v16u8)q1_m, 0x80);                              \
-  p1_m = __msa_adds_s_b(p1_m, filt);                                     \
-  p1_out = __msa_xori_b((v16u8)p1_m, 0x80);                              \
-}
+#define VPX_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \
+                           p1_out, p0_out, q0_out, q1_out)              \
+  {                                                                     \
+    v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign;                 \
+    v16i8 filt, filt1, filt2, cnst4b, cnst3b;                           \
+    v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h;             \
+                                                                        \
+    p1_m = (v16i8)__msa_xori_b(p1_in, 0x80);                            \
+    p0_m = (v16i8)__msa_xori_b(p0_in, 0x80);                            \
+    q0_m = (v16i8)__msa_xori_b(q0_in, 0x80);                            \
+    q1_m = (v16i8)__msa_xori_b(q1_in, 0x80);                            \
+                                                                        \
+    filt = __msa_subs_s_b(p1_m, q1_m);                                  \
+                                                                        \
+    filt = filt & (v16i8)hev_in;                                        \
+                                                                        \
+    q0_sub_p0 = q0_m - p0_m;                                            \
+    filt_sign = __msa_clti_s_b(filt, 0);                                \
+                                                                        \
+    cnst3h = __msa_ldi_h(3);                                            \
+    q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0);            \
+    q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h);    \
+    filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt);                      \
+    filt_r += q0_sub_p0_r;                                              \
+    filt_r = __msa_sat_s_h(filt_r, 7);                                  \
+                                                                        \
+    q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0, q0_sub_p0);            \
+    q0_sub_p0_l = __msa_dotp_s_h((v16i8)q0_sub_p0_l, (v16i8)cnst3h);    \
+    filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt);                      \
+    filt_l += q0_sub_p0_l;                                              \
+    filt_l = __msa_sat_s_h(filt_l, 7);                                  \
+                                                                        \
+    filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r);                 \
+    filt = filt & (v16i8)mask_in;                                       \
+                                                                        \
+    cnst4b = __msa_ldi_b(4);                                            \
+    filt1 = __msa_adds_s_b(filt, cnst4b);                               \
+    filt1 >>= 3;                                                        \
+                                                                        \
+    cnst3b = __msa_ldi_b(3);                                            \
+    filt2 = __msa_adds_s_b(filt, cnst3b);                               \
+    filt2 >>= 3;                                                        \
+                                                                        \
+    q0_m = __msa_subs_s_b(q0_m, filt1);                                 \
+    q0_out = __msa_xori_b((v16u8)q0_m, 0x80);                           \
+    p0_m = __msa_adds_s_b(p0_m, filt2);                                 \
+    p0_out = __msa_xori_b((v16u8)p0_m, 0x80);                           \
+                                                                        \
+    filt = __msa_srari_b(filt1, 1);                                     \
+    hev_in = __msa_xori_b((v16u8)hev_in, 0xff);                         \
+    filt = filt & (v16i8)hev_in;                                        \
+                                                                        \
+    q1_m = __msa_subs_s_b(q1_m, filt);                                  \
+    q1_out = __msa_xori_b((v16u8)q1_m, 0x80);                           \
+    p1_m = __msa_adds_s_b(p1_m, filt);                                  \
+    p1_out = __msa_xori_b((v16u8)p1_m, 0x80);                           \
+  }
 
-#define VPX_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) {  \
-  v16u8 tmp, p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0;         \
-  v16u8 zero_in = { 0 };                                                 \
-                                                                         \
-  tmp = __msa_ori_b(zero_in, 1);                                         \
-  p2_a_sub_p0 = __msa_asub_u_b(p2_in, p0_in);                            \
-  q2_a_sub_q0 = __msa_asub_u_b(q2_in, q0_in);                            \
-  p3_a_sub_p0 = __msa_asub_u_b(p3_in, p0_in);                            \
-  q3_a_sub_q0 = __msa_asub_u_b(q3_in, q0_in);                            \
-                                                                         \
-  p2_a_sub_p0 = __msa_max_u_b(p2_a_sub_p0, q2_a_sub_q0);                 \
-  flat_out = __msa_max_u_b(p2_a_sub_p0, flat_out);                       \
-  p3_a_sub_p0 = __msa_max_u_b(p3_a_sub_p0, q3_a_sub_q0);                 \
-  flat_out = __msa_max_u_b(p3_a_sub_p0, flat_out);                       \
-                                                                         \
-  flat_out = (tmp < (v16u8)flat_out);                                    \
-  flat_out = __msa_xori_b(flat_out, 0xff);                               \
-  flat_out = flat_out & (mask);                                          \
-}
+#define VPX_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) \
+  {                                                                   \
+    v16u8 tmp, p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0;    \
+    v16u8 zero_in = { 0 };                                            \
+                                                                      \
+    tmp = __msa_ori_b(zero_in, 1);                                    \
+    p2_a_sub_p0 = __msa_asub_u_b(p2_in, p0_in);                       \
+    q2_a_sub_q0 = __msa_asub_u_b(q2_in, q0_in);                       \
+    p3_a_sub_p0 = __msa_asub_u_b(p3_in, p0_in);                       \
+    q3_a_sub_q0 = __msa_asub_u_b(q3_in, q0_in);                       \
+                                                                      \
+    p2_a_sub_p0 = __msa_max_u_b(p2_a_sub_p0, q2_a_sub_q0);            \
+    flat_out = __msa_max_u_b(p2_a_sub_p0, flat_out);                  \
+    p3_a_sub_p0 = __msa_max_u_b(p3_a_sub_p0, q3_a_sub_q0);            \
+    flat_out = __msa_max_u_b(p3_a_sub_p0, flat_out);                  \
+                                                                      \
+    flat_out = (tmp < (v16u8)flat_out);                               \
+    flat_out = __msa_xori_b(flat_out, 0xff);                          \
+    flat_out = flat_out & (mask);                                     \
+  }
 
-#define VPX_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in,  \
-                  q5_in, q6_in, q7_in, flat_in, flat2_out) {        \
-  v16u8 tmp, zero_in = { 0 };                                       \
-  v16u8 p4_a_sub_p0, q4_a_sub_q0, p5_a_sub_p0, q5_a_sub_q0;         \
-  v16u8 p6_a_sub_p0, q6_a_sub_q0, p7_a_sub_p0, q7_a_sub_q0;         \
-                                                                    \
-  tmp = __msa_ori_b(zero_in, 1);                                    \
-  p4_a_sub_p0 = __msa_asub_u_b(p4_in, p0_in);                       \
-  q4_a_sub_q0 = __msa_asub_u_b(q4_in, q0_in);                       \
-  p5_a_sub_p0 = __msa_asub_u_b(p5_in, p0_in);                       \
-  q5_a_sub_q0 = __msa_asub_u_b(q5_in, q0_in);                       \
-  p6_a_sub_p0 = __msa_asub_u_b(p6_in, p0_in);                       \
-  q6_a_sub_q0 = __msa_asub_u_b(q6_in, q0_in);                       \
-  p7_a_sub_p0 = __msa_asub_u_b(p7_in, p0_in);                       \
-  q7_a_sub_q0 = __msa_asub_u_b(q7_in, q0_in);                       \
-                                                                    \
-  p4_a_sub_p0 = __msa_max_u_b(p4_a_sub_p0, q4_a_sub_q0);            \
-  flat2_out = __msa_max_u_b(p5_a_sub_p0, q5_a_sub_q0);              \
-  flat2_out = __msa_max_u_b(p4_a_sub_p0, flat2_out);                \
-  p6_a_sub_p0 = __msa_max_u_b(p6_a_sub_p0, q6_a_sub_q0);            \
-  flat2_out = __msa_max_u_b(p6_a_sub_p0, flat2_out);                \
-  p7_a_sub_p0 = __msa_max_u_b(p7_a_sub_p0, q7_a_sub_q0);            \
-  flat2_out = __msa_max_u_b(p7_a_sub_p0, flat2_out);                \
-                                                                    \
-  flat2_out = (tmp < (v16u8)flat2_out);                             \
-  flat2_out = __msa_xori_b(flat2_out, 0xff);                        \
-  flat2_out = flat2_out & flat_in;                                  \
-}
+#define VPX_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in, q5_in, \
+                  q6_in, q7_in, flat_in, flat2_out)                       \
+  {                                                                       \
+    v16u8 tmp, zero_in = { 0 };                                           \
+    v16u8 p4_a_sub_p0, q4_a_sub_q0, p5_a_sub_p0, q5_a_sub_q0;             \
+    v16u8 p6_a_sub_p0, q6_a_sub_q0, p7_a_sub_p0, q7_a_sub_q0;             \
+                                                                          \
+    tmp = __msa_ori_b(zero_in, 1);                                        \
+    p4_a_sub_p0 = __msa_asub_u_b(p4_in, p0_in);                           \
+    q4_a_sub_q0 = __msa_asub_u_b(q4_in, q0_in);                           \
+    p5_a_sub_p0 = __msa_asub_u_b(p5_in, p0_in);                           \
+    q5_a_sub_q0 = __msa_asub_u_b(q5_in, q0_in);                           \
+    p6_a_sub_p0 = __msa_asub_u_b(p6_in, p0_in);                           \
+    q6_a_sub_q0 = __msa_asub_u_b(q6_in, q0_in);                           \
+    p7_a_sub_p0 = __msa_asub_u_b(p7_in, p0_in);                           \
+    q7_a_sub_q0 = __msa_asub_u_b(q7_in, q0_in);                           \
+                                                                          \
+    p4_a_sub_p0 = __msa_max_u_b(p4_a_sub_p0, q4_a_sub_q0);                \
+    flat2_out = __msa_max_u_b(p5_a_sub_p0, q5_a_sub_q0);                  \
+    flat2_out = __msa_max_u_b(p4_a_sub_p0, flat2_out);                    \
+    p6_a_sub_p0 = __msa_max_u_b(p6_a_sub_p0, q6_a_sub_q0);                \
+    flat2_out = __msa_max_u_b(p6_a_sub_p0, flat2_out);                    \
+    p7_a_sub_p0 = __msa_max_u_b(p7_a_sub_p0, q7_a_sub_q0);                \
+    flat2_out = __msa_max_u_b(p7_a_sub_p0, flat2_out);                    \
+                                                                          \
+    flat2_out = (tmp < (v16u8)flat2_out);                                 \
+    flat2_out = __msa_xori_b(flat2_out, 0xff);                            \
+    flat2_out = flat2_out & flat_in;                                      \
+  }
 
-#define VPX_FILTER8(p3_in, p2_in, p1_in, p0_in,                  \
-                    q0_in, q1_in, q2_in, q3_in,                  \
-                    p2_filt8_out, p1_filt8_out, p0_filt8_out,    \
-                    q0_filt8_out, q1_filt8_out, q2_filt8_out) {  \
-  v8u16 tmp0, tmp1, tmp2;                                        \
-                                                                 \
-  tmp2 = p2_in + p1_in + p0_in;                                  \
-  tmp0 = p3_in << 1;                                             \
-                                                                 \
-  tmp0 = tmp0 + tmp2 + q0_in;                                    \
-  tmp1 = tmp0 + p3_in + p2_in;                                   \
-  p2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);           \
-                                                                 \
-  tmp1 = tmp0 + p1_in + q1_in;                                   \
-  p1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);           \
-                                                                 \
-  tmp1 = q2_in + q1_in + q0_in;                                  \
-  tmp2 = tmp2 + tmp1;                                            \
-  tmp0 = tmp2 + (p0_in);                                         \
-  tmp0 = tmp0 + (p3_in);                                         \
-  p0_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp0, 3);           \
-                                                                 \
-  tmp0 = q2_in + q3_in;                                          \
-  tmp0 = p0_in + tmp1 + tmp0;                                    \
-  tmp1 = q3_in + q3_in;                                          \
-  tmp1 = tmp1 + tmp0;                                            \
-  q2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);           \
-                                                                 \
-  tmp0 = tmp2 + q3_in;                                           \
-  tmp1 = tmp0 + q0_in;                                           \
-  q0_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);           \
-                                                                 \
-  tmp1 = tmp0 - p2_in;                                           \
-  tmp0 = q1_in + q3_in;                                          \
-  tmp1 = tmp0 + tmp1;                                            \
-  q1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);           \
-}
+#define VPX_FILTER8(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
+                    p2_filt8_out, p1_filt8_out, p0_filt8_out, q0_filt8_out, \
+                    q1_filt8_out, q2_filt8_out)                             \
+  {                                                                         \
+    v8u16 tmp0, tmp1, tmp2;                                                 \
+                                                                            \
+    tmp2 = p2_in + p1_in + p0_in;                                           \
+    tmp0 = p3_in << 1;                                                      \
+                                                                            \
+    tmp0 = tmp0 + tmp2 + q0_in;                                             \
+    tmp1 = tmp0 + p3_in + p2_in;                                            \
+    p2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);                    \
+                                                                            \
+    tmp1 = tmp0 + p1_in + q1_in;                                            \
+    p1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);                    \
+                                                                            \
+    tmp1 = q2_in + q1_in + q0_in;                                           \
+    tmp2 = tmp2 + tmp1;                                                     \
+    tmp0 = tmp2 + (p0_in);                                                  \
+    tmp0 = tmp0 + (p3_in);                                                  \
+    p0_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp0, 3);                    \
+                                                                            \
+    tmp0 = q2_in + q3_in;                                                   \
+    tmp0 = p0_in + tmp1 + tmp0;                                             \
+    tmp1 = q3_in + q3_in;                                                   \
+    tmp1 = tmp1 + tmp0;                                                     \
+    q2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);                    \
+                                                                            \
+    tmp0 = tmp2 + q3_in;                                                    \
+    tmp1 = tmp0 + q0_in;                                                    \
+    q0_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);                    \
+                                                                            \
+    tmp1 = tmp0 - p2_in;                                                    \
+    tmp0 = q1_in + q3_in;                                                   \
+    tmp1 = tmp0 + tmp1;                                                     \
+    q1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3);                    \
+  }
 
-#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in,                 \
-                     q0_in, q1_in, q2_in, q3_in,                 \
-                     limit_in, b_limit_in, thresh_in,            \
-                     hev_out, mask_out, flat_out) {              \
-  v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m;  \
-  v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m;  \
-                                                                 \
-  /* absolute subtraction of pixel values */                     \
-  p3_asub_p2_m = __msa_asub_u_b(p3_in, p2_in);                   \
-  p2_asub_p1_m = __msa_asub_u_b(p2_in, p1_in);                   \
-  p1_asub_p0_m = __msa_asub_u_b(p1_in, p0_in);                   \
-  q1_asub_q0_m = __msa_asub_u_b(q1_in, q0_in);                   \
-  q2_asub_q1_m = __msa_asub_u_b(q2_in, q1_in);                   \
-  q3_asub_q2_m = __msa_asub_u_b(q3_in, q2_in);                   \
-  p0_asub_q0_m = __msa_asub_u_b(p0_in, q0_in);                   \
-  p1_asub_q1_m = __msa_asub_u_b(p1_in, q1_in);                   \
-                                                                 \
-  /* calculation of hev */                                       \
-  flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m);          \
-  hev_out = thresh_in < (v16u8)flat_out;                         \
-                                                                 \
-  /* calculation of mask */                                      \
-  p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m);     \
-  p1_asub_q1_m >>= 1;                                            \
-  p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m);     \
-                                                                 \
-  mask_out = b_limit_in < p0_asub_q0_m;                          \
-  mask_out = __msa_max_u_b(flat_out, mask_out);                  \
-  p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m);      \
-  mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out);              \
-  q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m);      \
-  mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out);              \
-                                                                 \
-  mask_out = limit_in < (v16u8)mask_out;                         \
-  mask_out = __msa_xori_b(mask_out, 0xff);                       \
-}
-#endif  /* VPX_DSP_LOOPFILTER_MSA_H_ */
+#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
+                     limit_in, b_limit_in, thresh_in, hev_out, mask_out,     \
+                     flat_out)                                               \
+  {                                                                          \
+    v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m;            \
+    v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m;            \
+                                                                             \
+    /* absolute subtraction of pixel values */                               \
+    p3_asub_p2_m = __msa_asub_u_b(p3_in, p2_in);                             \
+    p2_asub_p1_m = __msa_asub_u_b(p2_in, p1_in);                             \
+    p1_asub_p0_m = __msa_asub_u_b(p1_in, p0_in);                             \
+    q1_asub_q0_m = __msa_asub_u_b(q1_in, q0_in);                             \
+    q2_asub_q1_m = __msa_asub_u_b(q2_in, q1_in);                             \
+    q3_asub_q2_m = __msa_asub_u_b(q3_in, q2_in);                             \
+    p0_asub_q0_m = __msa_asub_u_b(p0_in, q0_in);                             \
+    p1_asub_q1_m = __msa_asub_u_b(p1_in, q1_in);                             \
+                                                                             \
+    /* calculation of hev */                                                 \
+    flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m);                    \
+    hev_out = thresh_in < (v16u8)flat_out;                                   \
+                                                                             \
+    /* calculation of mask */                                                \
+    p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m);               \
+    p1_asub_q1_m >>= 1;                                                      \
+    p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m);               \
+                                                                             \
+    mask_out = b_limit_in < p0_asub_q0_m;                                    \
+    mask_out = __msa_max_u_b(flat_out, mask_out);                            \
+    p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m);                \
+    mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out);                        \
+    q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m);                \
+    mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out);                        \
+                                                                             \
+    mask_out = limit_in < (v16u8)mask_out;                                   \
+    mask_out = __msa_xori_b(mask_out, 0xff);                                 \
+  }
+#endif /* VPX_DSP_LOOPFILTER_MSA_H_ */
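
(Annotation: for reference, a scalar model of the 4-tap update that VPX_LPF_FILTER4_8W/_4W vectorize. It mirrors the generic C loop filter; the names and the saturating-clamp helper are illustrative, not part of this header.)

#include <stdint.h>

/* Saturate an int to the signed 8-bit range; models __msa_adds/subs_s_b
 * and the sat_s_h(..., 7) steps. */
static int8_t sclamp(int v) {
  return (int8_t)(v < -128 ? -128 : (v > 127 ? 127 : v));
}

/* One pixel of VPX_LPF_FILTER4: mask/hev are the per-pixel flags
 * (0 or -1) produced by LPF_MASK_HEV. */
static void filter4_scalar(uint8_t *p1, uint8_t *p0, uint8_t *q0, uint8_t *q1,
                           int8_t mask, int8_t hev) {
  /* Bias to signed range: the xori 0x80 at the top of the macro. */
  const int8_t ps1 = (int8_t)(*p1 ^ 0x80), ps0 = (int8_t)(*p0 ^ 0x80);
  const int8_t qs0 = (int8_t)(*q0 ^ 0x80), qs1 = (int8_t)(*q1 ^ 0x80);
  int8_t filt, filt1, filt2;

  filt = (int8_t)(sclamp(ps1 - qs1) & hev);               /* subs_s_b + hev  */
  filt = (int8_t)(sclamp(filt + 3 * (qs0 - ps0)) & mask); /* 16-bit dotp sum */
  filt1 = (int8_t)(sclamp(filt + 4) >> 3);                /* adds_s_b(f, 4)  */
  filt2 = (int8_t)(sclamp(filt + 3) >> 3);                /* adds_s_b(f, 3)  */
  *q0 = (uint8_t)(sclamp(qs0 - filt1) ^ 0x80);
  *p0 = (uint8_t)(sclamp(ps0 + filt2) ^ 0x80);
  filt = (int8_t)(((filt1 + 1) >> 1) & (int8_t)~hev);     /* srari_b(f1, 1)  */
  *q1 = (uint8_t)(sclamp(qs1 - filt) ^ 0x80);
  *p1 = (uint8_t)(sclamp(ps1 + filt) ^ 0x80);
}

(VPX_FILTER8 likewise computes the standard rounded 7-tap averages by sharing partial sums, e.g. p2' = (3*p3 + 2*p2 + p1 + p0 + q0 + 4) >> 3.)
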
diff --git a/vpx_dsp/mips/macros_msa.h b/vpx_dsp/mips/macros_msa.h
index 91e3615cf87ba4f41538524d5195c869dd0a6533..ed00cc5dcbca60c7cdf24d07f264c3661e28b9af 100644
--- a/vpx_dsp/mips/macros_msa.h
+++ b/vpx_dsp/mips/macros_msa.h
@@ -38,194 +38,186 @@
 #define ST_SW(...) ST_W(v4i32, __VA_ARGS__)
 
 #if (__mips_isa_rev >= 6)
-#define LH(psrc) ({                                 \
-  const uint8_t *psrc_m = (const uint8_t *)(psrc);  \
-  uint16_t val_m;                                   \
-                                                    \
-  __asm__ __volatile__ (                            \
-      "lh  %[val_m],  %[psrc_m]  \n\t"              \
-                                                    \
-      : [val_m] "=r" (val_m)                        \
-      : [psrc_m] "m" (*psrc_m)                      \
-  );                                                \
-                                                    \
-  val_m;                                            \
-})
-
-#define LW(psrc) ({                                 \
-  const uint8_t *psrc_m = (const uint8_t *)(psrc);  \
-  uint32_t val_m;                                   \
-                                                    \
-  __asm__ __volatile__ (                            \
-      "lw  %[val_m],  %[psrc_m]  \n\t"              \
-                                                    \
-      : [val_m] "=r" (val_m)                        \
-      : [psrc_m] "m" (*psrc_m)                      \
-  );                                                \
-                                                    \
-  val_m;                                            \
-})
+#define LH(psrc)                                          \
+  ({                                                      \
+    const uint8_t *psrc_m = (const uint8_t *)(psrc);      \
+    uint16_t val_m;                                       \
+                                                          \
+    __asm__ __volatile__("lh  %[val_m],  %[psrc_m]  \n\t" \
+                                                          \
+                         : [val_m] "=r"(val_m)            \
+                         : [psrc_m] "m"(*psrc_m));        \
+                                                          \
+    val_m;                                                \
+  })
+
+#define LW(psrc)                                          \
+  ({                                                      \
+    const uint8_t *psrc_m = (const uint8_t *)(psrc);      \
+    uint32_t val_m;                                       \
+                                                          \
+    __asm__ __volatile__("lw  %[val_m],  %[psrc_m]  \n\t" \
+                                                          \
+                         : [val_m] "=r"(val_m)            \
+                         : [psrc_m] "m"(*psrc_m));        \
+                                                          \
+    val_m;                                                \
+  })
 
 #if (__mips == 64)
-#define LD(psrc) ({                                 \
-  const uint8_t *psrc_m = (const uint8_t *)(psrc);  \
-  uint64_t val_m = 0;                               \
-                                                    \
-  __asm__ __volatile__ (                            \
-      "ld  %[val_m],  %[psrc_m]  \n\t"              \
-                                                    \
-      : [val_m] "=r" (val_m)                        \
-      : [psrc_m] "m" (*psrc_m)                      \
-  );                                                \
-                                                    \
-  val_m;                                            \
-})
+#define LD(psrc)                                          \
+  ({                                                      \
+    const uint8_t *psrc_m = (const uint8_t *)(psrc);      \
+    uint64_t val_m = 0;                                   \
+                                                          \
+    __asm__ __volatile__("ld  %[val_m],  %[psrc_m]  \n\t" \
+                                                          \
+                         : [val_m] "=r"(val_m)            \
+                         : [psrc_m] "m"(*psrc_m));        \
+                                                          \
+    val_m;                                                \
+  })
 #else  // !(__mips == 64)
-#define LD(psrc) ({                                        \
-  const uint8_t *psrc_m = (const uint8_t *)(psrc);         \
-  uint32_t val0_m, val1_m;                                 \
-  uint64_t val_m = 0;                                      \
+#define LD(psrc)                                            \
+  ({                                                        \
+    const uint8_t *psrc_m = (const uint8_t *)(psrc);        \
+    uint32_t val0_m, val1_m;                                \
+    uint64_t val_m = 0;                                     \
+                                                            \
+    val0_m = LW(psrc_m);                                    \
+    val1_m = LW(psrc_m + 4);                                \
+                                                            \
+    val_m = (uint64_t)(val1_m);                             \
+    val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \
+    val_m = (uint64_t)(val_m | (uint64_t)val0_m);           \
+                                                            \
+    val_m;                                                  \
+  })
+#endif  // (__mips == 64)
+
+#define SH(val, pdst)                                     \
+  {                                                       \
+    uint8_t *pdst_m = (uint8_t *)(pdst);                  \
+    const uint16_t val_m = (val);                         \
+                                                          \
+    __asm__ __volatile__("sh  %[val_m],  %[pdst_m]  \n\t" \
+                                                          \
+                         : [pdst_m] "=m"(*pdst_m)         \
+                         : [val_m] "r"(val_m));           \
+  }
+
+#define SW(val, pdst)                                     \
+  {                                                       \
+    uint8_t *pdst_m = (uint8_t *)(pdst);                  \
+    const uint32_t val_m = (val);                         \
+                                                          \
+    __asm__ __volatile__("sw  %[val_m],  %[pdst_m]  \n\t" \
+                                                          \
+                         : [pdst_m] "=m"(*pdst_m)         \
+                         : [val_m] "r"(val_m));           \
+  }
+
+#define SD(val, pdst)                                     \
+  {                                                       \
+    uint8_t *pdst_m = (uint8_t *)(pdst);                  \
+    const uint64_t val_m = (val);                         \
+                                                          \
+    __asm__ __volatile__("sd  %[val_m],  %[pdst_m]  \n\t" \
+                                                          \
+                         : [pdst_m] "=m"(*pdst_m)         \
+                         : [val_m] "r"(val_m));           \
+  }
+#else  // !(__mips_isa_rev >= 6)
+#define LH(psrc)                                           \
+  ({                                                       \
+    const uint8_t *psrc_m = (const uint8_t *)(psrc);       \
+    uint16_t val_m;                                        \
                                                            \
-  val0_m = LW(psrc_m);                                     \
-  val1_m = LW(psrc_m + 4);                                 \
+    __asm__ __volatile__("ulh  %[val_m],  %[psrc_m]  \n\t" \
                                                            \
-  val_m = (uint64_t)(val1_m);                              \
-  val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000);  \
-  val_m = (uint64_t)(val_m | (uint64_t)val0_m);            \
+                         : [val_m] "=r"(val_m)             \
+                         : [psrc_m] "m"(*psrc_m));         \
                                                            \
-  val_m;                                                   \
-})
-#endif  // (__mips == 64)
+    val_m;                                                 \
+  })
 
-#define SH(val, pdst) {                 \
-  uint8_t *pdst_m = (uint8_t *)(pdst);  \
-  const uint16_t val_m = (val);         \
-                                        \
-  __asm__ __volatile__ (                \
-      "sh  %[val_m],  %[pdst_m]  \n\t"  \
-                                        \
-      : [pdst_m] "=m" (*pdst_m)         \
-      : [val_m] "r" (val_m)             \
-  );                                    \
-}
-
-#define SW(val, pdst) {                 \
-  uint8_t *pdst_m = (uint8_t *)(pdst);  \
-  const uint32_t val_m = (val);         \
-                                        \
-  __asm__ __volatile__ (                \
-      "sw  %[val_m],  %[pdst_m]  \n\t"  \
-                                        \
-      : [pdst_m] "=m" (*pdst_m)         \
-      : [val_m] "r" (val_m)             \
-  );                                    \
-}
-
-#define SD(val, pdst) {                 \
-  uint8_t *pdst_m = (uint8_t *)(pdst);  \
-  const uint64_t val_m = (val);         \
-                                        \
-  __asm__ __volatile__ (                \
-      "sd  %[val_m],  %[pdst_m]  \n\t"  \
-                                        \
-      : [pdst_m] "=m" (*pdst_m)         \
-      : [val_m] "r" (val_m)             \
-  );                                    \
-}
-#else  // !(__mips_isa_rev >= 6)
-#define LH(psrc) ({                                 \
-  const uint8_t *psrc_m = (const uint8_t *)(psrc);  \
-  uint16_t val_m;                                   \
-                                                    \
-  __asm__ __volatile__ (                            \
-      "ulh  %[val_m],  %[psrc_m]  \n\t"             \
-                                                    \
-      : [val_m] "=r" (val_m)                        \
-      : [psrc_m] "m" (*psrc_m)                      \
-  );                                                \
-                                                    \
-  val_m;                                            \
-})
-
-#define LW(psrc) ({                                 \
-  const uint8_t *psrc_m = (const uint8_t *)(psrc);  \
-  uint32_t val_m;                                   \
-                                                    \
-  __asm__ __volatile__ (                            \
-      "ulw  %[val_m],  %[psrc_m]  \n\t"             \
-                                                    \
-      : [val_m] "=r" (val_m)                        \
-      : [psrc_m] "m" (*psrc_m)                      \
-  );                                                \
-                                                    \
-  val_m;                                            \
-})
+#define LW(psrc)                                           \
+  ({                                                       \
+    const uint8_t *psrc_m = (const uint8_t *)(psrc);       \
+    uint32_t val_m;                                        \
+                                                           \
+    __asm__ __volatile__("ulw  %[val_m],  %[psrc_m]  \n\t" \
+                                                           \
+                         : [val_m] "=r"(val_m)             \
+                         : [psrc_m] "m"(*psrc_m));         \
+                                                           \
+    val_m;                                                 \
+  })
 
 #if (__mips == 64)
-#define LD(psrc) ({                                 \
-  const uint8_t *psrc_m = (const uint8_t *)(psrc);  \
-  uint64_t val_m = 0;                               \
-                                                    \
-  __asm__ __volatile__ (                            \
-      "uld  %[val_m],  %[psrc_m]  \n\t"             \
-                                                    \
-      : [val_m] "=r" (val_m)                        \
-      : [psrc_m] "m" (*psrc_m)                      \
-  );                                                \
-                                                    \
-  val_m;                                            \
-})
-#else  // !(__mips == 64)
-#define LD(psrc) ({                                        \
-  const uint8_t *psrc_m1 = (const uint8_t *)(psrc);        \
-  uint32_t val0_m, val1_m;                                 \
-  uint64_t val_m = 0;                                      \
+#define LD(psrc)                                           \
+  ({                                                       \
+    const uint8_t *psrc_m = (const uint8_t *)(psrc);       \
+    uint64_t val_m = 0;                                    \
                                                            \
-  val0_m = LW(psrc_m1);                                    \
-  val1_m = LW(psrc_m1 + 4);                                \
+    __asm__ __volatile__("uld  %[val_m],  %[psrc_m]  \n\t" \
                                                            \
-  val_m = (uint64_t)(val1_m);                              \
-  val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000);  \
-  val_m = (uint64_t)(val_m | (uint64_t)val0_m);            \
+                         : [val_m] "=r"(val_m)             \
+                         : [psrc_m] "m"(*psrc_m));         \
                                                            \
-  val_m;                                                   \
-})
-#endif  // (__mips == 64)
-
-#define SH(val, pdst) {                  \
-  uint8_t *pdst_m = (uint8_t *)(pdst);   \
-  const uint16_t val_m = (val);          \
-                                         \
-  __asm__ __volatile__ (                 \
-      "ush  %[val_m],  %[pdst_m]  \n\t"  \
-                                         \
-      : [pdst_m] "=m" (*pdst_m)          \
-      : [val_m] "r" (val_m)              \
-  );                                     \
-}
-
-#define SW(val, pdst) {                  \
-  uint8_t *pdst_m = (uint8_t *)(pdst);   \
-  const uint32_t val_m = (val);          \
-                                         \
-  __asm__ __volatile__ (                 \
-      "usw  %[val_m],  %[pdst_m]  \n\t"  \
-                                         \
-      : [pdst_m] "=m" (*pdst_m)          \
-      : [val_m] "r" (val_m)              \
-  );                                     \
-}
-
-#define SD(val, pdst) {                                     \
-  uint8_t *pdst_m1 = (uint8_t *)(pdst);                     \
-  uint32_t val0_m, val1_m;                                  \
+    val_m;                                                 \
+  })
+#else  // !(__mips == 64)
+#define LD(psrc)                                            \
+  ({                                                        \
+    const uint8_t *psrc_m1 = (const uint8_t *)(psrc);       \
+    uint32_t val0_m, val1_m;                                \
+    uint64_t val_m = 0;                                     \
                                                             \
-  val0_m = (uint32_t)((val) & 0x00000000FFFFFFFF);          \
-  val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF);  \
+    val0_m = LW(psrc_m1);                                   \
+    val1_m = LW(psrc_m1 + 4);                               \
                                                             \
-  SW(val0_m, pdst_m1);                                      \
-  SW(val1_m, pdst_m1 + 4);                                  \
-}
+    val_m = (uint64_t)(val1_m);                             \
+    val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \
+    val_m = (uint64_t)(val_m | (uint64_t)val0_m);           \
+                                                            \
+    val_m;                                                  \
+  })
+#endif  // (__mips == 64)
+
+#define SH(val, pdst)                                      \
+  {                                                        \
+    uint8_t *pdst_m = (uint8_t *)(pdst);                   \
+    const uint16_t val_m = (val);                          \
+                                                           \
+    __asm__ __volatile__("ush  %[val_m],  %[pdst_m]  \n\t" \
+                                                           \
+                         : [pdst_m] "=m"(*pdst_m)          \
+                         : [val_m] "r"(val_m));            \
+  }
+
+#define SW(val, pdst)                                      \
+  {                                                        \
+    uint8_t *pdst_m = (uint8_t *)(pdst);                   \
+    const uint32_t val_m = (val);                          \
+                                                           \
+    __asm__ __volatile__("usw  %[val_m],  %[pdst_m]  \n\t" \
+                                                           \
+                         : [pdst_m] "=m"(*pdst_m)          \
+                         : [val_m] "r"(val_m));            \
+  }
+
+#define SD(val, pdst)                                        \
+  {                                                          \
+    uint8_t *pdst_m1 = (uint8_t *)(pdst);                    \
+    uint32_t val0_m, val1_m;                                 \
+                                                             \
+    val0_m = (uint32_t)((val) & 0x00000000FFFFFFFF);         \
+    val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \
+                                                             \
+    SW(val0_m, pdst_m1);                                     \
+    SW(val1_m, pdst_m1 + 4);                                 \
+  }
 #endif  // (__mips_isa_rev >= 6)
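
(Annotation: the u-prefixed opcodes in the branch above — ulh/ulw/uld, ush/usw — are the pre-R6 unaligned forms; MIPS R6 dropped them because its plain loads and stores accept unaligned addresses, which is why the R6 branch uses lh/lw/ld directly. A portable sketch of what LW/SW provide — an illustration, not how the macros are built:)

#include <stdint.h>
#include <string.h>

/* memcpy through a byte pointer is the portable way to express an
 * unaligned access; compilers lower it to a single load/store where
 * the target allows. Illustrative stand-ins for LW/SW. */
static uint32_t load_u32(const void *p) {
  uint32_t v;
  memcpy(&v, p, sizeof(v));
  return v;
}
static void store_u32(void *p, uint32_t v) { memcpy(p, &v, sizeof(v)); }
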
 
 /* Description : Load 4 words with stride
@@ -236,12 +228,13 @@
                  Load word in 'out2' from (psrc + 2 * stride)
                  Load word in 'out3' from (psrc + 3 * stride)
 */
-#define LW4(psrc, stride, out0, out1, out2, out3) {  \
-  out0 = LW((psrc));                                 \
-  out1 = LW((psrc) + stride);                        \
-  out2 = LW((psrc) + 2 * stride);                    \
-  out3 = LW((psrc) + 3 * stride);                    \
-}
+#define LW4(psrc, stride, out0, out1, out2, out3) \
+  {                                               \
+    out0 = LW((psrc));                            \
+    out1 = LW((psrc) + stride);                   \
+    out2 = LW((psrc) + 2 * stride);               \
+    out3 = LW((psrc) + 3 * stride);               \
+  }
 
 /* Description : Load double words with stride
    Arguments   : Inputs  - psrc, stride
@@ -249,14 +242,16 @@
    Details     : Load double word in 'out0' from (psrc)
                  Load double word in 'out1' from (psrc + stride)
 */
-#define LD2(psrc, stride, out0, out1) {  \
-  out0 = LD((psrc));                     \
-  out1 = LD((psrc) + stride);            \
-}
-#define LD4(psrc, stride, out0, out1, out2, out3) {  \
-  LD2((psrc), stride, out0, out1);                   \
-  LD2((psrc) + 2 * stride, stride, out2, out3);      \
-}
+#define LD2(psrc, stride, out0, out1) \
+  {                                   \
+    out0 = LD((psrc));                \
+    out1 = LD((psrc) + stride);       \
+  }
+#define LD4(psrc, stride, out0, out1, out2, out3) \
+  {                                               \
+    LD2((psrc), stride, out0, out1);              \
+    LD2((psrc) + 2 * stride, stride, out2, out3); \
+  }
 
 /* Description : Store 4 words with stride
    Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
@@ -265,12 +260,13 @@
                  Store word from 'in2' to (pdst + 2 * stride)
                  Store word from 'in3' to (pdst + 3 * stride)
 */
-#define SW4(in0, in1, in2, in3, pdst, stride) {  \
-  SW(in0, (pdst))                                \
-  SW(in1, (pdst) + stride);                      \
-  SW(in2, (pdst) + 2 * stride);                  \
-  SW(in3, (pdst) + 3 * stride);                  \
-}
+#define SW4(in0, in1, in2, in3, pdst, stride) \
+  {                                           \
+    SW(in0, (pdst));                          \
+    SW(in1, (pdst) + stride);                 \
+    SW(in2, (pdst) + 2 * stride);             \
+    SW(in3, (pdst) + 3 * stride);             \
+  }
 
 /* Description : Store 4 double words with stride
    Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
@@ -279,12 +275,13 @@
                  Store double word from 'in2' to (pdst + 2 * stride)
                  Store double word from 'in3' to (pdst + 3 * stride)
 */
-#define SD4(in0, in1, in2, in3, pdst, stride) {  \
-  SD(in0, (pdst))                                \
-  SD(in1, (pdst) + stride);                      \
-  SD(in2, (pdst) + 2 * stride);                  \
-  SD(in3, (pdst) + 3 * stride);                  \
-}
+#define SD4(in0, in1, in2, in3, pdst, stride) \
+  {                                           \
+    SD(in0, (pdst));                          \
+    SD(in1, (pdst) + stride);                 \
+    SD(in2, (pdst) + 2 * stride);             \
+    SD(in3, (pdst) + 3 * stride);             \
+  }
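(Illustrative only, not part of the patch: how the strided GP-register
load/store helpers above compose; a sketch with hypothetical names.)

static void copy_8x4(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                     int32_t dst_stride) {
  uint64_t r0, r1, r2, r3;

  LD4(src, src_stride, r0, r1, r2, r3); /* four 8-byte rows from src */
  SD4(r0, r1, r2, r3, dst, dst_stride); /* written back with dst stride */
}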
 
 /* Description : Load vectors with 16 byte elements with stride
    Arguments   : Inputs  - psrc, stride
@@ -293,45 +290,50 @@
    Details     : Load 16 byte elements in 'out0' from (psrc)
                  Load 16 byte elements in 'out1' from (psrc + stride)
 */
-#define LD_B2(RTYPE, psrc, stride, out0, out1) {  \
-  out0 = LD_B(RTYPE, (psrc));                     \
-  out1 = LD_B(RTYPE, (psrc) + stride);            \
-}
+#define LD_B2(RTYPE, psrc, stride, out0, out1) \
+  {                                            \
+    out0 = LD_B(RTYPE, (psrc));                \
+    out1 = LD_B(RTYPE, (psrc) + stride);       \
+  }
 #define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)
 #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
 
-#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) {  \
-  LD_B2(RTYPE, (psrc), stride, out0, out1);             \
-  out2 = LD_B(RTYPE, (psrc) + 2 * stride);              \
-}
+#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) \
+  {                                                  \
+    LD_B2(RTYPE, (psrc), stride, out0, out1);        \
+    out2 = LD_B(RTYPE, (psrc) + 2 * stride);         \
+  }
 #define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__)
 
-#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) {  \
-  LD_B2(RTYPE, (psrc), stride, out0, out1);                   \
-  LD_B2(RTYPE, (psrc) + 2 * stride , stride, out2, out3);     \
-}
+#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \
+  {                                                        \
+    LD_B2(RTYPE, (psrc), stride, out0, out1);              \
+    LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
+  }
 #define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)
 #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
 
-#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) {  \
-  LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3);             \
-  out4 = LD_B(RTYPE, (psrc) + 4 * stride);                          \
-}
+#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \
+  {                                                              \
+    LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3);        \
+    out4 = LD_B(RTYPE, (psrc) + 4 * stride);                     \
+  }
 #define LD_UB5(...) LD_B5(v16u8, __VA_ARGS__)
 #define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
 
-#define LD_B7(RTYPE, psrc, stride,                             \
-              out0, out1, out2, out3, out4, out5, out6) {      \
-  LD_B5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4);  \
-  LD_B2(RTYPE, (psrc) + 5 * stride, stride, out5, out6);       \
-}
+#define LD_B7(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6) \
+  {                                                                          \
+    LD_B5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4);              \
+    LD_B2(RTYPE, (psrc) + 5 * stride, stride, out5, out6);                   \
+  }
 #define LD_SB7(...) LD_B7(v16i8, __VA_ARGS__)
 
-#define LD_B8(RTYPE, psrc, stride,                                    \
-              out0, out1, out2, out3, out4, out5, out6, out7) {       \
-  LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3);               \
-  LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7);  \
-}
+#define LD_B8(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \
+              out7)                                                          \
+  {                                                                          \
+    LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3);                    \
+    LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7);       \
+  }
 #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__)
 #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
 
@@ -341,33 +343,36 @@
    Details     : Load 8 halfword elements in 'out0' from (psrc)
                  Load 8 halfword elements in 'out1' from (psrc + stride)
 */
-#define LD_H2(RTYPE, psrc, stride, out0, out1) {  \
-  out0 = LD_H(RTYPE, (psrc));                     \
-  out1 = LD_H(RTYPE, (psrc) + (stride));          \
-}
+#define LD_H2(RTYPE, psrc, stride, out0, out1) \
+  {                                            \
+    out0 = LD_H(RTYPE, (psrc));                \
+    out1 = LD_H(RTYPE, (psrc) + (stride));     \
+  }
 #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
 
-#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) {  \
-  LD_H2(RTYPE, (psrc), stride, out0, out1);                   \
-  LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3);      \
-}
+#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) \
+  {                                                        \
+    LD_H2(RTYPE, (psrc), stride, out0, out1);              \
+    LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
+  }
 #define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)
 
-#define LD_H8(RTYPE, psrc, stride,                                    \
-              out0, out1, out2, out3, out4, out5, out6, out7) {       \
-  LD_H4(RTYPE, (psrc), stride, out0, out1, out2, out3);               \
-  LD_H4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7);  \
-}
+#define LD_H8(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \
+              out7)                                                          \
+  {                                                                          \
+    LD_H4(RTYPE, (psrc), stride, out0, out1, out2, out3);                    \
+    LD_H4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7);       \
+  }
 #define LD_SH8(...) LD_H8(v8i16, __VA_ARGS__)
 
-#define LD_H16(RTYPE, psrc, stride,                                     \
-               out0, out1, out2, out3, out4, out5, out6, out7,          \
-               out8, out9, out10, out11, out12, out13, out14, out15) {  \
-  LD_H8(RTYPE, (psrc), stride,                                          \
-        out0, out1, out2, out3, out4, out5, out6, out7);                \
-  LD_H8(RTYPE, (psrc) + 8 * stride, stride,                             \
-        out8, out9, out10, out11, out12, out13, out14, out15);          \
-}
+#define LD_H16(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6,  \
+               out7, out8, out9, out10, out11, out12, out13, out14, out15)     \
+  {                                                                            \
+    LD_H8(RTYPE, (psrc), stride, out0, out1, out2, out3, out4, out5, out6,     \
+          out7);                                                               \
+    LD_H8(RTYPE, (psrc) + 8 * stride, stride, out8, out9, out10, out11, out12, \
+          out13, out14, out15);                                                \
+  }
 #define LD_SH16(...) LD_H16(v8i16, __VA_ARGS__)
 
 /* Description : Load 4x4 block of signed halfword elements from 1D source
@@ -375,45 +380,49 @@
    Arguments   : Input   - psrc
                  Outputs - out0, out1, out2, out3
 */
-#define LD4x4_SH(psrc, out0, out1, out2, out3) {         \
-  out0 = LD_SH(psrc);                                    \
-  out2 = LD_SH(psrc + 8);                                \
-  out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0);  \
-  out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2);  \
-}
+#define LD4x4_SH(psrc, out0, out1, out2, out3)            \
+  {                                                       \
+    out0 = LD_SH(psrc);                                   \
+    out2 = LD_SH(psrc + 8);                               \
+    out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
+    out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
+  }
 
 /* Description : Load 2 vectors of signed word elements with stride
    Arguments   : Inputs  - psrc, stride
                  Outputs - out0, out1
                  Return Type - signed word
 */
-#define LD_SW2(psrc, stride, out0, out1) {  \
-  out0 = LD_SW((psrc));                     \
-  out1 = LD_SW((psrc) + stride);            \
-}
+#define LD_SW2(psrc, stride, out0, out1) \
+  {                                      \
+    out0 = LD_SW((psrc));                \
+    out1 = LD_SW((psrc) + stride);       \
+  }
 
 /* Description : Store vectors of 16 byte elements with stride
    Arguments   : Inputs - in0, in1, pdst, stride
    Details     : Store 16 byte elements from 'in0' to (pdst)
                  Store 16 byte elements from 'in1' to (pdst + stride)
 */
-#define ST_B2(RTYPE, in0, in1, pdst, stride) {  \
-  ST_B(RTYPE, in0, (pdst));                     \
-  ST_B(RTYPE, in1, (pdst) + stride);            \
-}
+#define ST_B2(RTYPE, in0, in1, pdst, stride) \
+  {                                          \
+    ST_B(RTYPE, in0, (pdst));                \
+    ST_B(RTYPE, in1, (pdst) + stride);       \
+  }
 #define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
 
-#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) {  \
-  ST_B2(RTYPE, in0, in1, (pdst), stride);                 \
-  ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride);    \
-}
+#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride)   \
+  {                                                      \
+    ST_B2(RTYPE, in0, in1, (pdst), stride);              \
+    ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
+  }
 #define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
 
-#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,      \
-              pdst, stride) {                                     \
-  ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride);                 \
-  ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride);  \
-}
+#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
+  {                                                                        \
+    ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride);                        \
+    ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride);         \
+  }
 #define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__)
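(Illustrative only, not part of the patch: a vector-register block copy
pairing LD_UB8 with ST_UB8 above; a sketch with hypothetical names.)

static void copy_16x8(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                      int32_t dst_stride) {
  v16u8 s0, s1, s2, s3, s4, s5, s6, s7;

  LD_UB8(src, src_stride, s0, s1, s2, s3, s4, s5, s6, s7);
  ST_UB8(s0, s1, s2, s3, s4, s5, s6, s7, dst, dst_stride);
}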
 
 /* Description : Store vectors of 8 halfword elements with stride
@@ -421,22 +430,25 @@
    Details     : Store 8 halfword elements from 'in0' to (pdst)
                  Store 8 halfword elements from 'in1' to (pdst + stride)
 */
-#define ST_H2(RTYPE, in0, in1, pdst, stride) {  \
-  ST_H(RTYPE, in0, (pdst));                     \
-  ST_H(RTYPE, in1, (pdst) + stride);            \
-}
+#define ST_H2(RTYPE, in0, in1, pdst, stride) \
+  {                                          \
+    ST_H(RTYPE, in0, (pdst));                \
+    ST_H(RTYPE, in1, (pdst) + stride);       \
+  }
 #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
 
-#define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride) {  \
-  ST_H2(RTYPE, in0, in1, (pdst), stride);                 \
-  ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride);    \
-}
+#define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride)   \
+  {                                                      \
+    ST_H2(RTYPE, in0, in1, (pdst), stride);              \
+    ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
+  }
 #define ST_SH4(...) ST_H4(v8i16, __VA_ARGS__)
 
-#define ST_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) {  \
-  ST_H4(RTYPE, in0, in1, in2, in3, (pdst), stride);                           \
-  ST_H4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride);              \
-}
+#define ST_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
+  {                                                                        \
+    ST_H4(RTYPE, in0, in1, in2, in3, (pdst), stride);                      \
+    ST_H4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride);         \
+  }
 #define ST_SH8(...) ST_H8(v8i16, __VA_ARGS__)
 
 /* Description : Store vectors of word elements with stride
@@ -444,10 +456,11 @@
    Details     : Store 4 word elements from 'in0' to (pdst)
                  Store 4 word elements from 'in1' to (pdst + stride)
 */
-#define ST_SW2(in0, in1, pdst, stride) {  \
-  ST_SW(in0, (pdst));                     \
-  ST_SW(in1, (pdst) + stride);            \
-}
+#define ST_SW2(in0, in1, pdst, stride) \
+  {                                    \
+    ST_SW(in0, (pdst));                \
+    ST_SW(in1, (pdst) + stride);       \
+  }
 
 /* Description : Store 2x4 byte block to destination memory from input vector
    Arguments   : Inputs - in, stidx, pdst, stride
@@ -460,20 +473,21 @@
                  Index 'stidx+3' halfword element from 'in' vector is copied to
                  the GP register and stored to (pdst + 3 * stride)
 */
-#define ST2x4_UB(in, stidx, pdst, stride) {         \
-  uint16_t out0_m, out1_m, out2_m, out3_m;          \
-  uint8_t *pblk_2x4_m = (uint8_t *)(pdst);          \
-                                                    \
-  out0_m = __msa_copy_u_h((v8i16)in, (stidx));      \
-  out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1));  \
-  out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2));  \
-  out3_m = __msa_copy_u_h((v8i16)in, (stidx + 3));  \
-                                                    \
-  SH(out0_m, pblk_2x4_m);                           \
-  SH(out1_m, pblk_2x4_m + stride);                  \
-  SH(out2_m, pblk_2x4_m + 2 * stride);              \
-  SH(out3_m, pblk_2x4_m + 3 * stride);              \
-}
+#define ST2x4_UB(in, stidx, pdst, stride)            \
+  {                                                  \
+    uint16_t out0_m, out1_m, out2_m, out3_m;         \
+    uint8_t *pblk_2x4_m = (uint8_t *)(pdst);         \
+                                                     \
+    out0_m = __msa_copy_u_h((v8i16)in, (stidx));     \
+    out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1)); \
+    out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2)); \
+    out3_m = __msa_copy_u_h((v8i16)in, (stidx + 3)); \
+                                                     \
+    SH(out0_m, pblk_2x4_m);                          \
+    SH(out1_m, pblk_2x4_m + stride);                 \
+    SH(out2_m, pblk_2x4_m + 2 * stride);             \
+    SH(out3_m, pblk_2x4_m + 3 * stride);             \
+  }
 
 /* Description : Store 4x2 byte block to destination memory from input vector
    Arguments   : Inputs - in, pdst, stride
@@ -482,16 +496,17 @@
                  Index 1 word element from 'in' vector is copied to the GP
                  register and stored to (pdst + stride)
 */
-#define ST4x2_UB(in, pdst, stride) {        \
-  uint32_t out0_m, out1_m;                  \
-  uint8_t *pblk_4x2_m = (uint8_t *)(pdst);  \
-                                            \
-  out0_m = __msa_copy_u_w((v4i32)in, 0);    \
-  out1_m = __msa_copy_u_w((v4i32)in, 1);    \
-                                            \
-  SW(out0_m, pblk_4x2_m);                   \
-  SW(out1_m, pblk_4x2_m + stride);          \
-}
+#define ST4x2_UB(in, pdst, stride)           \
+  {                                          \
+    uint32_t out0_m, out1_m;                 \
+    uint8_t *pblk_4x2_m = (uint8_t *)(pdst); \
+                                             \
+    out0_m = __msa_copy_u_w((v4i32)in, 0);   \
+    out1_m = __msa_copy_u_w((v4i32)in, 1);   \
+                                             \
+    SW(out0_m, pblk_4x2_m);                  \
+    SW(out1_m, pblk_4x2_m + stride);         \
+  }
 
 /* Description : Store 4x4 byte block to destination memory from input vector
    Arguments   : Inputs - in0, in1, pdst, stride
@@ -504,35 +519,38 @@
                  'Idx3' word element from input vector 'in0' is copied to the
                  GP register and stored to (pdst + 3 * stride)
 */
-#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) {  \
-  uint32_t out0_m, out1_m, out2_m, out3_m;                          \
-  uint8_t *pblk_4x4_m = (uint8_t *)(pdst);                          \
-                                                                    \
-  out0_m = __msa_copy_u_w((v4i32)in0, idx0);                        \
-  out1_m = __msa_copy_u_w((v4i32)in0, idx1);                        \
-  out2_m = __msa_copy_u_w((v4i32)in1, idx2);                        \
-  out3_m = __msa_copy_u_w((v4i32)in1, idx3);                        \
-                                                                    \
-  SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride);          \
-}
-#define ST4x8_UB(in0, in1, pdst, stride) {                        \
-  uint8_t *pblk_4x8 = (uint8_t *)(pdst);                          \
-                                                                  \
-  ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride);               \
-  ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride);  \
-}
+#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
+  {                                                              \
+    uint32_t out0_m, out1_m, out2_m, out3_m;                     \
+    uint8_t *pblk_4x4_m = (uint8_t *)(pdst);                     \
+                                                                 \
+    out0_m = __msa_copy_u_w((v4i32)in0, idx0);                   \
+    out1_m = __msa_copy_u_w((v4i32)in0, idx1);                   \
+    out2_m = __msa_copy_u_w((v4i32)in1, idx2);                   \
+    out3_m = __msa_copy_u_w((v4i32)in1, idx3);                   \
+                                                                 \
+    SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride);     \
+  }
+#define ST4x8_UB(in0, in1, pdst, stride)                           \
+  {                                                                \
+    uint8_t *pblk_4x8 = (uint8_t *)(pdst);                         \
+                                                                   \
+    ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride);              \
+    ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \
+  }
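(Illustrative only, not part of the patch: storing one vector as a 4x4 pixel
block via the GP-register copies above; hypothetical names.)

static void store_4x4(v16u8 pred, uint8_t *dst, int32_t stride) {
  /* word lanes 0..3 of 'pred' become the four 4-byte rows of the block */
  ST4x4_UB(pred, pred, 0, 1, 2, 3, dst, stride);
}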
 
 /* Description : Store 8x1 byte block to destination memory from input vector
    Arguments   : Inputs - in, pdst
    Details     : Index 0 double word element from 'in' vector is copied to the
                  GP register and stored to (pdst)
 */
-#define ST8x1_UB(in, pdst) {              \
-  uint64_t out0_m;                        \
-                                          \
-  out0_m = __msa_copy_u_d((v2i64)in, 0);  \
-  SD(out0_m, pdst);                       \
-}
+#define ST8x1_UB(in, pdst)                 \
+  {                                        \
+    uint64_t out0_m;                       \
+                                           \
+    out0_m = __msa_copy_u_d((v2i64)in, 0); \
+    SD(out0_m, pdst);                      \
+  }
 
 /* Description : Store 8x2 byte block to destination memory from input vector
    Arguments   : Inputs - in, pdst, stride
@@ -541,16 +559,17 @@
                  Index 1 double word element from 'in' vector is copied to the
                  GP register and stored to (pdst + stride)
 */
-#define ST8x2_UB(in, pdst, stride) {        \
-  uint64_t out0_m, out1_m;                  \
-  uint8_t *pblk_8x2_m = (uint8_t *)(pdst);  \
-                                            \
-  out0_m = __msa_copy_u_d((v2i64)in, 0);    \
-  out1_m = __msa_copy_u_d((v2i64)in, 1);    \
-                                            \
-  SD(out0_m, pblk_8x2_m);                   \
-  SD(out1_m, pblk_8x2_m + stride);          \
-}
+#define ST8x2_UB(in, pdst, stride)           \
+  {                                          \
+    uint64_t out0_m, out1_m;                 \
+    uint8_t *pblk_8x2_m = (uint8_t *)(pdst); \
+                                             \
+    out0_m = __msa_copy_u_d((v2i64)in, 0);   \
+    out1_m = __msa_copy_u_d((v2i64)in, 1);   \
+                                             \
+    SD(out0_m, pblk_8x2_m);                  \
+    SD(out1_m, pblk_8x2_m + stride);         \
+  }
 
 /* Description : Store 8x4 byte block to destination memory from input
                  vectors
@@ -564,17 +583,18 @@
                  Index 1 double word element from 'in1' vector is copied to the
                  GP register and stored to (pdst + 3 * stride)
 */
-#define ST8x4_UB(in0, in1, pdst, stride) {                  \
-  uint64_t out0_m, out1_m, out2_m, out3_m;                  \
-  uint8_t *pblk_8x4_m = (uint8_t *)(pdst);                  \
-                                                            \
-  out0_m = __msa_copy_u_d((v2i64)in0, 0);                   \
-  out1_m = __msa_copy_u_d((v2i64)in0, 1);                   \
-  out2_m = __msa_copy_u_d((v2i64)in1, 0);                   \
-  out3_m = __msa_copy_u_d((v2i64)in1, 1);                   \
-                                                            \
-  SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride);  \
-}
+#define ST8x4_UB(in0, in1, pdst, stride)                     \
+  {                                                          \
+    uint64_t out0_m, out1_m, out2_m, out3_m;                 \
+    uint8_t *pblk_8x4_m = (uint8_t *)(pdst);                 \
+                                                             \
+    out0_m = __msa_copy_u_d((v2i64)in0, 0);                  \
+    out1_m = __msa_copy_u_d((v2i64)in0, 1);                  \
+    out2_m = __msa_copy_u_d((v2i64)in1, 0);                  \
+    out3_m = __msa_copy_u_d((v2i64)in1, 1);                  \
+                                                             \
+    SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride); \
+  }
 
 /* Description : Average with rounding (in0 + in1 + 1) / 2.
    Arguments   : Inputs  - in0, in1, in2, in3,
@@ -584,17 +604,19 @@
                  each unsigned byte element from 'in1' vector. Then the average
                  with rounding is calculated and written to 'out0'
 */
-#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1);    \
-  out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3);    \
-}
+#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1)   \
+  {                                                       \
+    out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1); \
+    out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
+  }
 #define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
 
-#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                 out0, out1, out2, out3) {                       \
-  AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1)                \
-  AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3)                \
-}
+#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                 out2, out3)                                                \
+  {                                                                         \
+    AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define AVER_UB4_UB(...) AVER_UB4(v16u8, __VA_ARGS__)
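(Illustrative only, not part of the patch: a rounding-average sketch in the
style of compound prediction, assuming this header; names are hypothetical.)

static void avg_16x2(const uint8_t *p0, const uint8_t *p1, int32_t stride,
                     uint8_t *dst) {
  v16u8 a0, a1, b0, b1, avg0, avg1;

  LD_UB2(p0, stride, a0, a1);
  LD_UB2(p1, stride, b0, b1);
  AVER_UB2_UB(a0, b0, a1, b1, avg0, avg1); /* (a + b + 1) >> 1 per byte */
  ST_UB2(avg0, avg1, dst, stride);
}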
 
 /* Description : Immediate number of elements to slide with zero
@@ -604,18 +626,20 @@
    Details     : Byte elements from 'zero_m' vector are slid into 'in0' by
                  the value specified in 'slide_val'
 */
-#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val) {          \
-  v16i8 zero_m = { 0 };                                              \
-  out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val);  \
-  out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val);  \
-}
+#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val)             \
+  {                                                                   \
+    v16i8 zero_m = { 0 };                                             \
+    out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
+    out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \
+  }
 #define SLDI_B2_0_SW(...) SLDI_B2_0(v4i32, __VA_ARGS__)
 
-#define SLDI_B4_0(RTYPE, in0, in1, in2, in3,            \
-                  out0, out1, out2, out3, slide_val) {  \
-  SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val);    \
-  SLDI_B2_0(RTYPE, in2, in3, out2, out3, slide_val);    \
-}
+#define SLDI_B4_0(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, \
+                  slide_val)                                         \
+  {                                                                  \
+    SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val);               \
+    SLDI_B2_0(RTYPE, in2, in3, out2, out3, slide_val);               \
+  }
 #define SLDI_B4_0_UB(...) SLDI_B4_0(v16u8, __VA_ARGS__)
 
 /* Description : Immediate number of elements to slide
@@ -625,18 +649,20 @@
    Details     : Byte elements from 'in0_0' vector are slid into 'in1_0' by
                  the value specified in 'slide_val'
 */
-#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) {  \
-  out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val);         \
-  out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val);         \
-}
+#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \
+  {                                                                       \
+    out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val);    \
+    out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val);    \
+  }
 #define SLDI_B2_UB(...) SLDI_B2(v16u8, __VA_ARGS__)
 #define SLDI_B2_SH(...) SLDI_B2(v8i16, __VA_ARGS__)
 
-#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2,      \
-                out0, out1, out2, slide_val) {                        \
-  SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val)   \
-  out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val);  \
-}
+#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2, out0, out1, \
+                out2, slide_val)                                             \
+  {                                                                          \
+    SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val);      \
+    out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val);       \
+  }
 #define SLDI_B3_SB(...) SLDI_B3(v16i8, __VA_ARGS__)
 #define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__)
 
@@ -647,19 +673,21 @@
    Details     : Byte elements from 'in0' & 'in1' are copied selectively to
                  'out0' as per control vector 'mask0'
 */
-#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) {  \
-  out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0);     \
-  out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2);     \
-}
+#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)  \
+  {                                                                   \
+    out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \
+    out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
+  }
 #define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
 #define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
 #define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
 
-#define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3,     \
-                out0, out1, out2, out3) {                        \
-  VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1);  \
-  VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3);  \
-}
+#define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3, out0, out1, out2, \
+                out3)                                                          \
+  {                                                                            \
+    VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1);              \
+    VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3);              \
+  }
 #define VSHF_B4_SB(...) VSHF_B4(v16i8, __VA_ARGS__)
 #define VSHF_B4_SH(...) VSHF_B4(v8i16, __VA_ARGS__)
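(Illustrative only, not part of the patch: VSHF_B2 above wraps the intrinsic
below once per output; a byte-gather sketch with hypothetical names.)

static v16i8 gather_bytes(v16i8 in0, v16i8 in1, v16i8 mask) {
  /* each mask byte selects one byte from the concatenated {in1, in0} pair */
  return __msa_vshf_b(mask, in1, in0);
}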
 
@@ -673,18 +701,19 @@
                  The multiplication result of adjacent odd-even elements
                  are added together and written to the 'out0' vector
 */
-#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) {  \
-  out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0);        \
-  out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1);        \
-}
+#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+  {                                                             \
+    out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0);   \
+    out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1);   \
+  }
 #define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__)
 
-#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3,         \
-                 cnst0, cnst1, cnst2, cnst3,                \
-                 out0, out1, out2, out3) {                  \
-  DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);  \
-  DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);  \
-}
+#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+                 cnst3, out0, out1, out2, out3)                          \
+  {                                                                      \
+    DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);             \
+    DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);             \
+  }
 #define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__)
 
 /* Description : Dot product of byte vector elements
@@ -697,17 +726,19 @@
                  The multiplication result of adjacent odd-even elements
                  are added together and written to the 'out0' vector
 */
-#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) {  \
-  out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0);        \
-  out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1);        \
-}
+#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+  {                                                             \
+    out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0);   \
+    out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1);   \
+  }
 #define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)
 
-#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3,                     \
-                 cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) {  \
-  DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);              \
-  DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);              \
-}
+#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+                 cnst3, out0, out1, out2, out3)                          \
+  {                                                                      \
+    DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);             \
+    DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);             \
+  }
 #define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)
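(Illustrative only, not part of the patch: one multiply-accumulate stage of a
byte filter built on DOTP_SB2 above; names are hypothetical.)

static v8i16 dot_two_rows(v16i8 s0, v16i8 s1, v16i8 taps) {
  v8i16 d0, d1;

  DOTP_SB2_SH(s0, s1, taps, taps, d0, d1); /* adjacent products summed */
  return __msa_adds_s_h(d0, d1);           /* saturating combine of rows */
}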
 
 /* Description : Dot product of halfword vector elements
@@ -720,18 +751,19 @@
                  The multiplication result of adjacent odd-even elements
                  are added together and written to the 'out0' vector
 */
-#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) {  \
-  out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0);        \
-  out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1);        \
-}
+#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+  {                                                             \
+    out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0);   \
+    out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1);   \
+  }
 #define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)
 
-#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3,         \
-                 cnst0, cnst1, cnst2, cnst3,                \
-                 out0, out1, out2, out3) {                  \
-  DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);  \
-  DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);  \
-}
+#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+                 cnst3, out0, out1, out2, out3)                          \
+  {                                                                      \
+    DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);             \
+    DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);             \
+  }
 #define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)
 
 /* Description : Dot product of word vector elements
@@ -744,10 +776,11 @@
                  The multiplication result of adjacent odd-even elements
                  are added together and written to the 'out0' vector
 */
-#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) {  \
-  out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0);        \
-  out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1);        \
-}
+#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+  {                                                             \
+    out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0);   \
+    out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1);   \
+  }
 #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
 
 /* Description : Dot product & addition of byte vector elements
@@ -760,17 +793,19 @@
                  The multiplication result of adjacent odd-even elements
                  are added to the 'out0' vector
 */
-#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) {         \
-  out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0);  \
-  out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1);  \
-}
+#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)            \
+  {                                                                         \
+    out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0); \
+    out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1); \
+  }
 #define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)
 
-#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3,                     \
-                  cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) {  \
-  DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);              \
-  DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);              \
-}
+#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+                  cnst3, out0, out1, out2, out3)                          \
+  {                                                                       \
+    DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);             \
+    DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);             \
+  }
 #define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)
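(Illustrative only, not part of the patch: accumulating further tap pairs on
top of an initial dot product, as the filters built on these macros typically
do; names are hypothetical.)

static void accumulate_taps(v16i8 s0, v16i8 s1, v16i8 t0, v16i8 t1,
                            v8i16 *acc0, v8i16 *acc1) {
  v8i16 a0 = *acc0;
  v8i16 a1 = *acc1;

  DPADD_SB2_SH(s0, s1, t0, t1, a0, a1); /* a += dotp(s, t) per vector */
  *acc0 = a0;
  *acc1 = a1;
}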
 
 /* Description : Dot product & addition of halfword vector elements
@@ -783,10 +818,11 @@
                  The multiplication result of adjacent odd-even elements
                  are added to the 'out0' vector
 */
-#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) {         \
-  out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0);  \
-  out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1);  \
-}
+#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)            \
+  {                                                                         \
+    out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \
+    out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \
+  }
 #define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)
 
 /* Description : Dot product & addition of double word vector elements
@@ -799,10 +835,11 @@
                  The multiplication result of adjacent odd-even elements
                  are added to the 'out0' vector
 */
-#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1) {                       \
-  out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0);  \
-  out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1);  \
-}
+#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1)                          \
+  {                                                                         \
+    out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \
+    out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \
+  }
 #define DPADD_SD2_SD(...) DPADD_SD2(v2i64, __VA_ARGS__)
 
 /* Description : Minimum values between unsigned elements of
@@ -813,16 +850,18 @@
    Details     : Minimum of unsigned halfword element values from 'in0' and
                  'min_vec' is written to output vector 'in0'
 */
-#define MIN_UH2(RTYPE, in0, in1, min_vec) {         \
-  in0 = (RTYPE)__msa_min_u_h((v8u16)in0, min_vec);  \
-  in1 = (RTYPE)__msa_min_u_h((v8u16)in1, min_vec);  \
-}
+#define MIN_UH2(RTYPE, in0, in1, min_vec)            \
+  {                                                  \
+    in0 = (RTYPE)__msa_min_u_h((v8u16)in0, min_vec); \
+    in1 = (RTYPE)__msa_min_u_h((v8u16)in1, min_vec); \
+  }
 #define MIN_UH2_UH(...) MIN_UH2(v8u16, __VA_ARGS__)
 
-#define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec) {  \
-  MIN_UH2(RTYPE, in0, in1, min_vec);                   \
-  MIN_UH2(RTYPE, in2, in3, min_vec);                   \
-}
+#define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec) \
+  {                                                 \
+    MIN_UH2(RTYPE, in0, in1, min_vec);              \
+    MIN_UH2(RTYPE, in2, in3, min_vec);              \
+  }
 #define MIN_UH4_UH(...) MIN_UH4(v8u16, __VA_ARGS__)
 
 /* Description : Clips all signed halfword elements of input vector
@@ -831,22 +870,25 @@
                  Output - out_m
                  Return Type - signed halfword
 */
-#define CLIP_SH_0_255(in) ({                          \
-  v8i16 max_m = __msa_ldi_h(255);                     \
-  v8i16 out_m;                                        \
-                                                      \
-  out_m = __msa_maxi_s_h((v8i16)in, 0);               \
-  out_m = __msa_min_s_h((v8i16)max_m, (v8i16)out_m);  \
-  out_m;                                              \
-})
-#define CLIP_SH2_0_255(in0, in1) {  \
-  in0 = CLIP_SH_0_255(in0);         \
-  in1 = CLIP_SH_0_255(in1);         \
-}
-#define CLIP_SH4_0_255(in0, in1, in2, in3) {  \
-  CLIP_SH2_0_255(in0, in1);                   \
-  CLIP_SH2_0_255(in2, in3);                   \
-}
+#define CLIP_SH_0_255(in)                              \
+  ({                                                   \
+    v8i16 max_m = __msa_ldi_h(255);                    \
+    v8i16 out_m;                                       \
+                                                       \
+    out_m = __msa_maxi_s_h((v8i16)in, 0);              \
+    out_m = __msa_min_s_h((v8i16)max_m, (v8i16)out_m); \
+    out_m;                                             \
+  })
+#define CLIP_SH2_0_255(in0, in1) \
+  {                              \
+    in0 = CLIP_SH_0_255(in0);    \
+    in1 = CLIP_SH_0_255(in1);    \
+  }
+#define CLIP_SH4_0_255(in0, in1, in2, in3) \
+  {                                        \
+    CLIP_SH2_0_255(in0, in1);              \
+    CLIP_SH2_0_255(in2, in3);              \
+  }
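(Illustrative only, not part of the patch: clamping halfword filter output to
pixel range before packing down to bytes; a sketch with hypothetical names.)

static v16u8 clip_and_pack(v8i16 r0, v8i16 r1) {
  CLIP_SH2_0_255(r0, r1);
  /* even-byte pack narrows the two clipped halfword vectors to 16 bytes */
  return (v16u8)__msa_pckev_b((v16i8)r1, (v16i8)r0);
}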
 
 /* Description : Horizontal addition of 4 signed word elements of input vector
    Arguments   : Input  - in       (signed word vector)
@@ -855,16 +897,17 @@
    Details     : 4 signed word elements of 'in' vector are added together and
                  the resulting integer sum is returned
 */
-#define HADD_SW_S32(in) ({                        \
-  v2i64 res0_m, res1_m;                           \
-  int32_t sum_m;                                  \
-                                                  \
-  res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);  \
-  res1_m = __msa_splati_d(res0_m, 1);             \
-  res0_m = res0_m + res1_m;                       \
-  sum_m = __msa_copy_s_w((v4i32)res0_m, 0);       \
-  sum_m;                                          \
-})
+#define HADD_SW_S32(in)                            \
+  ({                                               \
+    v2i64 res0_m, res1_m;                          \
+    int32_t sum_m;                                 \
+                                                   \
+    res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in); \
+    res1_m = __msa_splati_d(res0_m, 1);            \
+    res0_m = res0_m + res1_m;                      \
+    sum_m = __msa_copy_s_w((v4i32)res0_m, 0);      \
+    sum_m;                                         \
+  })
 
 /* Description : Horizontal addition of 8 unsigned halfword elements
    Arguments   : Inputs  - in       (unsigned halfword vector)
@@ -873,18 +916,19 @@
    Details     : 8 unsigned halfword elements of input vector are added
                  together and the resulting integer sum is returned
 */
-#define HADD_UH_U32(in) ({                           \
-  v4u32 res_m;                                       \
-  v2u64 res0_m, res1_m;                              \
-  uint32_t sum_m;                                    \
-                                                     \
-  res_m = __msa_hadd_u_w((v8u16)in, (v8u16)in);      \
-  res0_m = __msa_hadd_u_d(res_m, res_m);             \
-  res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);  \
-  res0_m = res0_m + res1_m;                          \
-  sum_m = __msa_copy_u_w((v4i32)res0_m, 0);          \
-  sum_m;                                             \
-})
+#define HADD_UH_U32(in)                               \
+  ({                                                  \
+    v4u32 res_m;                                      \
+    v2u64 res0_m, res1_m;                             \
+    uint32_t sum_m;                                   \
+                                                      \
+    res_m = __msa_hadd_u_w((v8u16)in, (v8u16)in);     \
+    res0_m = __msa_hadd_u_d(res_m, res_m);            \
+    res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1); \
+    res0_m = res0_m + res1_m;                         \
+    sum_m = __msa_copy_u_w((v4i32)res0_m, 0);         \
+    sum_m;                                            \
+  })
 
 /* Description : Horizontal addition of unsigned byte vector elements
    Arguments   : Inputs  - in0, in1
@@ -894,16 +938,18 @@
                  even unsigned byte element from 'in0' (pairwise) and the
                  halfword result is written to 'out0'
 */
-#define HADD_UB2(RTYPE, in0, in1, out0, out1) {          \
-  out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0);  \
-  out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1);  \
-}
+#define HADD_UB2(RTYPE, in0, in1, out0, out1)             \
+  {                                                       \
+    out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0); \
+    out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1); \
+  }
 #define HADD_UB2_UH(...) HADD_UB2(v8u16, __VA_ARGS__)
 
-#define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) {  \
-  HADD_UB2(RTYPE, in0, in1, out0, out1);                               \
-  HADD_UB2(RTYPE, in2, in3, out2, out3);                               \
-}
+#define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                                 \
+    HADD_UB2(RTYPE, in0, in1, out0, out1);                          \
+    HADD_UB2(RTYPE, in2, in3, out2, out3);                          \
+  }
 #define HADD_UB4_UH(...) HADD_UB4(v8u16, __VA_ARGS__)
 
 /* Description : Horizontal subtraction of unsigned byte vector elements
@@ -914,10 +960,11 @@
                  even unsigned byte element from 'in0' (pairwise) and the
                  halfword result is written to 'out0'
 */
-#define HSUB_UB2(RTYPE, in0, in1, out0, out1) {          \
-  out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0);  \
-  out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1);  \
-}
+#define HSUB_UB2(RTYPE, in0, in1, out0, out1)             \
+  {                                                       \
+    out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \
+    out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \
+  }
 #define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)
 
 /* Description : SAD (Sum of Absolute Difference)
@@ -928,18 +975,19 @@
                  'ref0' is calculated and preserved in 'diff0'. Then even-odd
                  pairs are added together to generate 8 halfword results.
 */
-#define SAD_UB2_UH(in0, in1, ref0, ref1) ({                 \
-  v16u8 diff0_m, diff1_m;                                   \
-  v8u16 sad_m = { 0 };                                      \
-                                                            \
-  diff0_m = __msa_asub_u_b((v16u8)in0, (v16u8)ref0);        \
-  diff1_m = __msa_asub_u_b((v16u8)in1, (v16u8)ref1);        \
-                                                            \
-  sad_m += __msa_hadd_u_h((v16u8)diff0_m, (v16u8)diff0_m);  \
-  sad_m += __msa_hadd_u_h((v16u8)diff1_m, (v16u8)diff1_m);  \
-                                                            \
-  sad_m;                                                    \
-})
+#define SAD_UB2_UH(in0, in1, ref0, ref1)                     \
+  ({                                                         \
+    v16u8 diff0_m, diff1_m;                                  \
+    v8u16 sad_m = { 0 };                                     \
+                                                             \
+    diff0_m = __msa_asub_u_b((v16u8)in0, (v16u8)ref0);       \
+    diff1_m = __msa_asub_u_b((v16u8)in1, (v16u8)ref1);       \
+                                                             \
+    sad_m += __msa_hadd_u_h((v16u8)diff0_m, (v16u8)diff0_m); \
+    sad_m += __msa_hadd_u_h((v16u8)diff1_m, (v16u8)diff1_m); \
+                                                             \
+    sad_m;                                                   \
+  })
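(Illustrative only, not part of the patch: reducing the halfword partials
from SAD_UB2_UH to one scalar with HADD_UH_U32 above; hypothetical names.)

static uint32_t sad_16x2(const uint8_t *src, const uint8_t *ref,
                         int32_t stride) {
  v16u8 s0, s1, r0, r1;
  v8u16 sad;

  LD_UB2(src, stride, s0, s1);
  LD_UB2(ref, stride, r0, r1);
  sad = SAD_UB2_UH(s0, s1, r0, r1);
  return HADD_UH_U32(sad); /* sum of the 8 halfword partial SADs */
}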
 
 /* Description : Horizontal subtraction of signed halfword vector elements
    Arguments   : Inputs  - in0, in1
@@ -949,10 +997,11 @@
                  even signed halfword element from 'in0' (pairwise) and the
                  word result is written to 'out0'
 */
-#define HSUB_UH2(RTYPE, in0, in1, out0, out1) {          \
-  out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0);  \
-  out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1);  \
-}
+#define HSUB_UH2(RTYPE, in0, in1, out0, out1)             \
+  {                                                       \
+    out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0); \
+    out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1); \
+  }
 #define HSUB_UH2_SW(...) HSUB_UH2(v4i32, __VA_ARGS__)
 
 /* Description : Set element n of input vector to GPR value
@@ -961,25 +1010,28 @@
                  Return Type - as per RTYPE
    Details     : Set element 0 in vector 'out' to the value specified in 'in0'
 */
-#define INSERT_W2(RTYPE, in0, in1, out) {           \
-  out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0);  \
-  out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1);  \
-}
+#define INSERT_W2(RTYPE, in0, in1, out)              \
+  {                                                  \
+    out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
+    out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
+  }
 #define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)
 
-#define INSERT_W4(RTYPE, in0, in1, in2, in3, out) {  \
-  out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0);   \
-  out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1);   \
-  out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2);   \
-  out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3);   \
-}
+#define INSERT_W4(RTYPE, in0, in1, in2, in3, out)    \
+  {                                                  \
+    out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
+    out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
+    out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \
+    out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \
+  }
 #define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
 #define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
 
-#define INSERT_D2(RTYPE, in0, in1, out) {           \
-  out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0);  \
-  out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1);  \
-}
+#define INSERT_D2(RTYPE, in0, in1, out)              \
+  {                                                  \
+    out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
+    out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
+  }
 #define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
 #define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
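(Illustrative only, not part of the patch: gathering four strided 4-byte rows
into one vector via GP-register inserts; names are hypothetical.)

static v16u8 gather_4x4(const uint8_t *src, int32_t stride) {
  uint32_t w0, w1, w2, w3;
  v16u8 out = { 0 };

  LW4(src, stride, w0, w1, w2, w3);
  INSERT_W4_UB(w0, w1, w2, w3, out); /* one 4-byte row per word lane */
  return out;
}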
 
@@ -990,10 +1042,11 @@
    Details     : Even byte elements of 'in0' and 'in1' are interleaved
                  and written to 'out0'
 */
-#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0);     \
-  out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2);     \
-}
+#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                      \
+    out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
+    out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
+  }
 #define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
 #define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
 
@@ -1004,10 +1057,11 @@
    Details     : Even halfword elements of 'in0' and 'in1' are interleaved
                  and written to 'out0'
 */
-#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0);     \
-  out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2);     \
-}
+#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                      \
+    out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \
+    out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \
+  }
 #define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
 #define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
 #define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)
@@ -1019,10 +1073,11 @@
    Details     : Even word elements of 'in0' and 'in1' are interleaved
                  and written to 'out0'
 */
-#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0);     \
-  out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2);     \
-}
+#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                      \
+    out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \
+    out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \
+  }
 #define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
 
 /* Description : Interleave even double word elements from vectors
@@ -1032,10 +1087,11 @@
    Details     : Even double word elements of 'in0' and 'in1' are interleaved
                  and written to 'out0'
 */
-#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0);     \
-  out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2);     \
-}
+#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                      \
+    out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \
+    out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \
+  }
 #define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
 
 /* Description : Interleave left half of byte elements from vectors
@@ -1045,20 +1101,22 @@
    Details     : Left half of byte elements of 'in0' and 'in1' are interleaved
                  and written to 'out0'.
 */
-#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1);     \
-  out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3);     \
-}
+#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
+    out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \
+  }
 #define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
 #define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
 #define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
 #define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
 
-#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                out0, out1, out2, out3) {                       \
-  ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                out2, out3)                                                \
+  {                                                                        \
+    ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
 #define ILVL_B4_UH(...) ILVL_B4(v8u16, __VA_ARGS__)
 
@@ -1069,10 +1127,11 @@
    Details     : Left half of halfword elements of 'in0' and 'in1' are
                  interleaved and written to 'out0'.
 */
-#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1);     \
-  out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3);     \
-}
+#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
+    out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \
+  }
 #define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
 
 /* Description : Interleave left half of word elements from vectors
@@ -1082,10 +1141,11 @@
    Details     : Left half of word elements of 'in0' and 'in1' are interleaved
                  and written to 'out0'.
 */
-#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1);     \
-  out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3);     \
-}
+#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
+    out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \
+  }
 #define ILVL_W2_UB(...) ILVL_W2(v16u8, __VA_ARGS__)
 #define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)
 
@@ -1096,33 +1156,36 @@
    Details     : Right half of byte elements of 'in0' and 'in1' are interleaved
                 and written to 'out0'.
 */
-#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1);     \
-  out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3);     \
-}
+#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
+    out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \
+  }
 #define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
 #define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
 #define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
 #define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
 
-#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                out0, out1, out2, out3) {                       \
-  ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                out2, out3)                                                \
+  {                                                                        \
+    ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
 #define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
 #define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
 #define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
 
-#define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,     \
-                in8, in9, in10, in11, in12, in13, in14, in15,      \
-                out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,           \
-          out0, out1, out2, out3);                                 \
-  ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15,     \
-          out4, out5, out6, out7);                                 \
-}
+#define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \
+                in11, in12, in13, in14, in15, out0, out1, out2, out3, out4,    \
+                out5, out6, out7)                                              \
+  {                                                                            \
+    ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,   \
+            out3);                                                             \
+    ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5,   \
+            out6, out7);                                                       \
+  }
 #define ILVR_B8_UH(...) ILVR_B8(v8u16, __VA_ARGS__)
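+/* Usage sketch (illustrative only): the _B4/_B8 forms chain the _B2 body so
+   one statement produces four or eight interleaved outputs:
+     v16i8 p0, p1, p2, p3, p4, p5, p6, p7;
+     v8u16 r0, r1, r2, r3;
+     ILVR_B4_UH(p0, p1, p2, p3, p4, p5, p6, p7, r0, r1, r2, r3);
+*/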
 
 /* Description : Interleave right half of halfword elements from vectors
@@ -1132,31 +1195,35 @@
    Details     : Right half of halfword elements of 'in0' and 'in1' are
                  interleaved and written to 'out0'.
 */
-#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1);     \
-  out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3);     \
-}
+#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
+    out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \
+  }
 #define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
 
-#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                out0, out1, out2, out3) {                       \
-  ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                out2, out3)                                                \
+  {                                                                        \
+    ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
 
-#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1);     \
-  out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3);     \
-}
+#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
+    out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \
+  }
 #define ILVR_W2_UB(...) ILVR_W2(v16u8, __VA_ARGS__)
 #define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)
 
-#define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                out0, out1, out2, out3) {                       \
-  ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                out2, out3)                                                \
+  {                                                                        \
+    ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define ILVR_W4_UB(...) ILVR_W4(v16u8, __VA_ARGS__)
 
 /* Description : Interleave right half of double word elements from vectors
@@ -1166,25 +1233,28 @@
    Details     : Right half of double word elements of 'in0' and 'in1' are
                  interleaved and written to 'out0'.
 */
-#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) {   \
-  out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1));  \
-  out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3));  \
-}
+#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1)      \
+  {                                                         \
+    out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \
+    out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \
+  }
 #define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
 #define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
 #define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)
 
-#define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) {  \
-  ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1);                         \
-  out2 = (RTYPE)__msa_ilvr_d((v2i64)(in4), (v2i64)(in5));                 \
-}
+#define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
+  {                                                                    \
+    ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1);                    \
+    out2 = (RTYPE)__msa_ilvr_d((v2i64)(in4), (v2i64)(in5));            \
+  }
 #define ILVR_D3_SB(...) ILVR_D3(v16i8, __VA_ARGS__)
 
-#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                out0, out1, out2, out3) {                       \
-  ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                out2, out3)                                                \
+  {                                                                        \
+    ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
 #define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
 
@@ -1195,26 +1265,29 @@
    Details     : Right half of byte elements from 'in0' and 'in1' are
                 interleaved and written to 'out0'.
 */
-#define ILVRL_B2(RTYPE, in0, in1, out0, out1) {        \
-  out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1);  \
-  out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1);  \
-}
+#define ILVRL_B2(RTYPE, in0, in1, out0, out1)           \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
+    out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
+  }
 #define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
 #define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
 #define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
 #define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
 
-#define ILVRL_H2(RTYPE, in0, in1, out0, out1) {        \
-  out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1);  \
-  out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1);  \
-}
+#define ILVRL_H2(RTYPE, in0, in1, out0, out1)           \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
+    out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
+  }
 #define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
 #define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
 
-#define ILVRL_W2(RTYPE, in0, in1, out0, out1) {        \
-  out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1);  \
-  out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1);  \
-}
+#define ILVRL_W2(RTYPE, in0, in1, out0, out1)           \
+  {                                                     \
+    out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
+    out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
+  }
 #define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
 #define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
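+/* Usage sketch (illustrative only): ILVRL_* emits both the right and the
+   left interleave of a single input pair in one statement:
+     v8i16 a, b;
+     v4i32 r, l;
+     ILVRL_H2_SW(a, b, r, l);
+   'r' and 'l' are the right and left halfword interleaves of 'a'/'b'. */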
 
@@ -1228,16 +1301,18 @@
                  value generated with (sat_val + 1) bit range.
                 The results are written in place.
 */
-#define SAT_UH2(RTYPE, in0, in1, sat_val) {         \
-  in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val);  \
-  in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val);  \
-}
+#define SAT_UH2(RTYPE, in0, in1, sat_val)            \
+  {                                                  \
+    in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val); \
+    in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val); \
+  }
 #define SAT_UH2_UH(...) SAT_UH2(v8u16, __VA_ARGS__)
 
-#define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) {  \
-  SAT_UH2(RTYPE, in0, in1, sat_val);                   \
-  SAT_UH2(RTYPE, in2, in3, sat_val)                    \
-}
+#define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) \
+  {                                                 \
+    SAT_UH2(RTYPE, in0, in1, sat_val);              \
+    SAT_UH2(RTYPE, in2, in3, sat_val);              \
+  }
 #define SAT_UH4_UH(...) SAT_UH4(v8u16, __VA_ARGS__)
 
 /* Description : Saturate the halfword element values to the max
@@ -1250,16 +1325,18 @@
                 value generated with (sat_val + 1) bit range.
                 The results are written in place.
 */
-#define SAT_SH2(RTYPE, in0, in1, sat_val) {         \
-  in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val);  \
-  in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val);  \
-}
+#define SAT_SH2(RTYPE, in0, in1, sat_val)            \
+  {                                                  \
+    in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val); \
+    in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val); \
+  }
 #define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)
 
-#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) {  \
-  SAT_SH2(RTYPE, in0, in1, sat_val);                   \
-  SAT_SH2(RTYPE, in2, in3, sat_val);                   \
-}
+#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \
+  {                                                 \
+    SAT_SH2(RTYPE, in0, in1, sat_val);              \
+    SAT_SH2(RTYPE, in2, in3, sat_val);              \
+  }
 #define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)
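+/* Usage sketch (illustrative only): with sat_val = 7 every unsigned halfword
+   is clamped to the 8-bit maximum of 255, e.g. just before packing filter
+   sums back down to bytes:
+     v8u16 t0, t1, t2, t3;
+     SAT_UH4_UH(t0, t1, t2, t3, 7);
+*/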
 
 /* Description : Indexed halfword element values are replicated to all
@@ -1271,17 +1348,18 @@
                  elements of the 'out0' vector.
                  Valid index range for halfword operation is 0-7.
 */
-#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) {  \
-  out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0);        \
-  out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1);        \
-}
+#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \
+  {                                                  \
+    out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0);   \
+    out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1);   \
+  }
 #define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)
 
-#define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3,  \
-                  out0, out1, out2, out3) {           \
-  SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1);       \
-  SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3);       \
-}
+#define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3, out0, out1, out2, out3) \
+  {                                                                          \
+    SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1);                            \
+    SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3);                            \
+  }
 #define SPLATI_H4_SB(...) SPLATI_H4(v16i8, __VA_ARGS__)
 #define SPLATI_H4_SH(...) SPLATI_H4(v8i16, __VA_ARGS__)
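+/* Usage sketch (illustrative only): broadcasting four filter taps out of one
+   coefficient vector, one tap per output (index range 0-7):
+     v8i16 coeff, f0, f1, f2, f3;
+     SPLATI_H4_SH(coeff, 0, 1, 2, 3, f0, f1, f2, f3);
+*/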
 
@@ -1293,19 +1371,21 @@
                  'out0' & even byte elements of 'in1' are copied to the right
                  half of 'out0'.
 */
-#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1);     \
-  out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3);     \
-}
+#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                      \
+    out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
+    out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \
+  }
 #define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
 #define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
 #define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
 
-#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                 out0, out1, out2, out3) {                       \
-  PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                 out2, out3)                                                \
+  {                                                                         \
+    PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
 #define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
 #define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
@@ -1318,18 +1398,20 @@
                  'out0' & even halfword elements of 'in1' are copied to the
                  right half of 'out0'.
 */
-#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1);     \
-  out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3);     \
-}
+#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                      \
+    out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \
+    out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \
+  }
 #define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
 #define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)
 
-#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                 out0, out1, out2, out3) {                       \
-  PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                 out2, out3)                                                \
+  {                                                                         \
+    PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
 
 /* Description : Pack even double word elements of vector pairs
@@ -1340,18 +1422,20 @@
                 'out0' & even double word elements of 'in1' are copied to the right
                  half of 'out0'.
 */
-#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1);     \
-  out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3);     \
-}
+#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1)  \
+  {                                                      \
+    out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \
+    out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \
+  }
 #define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
 #define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)
 
-#define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                 out0, out1, out2, out3) {                       \
-  PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                 out2, out3)                                                \
+  {                                                                         \
+    PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define PCKEV_D4_UB(...) PCKEV_D4(v16u8, __VA_ARGS__)
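+/* Usage sketch (illustrative only): PCKEV_B2 is the usual narrowing step
+   after halfword arithmetic, keeping the even (least significant) byte of
+   each halfword element:
+     v8i16 sum0, sum1, sum2, sum3;
+     v16u8 dst0, dst1;
+     PCKEV_B2_UB(sum1, sum0, sum3, sum2, dst0, dst1);
+*/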
 
 /* Description : Each byte element is logically xor'ed with immediate 128
@@ -1361,30 +1445,34 @@
    Details     : Each unsigned byte element from input vector 'in0' is
                  logically xor'ed with 128 and the result is stored in-place.
 */
-#define XORI_B2_128(RTYPE, in0, in1) {         \
-  in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128);  \
-  in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128);  \
-}
+#define XORI_B2_128(RTYPE, in0, in1)            \
+  {                                             \
+    in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128); \
+    in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128); \
+  }
 #define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
 #define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
 
-#define XORI_B3_128(RTYPE, in0, in1, in2) {    \
-  XORI_B2_128(RTYPE, in0, in1);                \
-  in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128);  \
-}
+#define XORI_B3_128(RTYPE, in0, in1, in2)       \
+  {                                             \
+    XORI_B2_128(RTYPE, in0, in1);               \
+    in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128); \
+  }
 #define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)
 
-#define XORI_B4_128(RTYPE, in0, in1, in2, in3) {  \
-  XORI_B2_128(RTYPE, in0, in1);                   \
-  XORI_B2_128(RTYPE, in2, in3);                   \
-}
+#define XORI_B4_128(RTYPE, in0, in1, in2, in3) \
+  {                                            \
+    XORI_B2_128(RTYPE, in0, in1);              \
+    XORI_B2_128(RTYPE, in2, in3);              \
+  }
 #define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
 #define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
 
-#define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) {  \
-  XORI_B4_128(RTYPE, in0, in1, in2, in3);                        \
-  XORI_B3_128(RTYPE, in4, in5, in6);                             \
-}
+#define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) \
+  {                                                           \
+    XORI_B4_128(RTYPE, in0, in1, in2, in3);                   \
+    XORI_B3_128(RTYPE, in4, in5, in6);                        \
+  }
 #define XORI_B7_128_SB(...) XORI_B7_128(v16i8, __VA_ARGS__)
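+/* Usage sketch (illustrative only): XOR with 128 flips the sign bit, the
+   standard trick for moving unsigned pixels into signed range (and back)
+   around signed-multiply filter stages:
+     v16i8 src0, src1, src2, src3;
+     XORI_B4_128_SB(src0, src1, src2, src3);
+*/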
 
 /* Description : Average of signed halfword elements -> (a + b) / 2
@@ -1396,13 +1484,14 @@
                  in one extra bit in the result. The result is then divided by
                 2 and written to 'out0'.
 */
-#define AVE_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                out0, out1, out2, out3) {                       \
-  out0 = (RTYPE)__msa_ave_s_h((v8i16)in0, (v8i16)in1);          \
-  out1 = (RTYPE)__msa_ave_s_h((v8i16)in2, (v8i16)in3);          \
-  out2 = (RTYPE)__msa_ave_s_h((v8i16)in4, (v8i16)in5);          \
-  out3 = (RTYPE)__msa_ave_s_h((v8i16)in6, (v8i16)in7);          \
-}
+#define AVE_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                out2, out3)                                                \
+  {                                                                        \
+    out0 = (RTYPE)__msa_ave_s_h((v8i16)in0, (v8i16)in1);                   \
+    out1 = (RTYPE)__msa_ave_s_h((v8i16)in2, (v8i16)in3);                   \
+    out2 = (RTYPE)__msa_ave_s_h((v8i16)in4, (v8i16)in5);                   \
+    out3 = (RTYPE)__msa_ave_s_h((v8i16)in6, (v8i16)in7);                   \
+  }
 #define AVE_SH4_SH(...) AVE_SH4(v8i16, __VA_ARGS__)
 
 /* Description : Addition of signed halfword elements and signed saturation
@@ -1413,17 +1502,19 @@
                  halfword elements of 'in1'. The result is then signed saturated
                 within the halfword data type range.
 */
-#define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
-  out0 = (RTYPE)__msa_adds_s_h((v8i16)in0, (v8i16)in1);    \
-  out1 = (RTYPE)__msa_adds_s_h((v8i16)in2, (v8i16)in3);    \
-}
+#define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1)   \
+  {                                                       \
+    out0 = (RTYPE)__msa_adds_s_h((v8i16)in0, (v8i16)in1); \
+    out1 = (RTYPE)__msa_adds_s_h((v8i16)in2, (v8i16)in3); \
+  }
 #define ADDS_SH2_SH(...) ADDS_SH2(v8i16, __VA_ARGS__)
 
-#define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
-                 out0, out1, out2, out3) {                       \
-  ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1);               \
-  ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3);               \
-}
+#define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                 out2, out3)                                                \
+  {                                                                         \
+    ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1);                        \
+    ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3);                        \
+  }
 #define ADDS_SH4_SH(...) ADDS_SH4(v8i16, __VA_ARGS__)
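+/* Usage sketch (illustrative only): ADDS_SH2 pins overflowing sums at the
+   INT16_MIN/INT16_MAX limits instead of letting them wrap:
+     v8i16 a0, a1, b0, b1, s0, s1;
+     ADDS_SH2_SH(a0, b0, a1, b1, s0, s1);
+*/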
 
 /* Description : Shift left all elements of vector (generic for all data types)
@@ -1433,12 +1524,13 @@
    Details     : Each element of vector 'in0' is left shifted by 'shift' and
                  the result is written in-place.
 */
-#define SLLI_4V(in0, in1, in2, in3, shift) {  \
-  in0 = in0 << shift;                         \
-  in1 = in1 << shift;                         \
-  in2 = in2 << shift;                         \
-  in3 = in3 << shift;                         \
-}
+#define SLLI_4V(in0, in1, in2, in3, shift) \
+  {                                        \
+    in0 = in0 << shift;                    \
+    in1 = in1 << shift;                    \
+    in2 = in2 << shift;                    \
+    in3 = in3 << shift;                    \
+  }
 
 /* Description : Arithmetic shift right all elements of vector
                  (generic for all data types)
@@ -1448,12 +1540,13 @@
    Details     : Each element of vector 'in0' is right shifted by 'shift' and
                  the result is written in-place. 'shift' is a GP variable.
 */
-#define SRA_4V(in0, in1, in2, in3, shift) {  \
-  in0 = in0 >> shift;                        \
-  in1 = in1 >> shift;                        \
-  in2 = in2 >> shift;                        \
-  in3 = in3 >> shift;                        \
-}
+#define SRA_4V(in0, in1, in2, in3, shift) \
+  {                                       \
+    in0 = in0 >> shift;                   \
+    in1 = in1 >> shift;                   \
+    in2 = in2 >> shift;                   \
+    in3 = in3 >> shift;                   \
+  }
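+/* Usage sketch (illustrative only): SLLI_4V and SRA_4V are plain operator
+   macros, so they work for any vector element type:
+     v8i16 v0, v1, v2, v3;
+     SLLI_4V(v0, v1, v2, v3, 2);
+*/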
 
 /* Description : Shift right arithmetic rounded words
    Arguments   : Inputs  - in0, in1, shift
@@ -1465,15 +1558,17 @@
                  rounding and the result is written in-place.
                  'shift' is a vector.
 */
-#define SRAR_W2(RTYPE, in0, in1, shift) {               \
-  in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift);  \
-  in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift);  \
-}
-
-#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) {  \
-  SRAR_W2(RTYPE, in0, in1, shift)                    \
-  SRAR_W2(RTYPE, in2, in3, shift)                    \
-}
+#define SRAR_W2(RTYPE, in0, in1, shift)                  \
+  {                                                      \
+    in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \
+    in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \
+  }
+
+#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \
+  {                                               \
+    SRAR_W2(RTYPE, in0, in1, shift);              \
+    SRAR_W2(RTYPE, in2, in3, shift);              \
+  }
 #define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)
 
 /* Description : Shift right arithmetic rounded (immediate)
@@ -1485,30 +1580,34 @@
                  shifted value for rounding and the result is written in-place.
                  'shift' is an immediate value.
 */
-#define SRARI_H2(RTYPE, in0, in1, shift) {        \
-  in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift);  \
-  in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift);  \
-}
+#define SRARI_H2(RTYPE, in0, in1, shift)           \
+  {                                                \
+    in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \
+    in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \
+  }
 #define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
 #define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)
 
-#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) {  \
-  SRARI_H2(RTYPE, in0, in1, shift);                   \
-  SRARI_H2(RTYPE, in2, in3, shift);                   \
-}
+#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \
+  {                                                \
+    SRARI_H2(RTYPE, in0, in1, shift);              \
+    SRARI_H2(RTYPE, in2, in3, shift);              \
+  }
 #define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
 #define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)
 
-#define SRARI_W2(RTYPE, in0, in1, shift) {        \
-  in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift);  \
-  in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift);  \
-}
+#define SRARI_W2(RTYPE, in0, in1, shift)           \
+  {                                                \
+    in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \
+    in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \
+  }
 #define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)
 
-#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) {  \
-  SRARI_W2(RTYPE, in0, in1, shift);                   \
-  SRARI_W2(RTYPE, in2, in3, shift);                   \
-}
+#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \
+  {                                                \
+    SRARI_W2(RTYPE, in0, in1, shift);              \
+    SRARI_W2(RTYPE, in2, in3, shift);              \
+  }
 #define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
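+/* Usage sketch (illustrative only): the rounded shifts add (1 << (shift - 1))
+   to each element before shifting, e.g. the final descale of a 7-bit filter:
+     v8i16 sum0, sum1;
+     SRARI_H2_SH(sum0, sum1, 7);
+*/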
 
 /* Description : Logical shift right all elements of vector (immediate)
@@ -1518,12 +1617,13 @@
    Details     : Each element of vector 'in0' is right shifted by 'shift' and
                  the result is written in-place. 'shift' is an immediate value.
 */
-#define SRLI_H4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, shift) {  \
-  out0 = (RTYPE)__msa_srli_h((v8i16)in0, shift);                             \
-  out1 = (RTYPE)__msa_srli_h((v8i16)in1, shift);                             \
-  out2 = (RTYPE)__msa_srli_h((v8i16)in2, shift);                             \
-  out3 = (RTYPE)__msa_srli_h((v8i16)in3, shift);                             \
-}
+#define SRLI_H4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, shift) \
+  {                                                                       \
+    out0 = (RTYPE)__msa_srli_h((v8i16)in0, shift);                        \
+    out1 = (RTYPE)__msa_srli_h((v8i16)in1, shift);                        \
+    out2 = (RTYPE)__msa_srli_h((v8i16)in2, shift);                        \
+    out3 = (RTYPE)__msa_srli_h((v8i16)in3, shift);                        \
+  }
 #define SRLI_H4_SH(...) SRLI_H4(v8i16, __VA_ARGS__)
 
 /* Description : Multiplication of pairs of vectors
@@ -1532,15 +1632,16 @@
    Details     : Each element from 'in0' is multiplied with the corresponding
                  element of 'in1' and the result is written to 'out0'.
 */
-#define MUL2(in0, in1, in2, in3, out0, out1) {  \
-  out0 = in0 * in1;                             \
-  out1 = in2 * in3;                             \
-}
-#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7,  \
-             out0, out1, out2, out3) {                \
-  MUL2(in0, in1, in2, in3, out0, out1);               \
-  MUL2(in4, in5, in6, in7, out2, out3);               \
-}
+#define MUL2(in0, in1, in2, in3, out0, out1) \
+  {                                          \
+    out0 = in0 * in1;                        \
+    out1 = in2 * in3;                        \
+  }
+#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
+  {                                                                          \
+    MUL2(in0, in1, in2, in3, out0, out1);                                    \
+    MUL2(in4, in5, in6, in7, out2, out3);                                    \
+  }
 
 /* Description : Addition of 2 pairs of vectors
    Arguments   : Inputs  - in0, in1, in2, in3
@@ -1548,15 +1649,16 @@
    Details     : Each element in 'in0' is added to 'in1' and the result is
                  written to 'out0'.
 */
-#define ADD2(in0, in1, in2, in3, out0, out1) {  \
-  out0 = in0 + in1;                             \
-  out1 = in2 + in3;                             \
-}
-#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7,  \
-             out0, out1, out2, out3) {                \
-  ADD2(in0, in1, in2, in3, out0, out1);               \
-  ADD2(in4, in5, in6, in7, out2, out3);               \
-}
+#define ADD2(in0, in1, in2, in3, out0, out1) \
+  {                                          \
+    out0 = in0 + in1;                        \
+    out1 = in2 + in3;                        \
+  }
+#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
+  {                                                                          \
+    ADD2(in0, in1, in2, in3, out0, out1);                                    \
+    ADD2(in4, in5, in6, in7, out2, out3);                                    \
+  }
 
 /* Description : Subtraction of 2 pairs of vectors
    Arguments   : Inputs  - in0, in1, in2, in3
@@ -1564,17 +1666,18 @@
    Details     : Each element in 'in1' is subtracted from 'in0' and the
                  result is written to 'out0'.
 */
-#define SUB2(in0, in1, in2, in3, out0, out1) {  \
-  out0 = in0 - in1;                             \
-  out1 = in2 - in3;                             \
-}
-#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7,  \
-             out0, out1, out2, out3) {                \
-  out0 = in0 - in1;                                   \
-  out1 = in2 - in3;                                   \
-  out2 = in4 - in5;                                   \
-  out3 = in6 - in7;                                   \
-}
+#define SUB2(in0, in1, in2, in3, out0, out1) \
+  {                                          \
+    out0 = in0 - in1;                        \
+    out1 = in2 - in3;                        \
+  }
+#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
+  {                                                                          \
+    out0 = in0 - in1;                                                        \
+    out1 = in2 - in3;                                                        \
+    out2 = in4 - in5;                                                        \
+    out3 = in6 - in7;                                                        \
+  }
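+/* Usage sketch (illustrative only): MUL2/ADD2/SUB2 are likewise type-generic
+   operator macros that fold two statements into one call:
+     v4i32 a0, a1, b0, b1, d0, d1;
+     SUB2(a0, b0, a1, b1, d0, d1);
+*/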
 
 /* Description : Sign extend halfword elements from right half of the vector
    Arguments   : Input  - in    (halfword vector)
@@ -1584,12 +1687,13 @@
                 extracted and interleaved with the same vector 'in0' to
                 generate 4 word elements, keeping the sign intact.
 */
-#define UNPCK_R_SH_SW(in, out) {                 \
-  v8i16 sign_m;                                  \
-                                                 \
-  sign_m = __msa_clti_s_h((v8i16)in, 0);         \
-  out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in);  \
-}
+#define UNPCK_R_SH_SW(in, out)                    \
+  {                                               \
+    v8i16 sign_m;                                 \
+                                                  \
+    sign_m = __msa_clti_s_h((v8i16)in, 0);        \
+    out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \
+  }
 
 /* Description : Zero extend unsigned byte elements to halfword elements
    Arguments   : Input   - in          (unsigned byte vector)
@@ -1598,11 +1702,12 @@
    Details     : Zero extended right half of the vector is returned in 'out0'.
                  Zero extended left half of the vector is returned in 'out1'.
 */
-#define UNPCK_UB_SH(in, out0, out1) {   \
-  v16i8 zero_m = { 0 };                 \
-                                        \
-  ILVRL_B2_SH(zero_m, in, out0, out1);  \
-}
+#define UNPCK_UB_SH(in, out0, out1)      \
+  {                                      \
+    v16i8 zero_m = { 0 };                \
+                                         \
+    ILVRL_B2_SH(zero_m, in, out0, out1); \
+  }
 
 /* Description : Sign extend halfword elements from input vector and return
                  the result in pair of vectors
@@ -1615,91 +1720,96 @@
                 Then interleaved left with the same vector 'in0' to
                 generate 4 signed word elements in 'out1'.
 */
-#define UNPCK_SH_SW(in, out0, out1) {    \
-  v8i16 tmp_m;                           \
-                                         \
-  tmp_m = __msa_clti_s_h((v8i16)in, 0);  \
-  ILVRL_H2_SW(tmp_m, in, out0, out1);    \
-}
+#define UNPCK_SH_SW(in, out0, out1)       \
+  {                                       \
+    v8i16 tmp_m;                          \
+                                          \
+    tmp_m = __msa_clti_s_h((v8i16)in, 0); \
+    ILVRL_H2_SW(tmp_m, in, out0, out1);   \
+  }
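+/* Usage sketch (illustrative only): widening 16 unsigned pixels to two
+   halfword vectors ahead of 16-bit arithmetic:
+     v16u8 pix;
+     v8i16 pix_r, pix_l;
+     UNPCK_UB_SH(pix, pix_r, pix_l);
+*/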
 
 /* Description : Butterfly of 4 input vectors
    Arguments   : Inputs  - in0, in1, in2, in3
                  Outputs - out0, out1, out2, out3
    Details     : Butterfly operation
 */
-#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
-  out0 = in0 + in3;                                                \
-  out1 = in1 + in2;                                                \
-                                                                   \
-  out2 = in1 - in2;                                                \
-  out3 = in0 - in3;                                                \
-}
+#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                             \
+    out0 = in0 + in3;                                           \
+    out1 = in1 + in2;                                           \
+                                                                \
+    out2 = in1 - in2;                                           \
+    out3 = in0 - in3;                                           \
+  }
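+/* Usage sketch (illustrative only): the first stage of a 4-point transform,
+   pairing the outermost and innermost inputs:
+     v8i16 in0, in1, in2, in3, s0, s1, d1, d0;
+     BUTTERFLY_4(in0, in1, in2, in3, s0, s1, d1, d0);
+   s0 = in0 + in3, s1 = in1 + in2, d1 = in1 - in2, d0 = in0 - in3. */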
 
 /* Description : Butterfly of 8 input vectors
    Arguments   : Inputs  - in0 ...  in7
                  Outputs - out0 .. out7
    Details     : Butterfly operation
 */
-#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,            \
-                    out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  out0 = in0 + in7;                                                    \
-  out1 = in1 + in6;                                                    \
-  out2 = in2 + in5;                                                    \
-  out3 = in3 + in4;                                                    \
-                                                                       \
-  out4 = in3 - in4;                                                    \
-  out5 = in2 - in5;                                                    \
-  out6 = in1 - in6;                                                    \
-  out7 = in0 - in7;                                                    \
-}
+#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
+                    out3, out4, out5, out6, out7)                             \
+  {                                                                           \
+    out0 = in0 + in7;                                                         \
+    out1 = in1 + in6;                                                         \
+    out2 = in2 + in5;                                                         \
+    out3 = in3 + in4;                                                         \
+                                                                              \
+    out4 = in3 - in4;                                                         \
+    out5 = in2 - in5;                                                         \
+    out6 = in1 - in6;                                                         \
+    out7 = in0 - in7;                                                         \
+  }
 
 /* Description : Butterfly of 16 input vectors
    Arguments   : Inputs  - in0 ...  in15
                  Outputs - out0 .. out15
    Details     : Butterfly operation
 */
-#define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,                  \
-                     in8, in9,  in10, in11, in12, in13, in14, in15,           \
-                     out0, out1, out2, out3, out4, out5, out6, out7,          \
-                     out8, out9, out10, out11, out12, out13, out14, out15) {  \
-  out0 = in0 + in15;                                                          \
-  out1 = in1 + in14;                                                          \
-  out2 = in2 + in13;                                                          \
-  out3 = in3 + in12;                                                          \
-  out4 = in4 + in11;                                                          \
-  out5 = in5 + in10;                                                          \
-  out6 = in6 + in9;                                                           \
-  out7 = in7 + in8;                                                           \
+#define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10,  \
+                     in11, in12, in13, in14, in15, out0, out1, out2, out3,    \
+                     out4, out5, out6, out7, out8, out9, out10, out11, out12, \
+                     out13, out14, out15)                                     \
+  {                                                                           \
+    out0 = in0 + in15;                                                        \
+    out1 = in1 + in14;                                                        \
+    out2 = in2 + in13;                                                        \
+    out3 = in3 + in12;                                                        \
+    out4 = in4 + in11;                                                        \
+    out5 = in5 + in10;                                                        \
+    out6 = in6 + in9;                                                         \
+    out7 = in7 + in8;                                                         \
                                                                               \
-  out8 = in7 - in8;                                                           \
-  out9 = in6 - in9;                                                           \
-  out10 = in5 - in10;                                                         \
-  out11 = in4 - in11;                                                         \
-  out12 = in3 - in12;                                                         \
-  out13 = in2 - in13;                                                         \
-  out14 = in1 - in14;                                                         \
-  out15 = in0 - in15;                                                         \
-}
+    out8 = in7 - in8;                                                         \
+    out9 = in6 - in9;                                                         \
+    out10 = in5 - in10;                                                       \
+    out11 = in4 - in11;                                                       \
+    out12 = in3 - in12;                                                       \
+    out13 = in2 - in13;                                                       \
+    out14 = in1 - in14;                                                       \
+    out15 = in0 - in15;                                                       \
+  }
 
 /* Description : Transpose input 8x8 byte block
    Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
                  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
                  Return Type - as per RTYPE
 */
-#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,     \
-                        out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                    \
-  v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                    \
-                                                                           \
-  ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5,                       \
-             tmp0_m, tmp1_m, tmp2_m, tmp3_m);                              \
-  ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m);                             \
-  ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m);                             \
-  ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2);                             \
-  ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6);                             \
-  SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8);                             \
-  SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8);                             \
-}
+#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0,   \
+                        out1, out2, out3, out4, out5, out6, out7)              \
+  {                                                                            \
+    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                      \
+    v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                      \
+                                                                               \
+    ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, tmp0_m, tmp1_m, tmp2_m, \
+               tmp3_m);                                                        \
+    ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m);                               \
+    ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m);                               \
+    ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2);                               \
+    ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6);                               \
+    SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8);                               \
+    SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8);                               \
+  }
 #define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__)
 
 /* Description : Transpose 16x8 block into 8x16 with byte elements in vectors
@@ -1708,128 +1818,133 @@
                  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
                  Return Type - unsigned byte
 */
-#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,            \
-                            in8, in9, in10, in11, in12, in13, in14, in15,      \
-                            out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                        \
-  v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                        \
-                                                                               \
-  ILVEV_D2_UB(in0, in8, in1, in9, out7, out6);                                 \
-  ILVEV_D2_UB(in2, in10, in3, in11, out5, out4);                               \
-  ILVEV_D2_UB(in4, in12, in5, in13, out3, out2);                               \
-  ILVEV_D2_UB(in6, in14, in7, in15, out1, out0);                               \
-                                                                               \
-  tmp0_m = (v16u8)__msa_ilvev_b((v16i8)out6, (v16i8)out7);                     \
-  tmp4_m = (v16u8)__msa_ilvod_b((v16i8)out6, (v16i8)out7);                     \
-  tmp1_m = (v16u8)__msa_ilvev_b((v16i8)out4, (v16i8)out5);                     \
-  tmp5_m = (v16u8)__msa_ilvod_b((v16i8)out4, (v16i8)out5);                     \
-  out5 = (v16u8)__msa_ilvev_b((v16i8)out2, (v16i8)out3);                       \
-  tmp6_m = (v16u8)__msa_ilvod_b((v16i8)out2, (v16i8)out3);                     \
-  out7 = (v16u8)__msa_ilvev_b((v16i8)out0, (v16i8)out1);                       \
-  tmp7_m = (v16u8)__msa_ilvod_b((v16i8)out0, (v16i8)out1);                     \
-                                                                               \
-  ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m);                     \
-  out0 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m);                   \
-  out4 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m);                   \
-                                                                               \
-  tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m);                 \
-  tmp3_m = (v16u8)__msa_ilvod_h((v8i16)out7, (v8i16)out5);                     \
-  out2 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m);                   \
-  out6 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m);                   \
-                                                                               \
-  ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m);                 \
-  out1 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m);                   \
-  out5 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m);                   \
-                                                                               \
-  tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m);                 \
-  tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m);                 \
-  tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m);                 \
-  tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m);                 \
-  out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m);                   \
-  out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m);                   \
-}
+#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \
+                            in10, in11, in12, in13, in14, in15, out0, out1,   \
+                            out2, out3, out4, out5, out6, out7)               \
+  {                                                                           \
+    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                     \
+    v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                     \
+                                                                              \
+    ILVEV_D2_UB(in0, in8, in1, in9, out7, out6);                              \
+    ILVEV_D2_UB(in2, in10, in3, in11, out5, out4);                            \
+    ILVEV_D2_UB(in4, in12, in5, in13, out3, out2);                            \
+    ILVEV_D2_UB(in6, in14, in7, in15, out1, out0);                            \
+                                                                              \
+    tmp0_m = (v16u8)__msa_ilvev_b((v16i8)out6, (v16i8)out7);                  \
+    tmp4_m = (v16u8)__msa_ilvod_b((v16i8)out6, (v16i8)out7);                  \
+    tmp1_m = (v16u8)__msa_ilvev_b((v16i8)out4, (v16i8)out5);                  \
+    tmp5_m = (v16u8)__msa_ilvod_b((v16i8)out4, (v16i8)out5);                  \
+    out5 = (v16u8)__msa_ilvev_b((v16i8)out2, (v16i8)out3);                    \
+    tmp6_m = (v16u8)__msa_ilvod_b((v16i8)out2, (v16i8)out3);                  \
+    out7 = (v16u8)__msa_ilvev_b((v16i8)out0, (v16i8)out1);                    \
+    tmp7_m = (v16u8)__msa_ilvod_b((v16i8)out0, (v16i8)out1);                  \
+                                                                              \
+    ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m);                  \
+    out0 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m);                \
+    out4 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m);                \
+                                                                              \
+    tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m);              \
+    tmp3_m = (v16u8)__msa_ilvod_h((v8i16)out7, (v8i16)out5);                  \
+    out2 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m);                \
+    out6 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m);                \
+                                                                              \
+    ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m);              \
+    out1 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m);                \
+    out5 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m);                \
+                                                                              \
+    tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m);              \
+    tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m);              \
+    out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m);                \
+    out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m);                \
+  }
 
 /* Description : Transpose 4x4 block with half word elements in vectors
    Arguments   : Inputs  - in0, in1, in2, in3
                  Outputs - out0, out1, out2, out3
                  Return Type - signed halfword
 */
-#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) {  \
-  v8i16 s0_m, s1_m;                                                       \
-                                                                          \
-  ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m);                             \
-  ILVRL_W2_SH(s1_m, s0_m, out0, out2);                                    \
-  out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0);                   \
-  out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2);                   \
-}
+#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                                    \
+    v8i16 s0_m, s1_m;                                                  \
+                                                                       \
+    ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m);                        \
+    ILVRL_W2_SH(s1_m, s0_m, out0, out2);                               \
+    out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0);              \
+    out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2);              \
+  }
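+/* Usage sketch (illustrative only): after this call the low four lanes of
+   each output hold one column of the 4x4 halfword block given as rows:
+     v8i16 r0, r1, r2, r3, c0, c1, c2, c3;
+     TRANSPOSE4x4_SH_SH(r0, r1, r2, r3, c0, c1, c2, c3);
+*/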
 
 /* Description : Transpose 4x8 block with half word elements in vectors
    Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
                  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
                  Return Type - signed halfword
 */
-#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,            \
-                           out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                       \
-  v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n;                                       \
-  v8i16 zero_m = { 0 };                                                       \
-                                                                              \
-  ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6,                          \
-             tmp0_n, tmp1_n, tmp2_n, tmp3_n);                                 \
-  ILVRL_W2_SH(tmp1_n, tmp0_n, tmp0_m, tmp2_m);                                \
-  ILVRL_W2_SH(tmp3_n, tmp2_n, tmp1_m, tmp3_m);                                \
-                                                                              \
-  out0 = (v8i16)__msa_ilvr_d((v2i64)tmp1_m, (v2i64)tmp0_m);                   \
-  out1 = (v8i16)__msa_ilvl_d((v2i64)tmp1_m, (v2i64)tmp0_m);                   \
-  out2 = (v8i16)__msa_ilvr_d((v2i64)tmp3_m, (v2i64)tmp2_m);                   \
-  out3 = (v8i16)__msa_ilvl_d((v2i64)tmp3_m, (v2i64)tmp2_m);                   \
-                                                                              \
-  out4 = zero_m;                                                              \
-  out5 = zero_m;                                                              \
-  out6 = zero_m;                                                              \
-  out7 = zero_m;                                                              \
-}
+#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                           out2, out3, out4, out5, out6, out7)                 \
+  {                                                                            \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                      \
+    v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n;                                      \
+    v8i16 zero_m = { 0 };                                                      \
+                                                                               \
+    ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6, tmp0_n, tmp1_n, tmp2_n, \
+               tmp3_n);                                                        \
+    ILVRL_W2_SH(tmp1_n, tmp0_n, tmp0_m, tmp2_m);                               \
+    ILVRL_W2_SH(tmp3_n, tmp2_n, tmp1_m, tmp3_m);                               \
+                                                                               \
+    out0 = (v8i16)__msa_ilvr_d((v2i64)tmp1_m, (v2i64)tmp0_m);                  \
+    out1 = (v8i16)__msa_ilvl_d((v2i64)tmp1_m, (v2i64)tmp0_m);                  \
+    out2 = (v8i16)__msa_ilvr_d((v2i64)tmp3_m, (v2i64)tmp2_m);                  \
+    out3 = (v8i16)__msa_ilvl_d((v2i64)tmp3_m, (v2i64)tmp2_m);                  \
+                                                                               \
+    out4 = zero_m;                                                             \
+    out5 = zero_m;                                                             \
+    out6 = zero_m;                                                             \
+    out7 = zero_m;                                                             \
+  }
 
 /* Description : Transpose 8x4 block with half word elements in vectors
    Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
                  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
                  Return Type - signed halfword
 */
-#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) {  \
-  v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
-                                                                          \
-  ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m);                         \
-  ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m);                         \
-  ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2);                 \
-  ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3);                 \
-}
+#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                                    \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                              \
+                                                                       \
+    ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m);                    \
+    ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m);                    \
+    ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2);            \
+    ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3);            \
+  }
 
 /* Description : Transpose 8x8 block with half word elements in vectors
    Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
                  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
                  Return Type - as per RTYPE
 */
-#define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,     \
-                       out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v8i16 s0_m, s1_m;                                                       \
-  v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
-  v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                   \
-                                                                          \
-  ILVR_H2_SH(in6, in4, in7, in5, s0_m, s1_m);                             \
-  ILVRL_H2_SH(s1_m, s0_m, tmp0_m, tmp1_m);                                \
-  ILVL_H2_SH(in6, in4, in7, in5, s0_m, s1_m);                             \
-  ILVRL_H2_SH(s1_m, s0_m, tmp2_m, tmp3_m);                                \
-  ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m);                             \
-  ILVRL_H2_SH(s1_m, s0_m, tmp4_m, tmp5_m);                                \
-  ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m);                             \
-  ILVRL_H2_SH(s1_m, s0_m, tmp6_m, tmp7_m);                                \
-  PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m,         \
-           tmp3_m, tmp7_m, out0, out2, out4, out6);                       \
-  out1 = (RTYPE)__msa_pckod_d((v2i64)tmp0_m, (v2i64)tmp4_m);              \
-  out3 = (RTYPE)__msa_pckod_d((v2i64)tmp1_m, (v2i64)tmp5_m);              \
-  out5 = (RTYPE)__msa_pckod_d((v2i64)tmp2_m, (v2i64)tmp6_m);              \
-  out7 = (RTYPE)__msa_pckod_d((v2i64)tmp3_m, (v2i64)tmp7_m);              \
-}
+#define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, \
+                       out1, out2, out3, out4, out5, out6, out7)            \
+  {                                                                         \
+    v8i16 s0_m, s1_m;                                                       \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
+    v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                   \
+                                                                            \
+    ILVR_H2_SH(in6, in4, in7, in5, s0_m, s1_m);                             \
+    ILVRL_H2_SH(s1_m, s0_m, tmp0_m, tmp1_m);                                \
+    ILVL_H2_SH(in6, in4, in7, in5, s0_m, s1_m);                             \
+    ILVRL_H2_SH(s1_m, s0_m, tmp2_m, tmp3_m);                                \
+    ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m);                             \
+    ILVRL_H2_SH(s1_m, s0_m, tmp4_m, tmp5_m);                                \
+    ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m);                             \
+    ILVRL_H2_SH(s1_m, s0_m, tmp6_m, tmp7_m);                                \
+    PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, tmp3_m, \
+             tmp7_m, out0, out2, out4, out6);                               \
+    out1 = (RTYPE)__msa_pckod_d((v2i64)tmp0_m, (v2i64)tmp4_m);              \
+    out3 = (RTYPE)__msa_pckod_d((v2i64)tmp1_m, (v2i64)tmp5_m);              \
+    out5 = (RTYPE)__msa_pckod_d((v2i64)tmp2_m, (v2i64)tmp6_m);              \
+    out7 = (RTYPE)__msa_pckod_d((v2i64)tmp3_m, (v2i64)tmp7_m);              \
+  }
 #define TRANSPOSE8x8_SH_SH(...) TRANSPOSE8x8_H(v8i16, __VA_ARGS__)
 
 /* Description : Transpose 4x4 block with word elements in vectors
@@ -1837,40 +1952,42 @@
                  Outputs - out0, out1, out2, out3
                  Return Type - signed word
 */
-#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) {  \
-  v4i32 s0_m, s1_m, s2_m, s3_m;                                           \
-                                                                          \
-  ILVRL_W2_SW(in1, in0, s0_m, s1_m);                                      \
-  ILVRL_W2_SW(in3, in2, s2_m, s3_m);                                      \
-                                                                          \
-  out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m);                   \
-  out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m);                   \
-  out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m);                   \
-  out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m);                   \
-}
+#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                                    \
+    v4i32 s0_m, s1_m, s2_m, s3_m;                                      \
+                                                                       \
+    ILVRL_W2_SW(in1, in0, s0_m, s1_m);                                 \
+    ILVRL_W2_SW(in3, in2, s2_m, s3_m);                                 \
+                                                                       \
+    out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m);              \
+    out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m);              \
+    out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m);              \
+    out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m);              \
+  }
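
A scalar reference for these transpose macros (a hypothetical sketch, not part of the patch): output row r, column c takes input row c, column r. The 4x4 word case is shown; the 4x8/8x4/8x8 halfword macros above realise the same permutation with interleave (ilvr/ilvl) and pack (pckev/pckod) instructions.

#include <stdint.h>

/* Hypothetical scalar equivalent of TRANSPOSE4x4_SW_SW:
 * out[r][c] = in[c][r] for a 4x4 block of 32-bit words. */
static void transpose4x4_w_ref(const int32_t in[4][4], int32_t out[4][4]) {
  int32_t r, c;
  for (r = 0; r < 4; ++r)
    for (c = 0; c < 4; ++c) out[r][c] = in[c][r];
}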
 
 /* Description : Add block 4x4
    Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
    Details     : Least significant 4 bytes from each input vector are added
                  to the corresponding destination bytes, clipped to the
                  range 0..255, and stored.
 */
-#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) {     \
-  uint32_t src0_m, src1_m, src2_m, src3_m;                      \
-  v8i16 inp0_m, inp1_m, res0_m, res1_m;                         \
-  v16i8 dst0_m = { 0 };                                         \
-  v16i8 dst1_m = { 0 };                                         \
-  v16i8 zero_m = { 0 };                                         \
-                                                                \
-  ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m)                \
-  LW4(pdst, stride,  src0_m, src1_m, src2_m, src3_m);           \
-  INSERT_W2_SB(src0_m, src1_m, dst0_m);                         \
-  INSERT_W2_SB(src2_m, src3_m, dst1_m);                         \
-  ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m);   \
-  ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);         \
-  CLIP_SH2_0_255(res0_m, res1_m);                               \
-  PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m);  \
-  ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride);           \
-}
+#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride)        \
+  {                                                              \
+    uint32_t src0_m, src1_m, src2_m, src3_m;                     \
+    v8i16 inp0_m, inp1_m, res0_m, res1_m;                        \
+    v16i8 dst0_m = { 0 };                                        \
+    v16i8 dst1_m = { 0 };                                        \
+    v16i8 zero_m = { 0 };                                        \
+                                                                 \
+    ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m)               \
+    LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m);           \
+    INSERT_W2_SB(src0_m, src1_m, dst0_m);                        \
+    INSERT_W2_SB(src2_m, src3_m, dst1_m);                        \
+    ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m);  \
+    ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);        \
+    CLIP_SH2_0_255(res0_m, res1_m);                              \
+    PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \
+    ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride);          \
+  }
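
In scalar terms (hypothetical reference, names invented here): each residual is added to the destination pixel, clamped to 0..255, and written back; LW4 and ST4x4_UB merely move the four 4-byte rows at the given stride.

#include <stdint.h>

/* Hypothetical scalar equivalent of ADDBLK_ST4x4_UB: add 16-bit residuals
 * to a 4x4 block of 8-bit pixels with saturation to 0..255. */
static void addblk_4x4_ref(const int16_t res[4][4], uint8_t *dst,
                           int32_t stride) {
  int32_t r, c;
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      const int32_t v = dst[r * stride + c] + res[r][c];
      dst[r * stride + c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }
  }
}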
 
 /* Description : Pack even elements of input vectors & xor with 128
    Arguments   : Inputs - in0, in1
@@ -1880,53 +1997,57 @@
                  together in one vector and the resulting vector is xor'ed with
                  128 to shift the range from signed to unsigned byte
 */
-#define PCKEV_XORI128_UB(in0, in1) ({                    \
-  v16u8 out_m;                                           \
-                                                         \
-  out_m = (v16u8)__msa_pckev_b((v16i8)in1, (v16i8)in0);  \
-  out_m = (v16u8)__msa_xori_b((v16u8)out_m, 128);        \
-  out_m;                                                 \
-})
+#define PCKEV_XORI128_UB(in0, in1)                        \
+  ({                                                      \
+    v16u8 out_m;                                          \
+                                                          \
+    out_m = (v16u8)__msa_pckev_b((v16i8)in1, (v16i8)in0); \
+    out_m = (v16u8)__msa_xori_b((v16u8)out_m, 128);       \
+    out_m;                                                \
+  })
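
The xor with 128 flips the sign bit, which maps a two's-complement signed byte x to x + 128 as an unsigned byte, the cheap signed-to-unsigned range shift. A scalar sketch (hypothetical helper; lane order follows pckev_b, low half from in0, high half from in1):

#include <stdint.h>

/* Hypothetical scalar equivalent of PCKEV_XORI128_UB: keep the even-indexed
 * bytes of in0 and in1 and flip each sign bit. */
static void pckev_xori128_ref(const int8_t in0[16], const int8_t in1[16],
                              uint8_t out[16]) {
  int32_t i;
  for (i = 0; i < 8; ++i) {
    out[i] = (uint8_t)(in0[2 * i] ^ 0x80);     /* even bytes of in0 -> low */
    out[i + 8] = (uint8_t)(in1[2 * i] ^ 0x80); /* even bytes of in1 -> high */
  }
}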
 
 /* Description : Convert inputs to unsigned bytes, interleave, average & store
                  as an 8x4 unsigned byte block
    Arguments   : Inputs - in0, in1, in2, in3, dst0, dst1, dst2, dst3,
                           pdst, stride
 */
-#define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3,                      \
-                                dst0, dst1, dst2, dst3, pdst, stride) {  \
-  v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                  \
-  uint8_t *pdst_m = (uint8_t *)(pdst);                                   \
-                                                                         \
-  tmp0_m = PCKEV_XORI128_UB(in0, in1);                                   \
-  tmp1_m = PCKEV_XORI128_UB(in2, in3);                                   \
-  ILVR_D2_UB(dst1, dst0, dst3, dst2, tmp2_m, tmp3_m);                    \
-  AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m);           \
-  ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride);                              \
-}
+#define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3, dst0, dst1, dst2, dst3, \
+                                pdst, stride)                               \
+  {                                                                         \
+    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
+    uint8_t *pdst_m = (uint8_t *)(pdst);                                    \
+                                                                            \
+    tmp0_m = PCKEV_XORI128_UB(in0, in1);                                    \
+    tmp1_m = PCKEV_XORI128_UB(in2, in3);                                    \
+    ILVR_D2_UB(dst1, dst0, dst3, dst2, tmp2_m, tmp3_m);                     \
+    AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m);            \
+    ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride);                               \
+  }
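
Scalar view (hypothetical): the filtered 16-bit outputs are first narrowed to unsigned bytes via PCKEV_XORI128_UB above, then rounding-averaged with the prediction bytes and stored as four 8-byte rows. The per-lane average AVER_UB2_UB computes is the usual (a + b + 1) >> 1:

#include <stdint.h>

/* Hypothetical scalar core of the averaging step: rounding average of two
 * unsigned bytes, matching the MSA aver_u.b per-lane result. */
static uint8_t avg_round_ref(uint8_t a, uint8_t b) {
  return (uint8_t)(((uint32_t)a + b + 1) >> 1);
}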
 
 /* Description : Pack even byte elements and store byte vector in destination
                  memory
    Arguments   : Inputs - in0, in1, pdst
 */
-#define PCKEV_ST_SB(in0, in1, pdst) {             \
-  v16i8 tmp_m;                                    \
-                                                  \
-  tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0);  \
-  ST_SB(tmp_m, (pdst));                           \
-}
+#define PCKEV_ST_SB(in0, in1, pdst)                \
+  {                                                \
+    v16i8 tmp_m;                                   \
+                                                   \
+    tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0); \
+    ST_SB(tmp_m, (pdst));                          \
+  }
 
 /* Description : Horizontal 2-tap filter kernel code
    Arguments   : Inputs - in0, in1, mask, coeff, shift
 */
-#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift) ({    \
-  v16i8 tmp0_m;                                                \
-  v8u16 tmp1_m;                                                \
-                                                               \
-  tmp0_m = __msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0);  \
-  tmp1_m = __msa_dotp_u_h((v16u8)tmp0_m, (v16u8)coeff);        \
-  tmp1_m = (v8u16)__msa_srari_h((v8i16)tmp1_m, shift);         \
-                                                               \
-  tmp1_m;                                                      \
-})
-#endif  /* VPX_DSP_MIPS_MACROS_MSA_H_ */
+#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift)        \
+  ({                                                            \
+    v16i8 tmp0_m;                                               \
+    v8u16 tmp1_m;                                               \
+                                                                \
+    tmp0_m = __msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0); \
+    tmp1_m = __msa_dotp_u_h((v16u8)tmp0_m, (v16u8)coeff);       \
+    tmp1_m = (v8u16)__msa_srari_h((v8i16)tmp1_m, shift);        \
+                                                                \
+    tmp1_m;                                                     \
+  })
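
Per lane this is a plain 2-tap filter with round-to-nearest: vshf_b gathers each pixel pair selected by mask, dotp_u_h multiplies by the two taps and sums, and srari_h applies the rounding shift. A scalar sketch (hypothetical names; in this codebase the shift is FILTER_BITS, i.e. 7):

#include <stdint.h>

/* Hypothetical scalar equivalent of one HORIZ_2TAP_FILT_UH lane. */
static uint16_t horiz_2tap_ref(uint8_t p0, uint8_t p1, uint8_t c0, uint8_t c1,
                               int32_t shift) {
  const uint32_t sum = (uint32_t)p0 * c0 + (uint32_t)p1 * c1;
  return (uint16_t)((sum + (1u << (shift - 1))) >> shift); /* srari rounding */
}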
+#endif /* VPX_DSP_MIPS_MACROS_MSA_H_ */
diff --git a/vpx_dsp/mips/sad_msa.c b/vpx_dsp/mips/sad_msa.c
index 3bdec28e6ef058bc7d6f173d6f38736d29afd4c2..bfd9a690c19d6ff0b7e59955fe04e88998954592 100644
--- a/vpx_dsp/mips/sad_msa.c
+++ b/vpx_dsp/mips/sad_msa.c
@@ -11,12 +11,13 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/mips/macros_msa.h"
 
-#define SAD_INSVE_W4(RTYPE, in0, in1, in2, in3, out) {    \
-  out = (RTYPE)__msa_insve_w((v4i32)out, 0, (v4i32)in0);  \
-  out = (RTYPE)__msa_insve_w((v4i32)out, 1, (v4i32)in1);  \
-  out = (RTYPE)__msa_insve_w((v4i32)out, 2, (v4i32)in2);  \
-  out = (RTYPE)__msa_insve_w((v4i32)out, 3, (v4i32)in3);  \
-}
+#define SAD_INSVE_W4(RTYPE, in0, in1, in2, in3, out)       \
+  {                                                        \
+    out = (RTYPE)__msa_insve_w((v4i32)out, 0, (v4i32)in0); \
+    out = (RTYPE)__msa_insve_w((v4i32)out, 1, (v4i32)in1); \
+    out = (RTYPE)__msa_insve_w((v4i32)out, 2, (v4i32)in2); \
+    out = (RTYPE)__msa_insve_w((v4i32)out, 3, (v4i32)in3); \
+  }
 #define SAD_INSVE_W4_UB(...) SAD_INSVE_W4(v16u8, __VA_ARGS__)
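
Every kernel in this file accumulates the same quantity, the sum of absolute differences over a width x height block, differing only in block width, batching, and whether the reference is pre-averaged. A plain C reference (hypothetical, for orientation; the _avg variants average ref with second_pred first, and the x3/x8/x4d variants evaluate several reference candidates per pass):

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical scalar SAD reference: the value the *_msa kernels below
 * compute with SIMD loads and SAD_UB2_UH. */
static uint32_t sad_ref(const uint8_t *src, int32_t src_stride,
                        const uint8_t *ref, int32_t ref_stride,
                        int32_t width, int32_t height) {
  int32_t r, c;
  uint32_t sad = 0;
  for (r = 0; r < height; ++r) {
    for (c = 0; c < width; ++c) sad += (uint32_t)abs(src[c] - ref[c]);
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}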
 
 static uint32_t sad_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
@@ -58,8 +59,8 @@ static uint32_t sad_8width_msa(const uint8_t *src, int32_t src_stride,
     LD_UB4(ref, ref_stride, ref0, ref1, ref2, ref3);
     ref += (4 * ref_stride);
 
-    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,
-                src0, src1, ref0, ref1);
+    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1,
+                ref0, ref1);
     sad += SAD_UB2_UH(src0, src1, ref0, ref1);
   }
 
@@ -214,8 +215,8 @@ static void sad_8width_x3_msa(const uint8_t *src, int32_t src_stride,
     src += (4 * src_stride);
     LD_UB4(ref, ref_stride, ref00, ref11, ref22, ref33);
     ref += (4 * ref_stride);
-    PCKEV_D4_UB(src1, src0, src3, src2, ref11, ref00, ref33, ref22,
-                src0, src1, ref0, ref1);
+    PCKEV_D4_UB(src1, src0, src3, src2, ref11, ref00, ref33, ref22, src0, src1,
+                ref0, ref1);
     sad0 += SAD_UB2_UH(src0, src1, ref0, ref1);
 
     SLDI_B2_UB(ref00, ref11, ref00, ref11, ref00, ref11, 1);
@@ -473,8 +474,8 @@ static void sad_8width_x8_msa(const uint8_t *src, int32_t src_stride,
     src += (4 * src_stride);
     LD_UB4(ref, ref_stride, ref00, ref11, ref22, ref33);
     ref += (4 * ref_stride);
-    PCKEV_D4_UB(src1, src0, src3, src2, ref11, ref00, ref33, ref22,
-                src0, src1, ref0, ref1);
+    PCKEV_D4_UB(src1, src0, src3, src2, ref11, ref00, ref33, ref22, src0, src1,
+                ref0, ref1);
     sad0 += SAD_UB2_UH(src0, src1, ref0, ref1);
 
     SLDI_B2_UB(ref00, ref11, ref00, ref11, ref00, ref11, 1);
@@ -793,9 +794,9 @@ static void sad_64width_x8_msa(const uint8_t *src, int32_t src_stride,
 }
 
 static void sad_4width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
-                               const uint8_t * const aref_ptr[],
-                               int32_t ref_stride,
-                               int32_t height, uint32_t *sad_array) {
+                               const uint8_t *const aref_ptr[],
+                               int32_t ref_stride, int32_t height,
+                               uint32_t *sad_array) {
   const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
   int32_t ht_cnt;
   uint32_t src0, src1, src2, src3;
@@ -854,9 +855,9 @@ static void sad_4width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
 }
 
 static void sad_8width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
-                               const uint8_t * const aref_ptr[],
-                               int32_t ref_stride,
-                               int32_t height, uint32_t *sad_array) {
+                               const uint8_t *const aref_ptr[],
+                               int32_t ref_stride, int32_t height,
+                               uint32_t *sad_array) {
   int32_t ht_cnt;
   const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
   v16u8 src0, src1, src2, src3;
@@ -905,9 +906,9 @@ static void sad_8width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
 }
 
 static void sad_16width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
-                                const uint8_t * const aref_ptr[],
-                                int32_t ref_stride,
-                                int32_t height, uint32_t *sad_array) {
+                                const uint8_t *const aref_ptr[],
+                                int32_t ref_stride, int32_t height,
+                                uint32_t *sad_array) {
   int32_t ht_cnt;
   const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
   v16u8 src, ref0, ref1, ref2, ref3, diff;
@@ -970,9 +971,9 @@ static void sad_16width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
 }
 
 static void sad_32width_x4d_msa(const uint8_t *src, int32_t src_stride,
-                                const uint8_t * const aref_ptr[],
-                                int32_t ref_stride,
-                                int32_t height, uint32_t *sad_array) {
+                                const uint8_t *const aref_ptr[],
+                                int32_t ref_stride, int32_t height,
+                                uint32_t *sad_array) {
   const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
   int32_t ht_cnt;
   v16u8 src0, src1, ref0, ref1;
@@ -1014,9 +1015,9 @@ static void sad_32width_x4d_msa(const uint8_t *src, int32_t src_stride,
 }
 
 static void sad_64width_x4d_msa(const uint8_t *src, int32_t src_stride,
-                                const uint8_t * const aref_ptr[],
-                                int32_t ref_stride,
-                                int32_t height, uint32_t *sad_array) {
+                                const uint8_t *const aref_ptr[],
+                                int32_t ref_stride, int32_t height,
+                                uint32_t *sad_array) {
   const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
   int32_t ht_cnt;
   v16u8 src0, src1, src2, src3;
@@ -1114,8 +1115,8 @@ static uint32_t avgsad_8width_msa(const uint8_t *src, int32_t src_stride,
     ref += (4 * ref_stride);
     LD_UB2(sec_pred, 16, pred0, pred1);
     sec_pred += 32;
-    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,
-                src0, src1, ref0, ref1);
+    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1,
+                ref0, ref1);
     AVER_UB2_UB(pred0, ref0, pred1, ref1, diff0, diff1);
     sad += SAD_UB2_UH(src0, src1, diff0, diff1);
   }
@@ -1213,8 +1214,8 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride,
     ref += ref_stride;
     LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
     sec_pred += 64;
-    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3,
-                comp0, comp1, comp2, comp3);
+    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, comp0,
+                comp1, comp2, comp3);
     sad0 += SAD_UB2_UH(src0, src1, comp0, comp1);
     sad1 += SAD_UB2_UH(src2, src3, comp2, comp3);
 
@@ -1224,8 +1225,8 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride,
     ref += ref_stride;
     LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
     sec_pred += 64;
-    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3,
-                comp0, comp1, comp2, comp3);
+    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, comp0,
+                comp1, comp2, comp3);
     sad0 += SAD_UB2_UH(src0, src1, comp0, comp1);
     sad1 += SAD_UB2_UH(src2, src3, comp2, comp3);
 
@@ -1235,8 +1236,8 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride,
     ref += ref_stride;
     LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
     sec_pred += 64;
-    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3,
-                comp0, comp1, comp2, comp3);
+    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, comp0,
+                comp1, comp2, comp3);
     sad0 += SAD_UB2_UH(src0, src1, comp0, comp1);
     sad1 += SAD_UB2_UH(src2, src3, comp2, comp3);
 
@@ -1246,8 +1247,8 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride,
     ref += ref_stride;
     LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
     sec_pred += 64;
-    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3,
-                comp0, comp1, comp2, comp3);
+    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, comp0,
+                comp1, comp2, comp3);
     sad0 += SAD_UB2_UH(src0, src1, comp0, comp1);
     sad1 += SAD_UB2_UH(src2, src3, comp2, comp3);
   }
@@ -1258,268 +1259,270 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride,
   return HADD_SW_S32(sad);
 }
 
-#define VPX_SAD_4xHEIGHT_MSA(height)                                        \
-uint32_t vpx_sad4x##height##_msa(const uint8_t *src, int32_t src_stride,    \
-                                 const uint8_t *ref, int32_t ref_stride) {  \
-  return sad_4width_msa(src, src_stride,  ref, ref_stride, height);         \
-}
+#define VPX_SAD_4xHEIGHT_MSA(height)                                         \
+  uint32_t vpx_sad4x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+                                   const uint8_t *ref, int32_t ref_stride) { \
+    return sad_4width_msa(src, src_stride, ref, ref_stride, height);         \
+  }
 
-#define VPX_SAD_8xHEIGHT_MSA(height)                                        \
-uint32_t vpx_sad8x##height##_msa(const uint8_t *src, int32_t src_stride,    \
-                                 const uint8_t *ref, int32_t ref_stride) {  \
-  return sad_8width_msa(src, src_stride, ref, ref_stride, height);          \
-}
+#define VPX_SAD_8xHEIGHT_MSA(height)                                         \
+  uint32_t vpx_sad8x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+                                   const uint8_t *ref, int32_t ref_stride) { \
+    return sad_8width_msa(src, src_stride, ref, ref_stride, height);         \
+  }
 
-#define VPX_SAD_16xHEIGHT_MSA(height)                                        \
-uint32_t vpx_sad16x##height##_msa(const uint8_t *src, int32_t src_stride,    \
-                                  const uint8_t *ref, int32_t ref_stride) {  \
-  return sad_16width_msa(src, src_stride, ref, ref_stride, height);          \
-}
+#define VPX_SAD_16xHEIGHT_MSA(height)                                         \
+  uint32_t vpx_sad16x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+                                    const uint8_t *ref, int32_t ref_stride) { \
+    return sad_16width_msa(src, src_stride, ref, ref_stride, height);         \
+  }
 
-#define VPX_SAD_32xHEIGHT_MSA(height)                                        \
-uint32_t vpx_sad32x##height##_msa(const uint8_t *src, int32_t src_stride,    \
-                                  const uint8_t *ref, int32_t ref_stride) {  \
-  return sad_32width_msa(src, src_stride, ref, ref_stride, height);          \
-}
+#define VPX_SAD_32xHEIGHT_MSA(height)                                         \
+  uint32_t vpx_sad32x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+                                    const uint8_t *ref, int32_t ref_stride) { \
+    return sad_32width_msa(src, src_stride, ref, ref_stride, height);         \
+  }
 
-#define VPX_SAD_64xHEIGHT_MSA(height)                                        \
-uint32_t vpx_sad64x##height##_msa(const uint8_t *src, int32_t src_stride,    \
-                                  const uint8_t *ref, int32_t ref_stride) {  \
-  return sad_64width_msa(src, src_stride, ref, ref_stride, height);          \
-}
+#define VPX_SAD_64xHEIGHT_MSA(height)                                         \
+  uint32_t vpx_sad64x##height##_msa(const uint8_t *src, int32_t src_stride,   \
+                                    const uint8_t *ref, int32_t ref_stride) { \
+    return sad_64width_msa(src, src_stride, ref, ref_stride, height);         \
+  }
 
-#define VPX_SAD_4xHEIGHTx3_MSA(height)                                  \
-void vpx_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride,  \
-                               const uint8_t *ref, int32_t ref_stride,  \
-                               uint32_t *sads) {                        \
-  sad_4width_x3_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_4xHEIGHTx3_MSA(height)                                   \
+  void vpx_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+                                 const uint8_t *ref, int32_t ref_stride, \
+                                 uint32_t *sads) {                       \
+    sad_4width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_8xHEIGHTx3_MSA(height)                                  \
-void vpx_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride,  \
-                               const uint8_t *ref, int32_t ref_stride,  \
-                               uint32_t *sads) {                        \
-  sad_8width_x3_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_8xHEIGHTx3_MSA(height)                                   \
+  void vpx_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+                                 const uint8_t *ref, int32_t ref_stride, \
+                                 uint32_t *sads) {                       \
+    sad_8width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_16xHEIGHTx3_MSA(height)                                  \
-void vpx_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride,  \
-                                const uint8_t *ref, int32_t ref_stride,  \
-                                uint32_t *sads) {                        \
-  sad_16width_x3_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_16xHEIGHTx3_MSA(height)                                   \
+  void vpx_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+                                  const uint8_t *ref, int32_t ref_stride, \
+                                  uint32_t *sads) {                       \
+    sad_16width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_32xHEIGHTx3_MSA(height)                                  \
-void vpx_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride,  \
-                                const uint8_t *ref, int32_t ref_stride,  \
-                                uint32_t *sads) {                        \
-  sad_32width_x3_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_32xHEIGHTx3_MSA(height)                                   \
+  void vpx_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+                                  const uint8_t *ref, int32_t ref_stride, \
+                                  uint32_t *sads) {                       \
+    sad_32width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_64xHEIGHTx3_MSA(height)                                  \
-void vpx_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride,  \
-                                const uint8_t *ref, int32_t ref_stride,  \
-                                uint32_t *sads) {                        \
-  sad_64width_x3_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_64xHEIGHTx3_MSA(height)                                   \
+  void vpx_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \
+                                  const uint8_t *ref, int32_t ref_stride, \
+                                  uint32_t *sads) {                       \
+    sad_64width_x3_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_4xHEIGHTx8_MSA(height)                                  \
-void vpx_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride,  \
-                               const uint8_t *ref, int32_t ref_stride,  \
-                               uint32_t *sads) {                        \
-  sad_4width_x8_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_4xHEIGHTx8_MSA(height)                                   \
+  void vpx_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+                                 const uint8_t *ref, int32_t ref_stride, \
+                                 uint32_t *sads) {                       \
+    sad_4width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_8xHEIGHTx8_MSA(height)                                  \
-void vpx_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride,  \
-                               const uint8_t *ref, int32_t ref_stride,  \
-                               uint32_t *sads) {                        \
-  sad_8width_x8_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_8xHEIGHTx8_MSA(height)                                   \
+  void vpx_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+                                 const uint8_t *ref, int32_t ref_stride, \
+                                 uint32_t *sads) {                       \
+    sad_8width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_16xHEIGHTx8_MSA(height)                                  \
-void vpx_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride,  \
-                                const uint8_t *ref, int32_t ref_stride,  \
-                                uint32_t *sads) {                        \
-  sad_16width_x8_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_16xHEIGHTx8_MSA(height)                                   \
+  void vpx_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+                                  const uint8_t *ref, int32_t ref_stride, \
+                                  uint32_t *sads) {                       \
+    sad_16width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_32xHEIGHTx8_MSA(height)                                  \
-void vpx_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride,  \
-                                const uint8_t *ref, int32_t ref_stride,  \
-                                uint32_t *sads) {                        \
-  sad_32width_x8_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_32xHEIGHTx8_MSA(height)                                   \
+  void vpx_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+                                  const uint8_t *ref, int32_t ref_stride, \
+                                  uint32_t *sads) {                       \
+    sad_32width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_64xHEIGHTx8_MSA(height)                                  \
-void vpx_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride,  \
-                                const uint8_t *ref, int32_t ref_stride,  \
-                                uint32_t *sads) {                        \
-  sad_64width_x8_msa(src, src_stride, ref, ref_stride, height, sads);    \
-}
+#define VPX_SAD_64xHEIGHTx8_MSA(height)                                   \
+  void vpx_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \
+                                  const uint8_t *ref, int32_t ref_stride, \
+                                  uint32_t *sads) {                       \
+    sad_64width_x8_msa(src, src_stride, ref, ref_stride, height, sads);   \
+  }
 
-#define VPX_SAD_4xHEIGHTx4D_MSA(height)                                  \
-void vpx_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
-                                const uint8_t *const refs[],             \
-                                int32_t ref_stride, uint32_t *sads) {    \
-  sad_4width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);   \
-}
+#define VPX_SAD_4xHEIGHTx4D_MSA(height)                                   \
+  void vpx_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+                                  const uint8_t *const refs[],            \
+                                  int32_t ref_stride, uint32_t *sads) {   \
+    sad_4width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
+  }
 
-#define VPX_SAD_8xHEIGHTx4D_MSA(height)                                  \
-void vpx_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
-                                const uint8_t *const refs[],             \
-                                int32_t ref_stride, uint32_t *sads) {    \
-  sad_8width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);   \
-}
+#define VPX_SAD_8xHEIGHTx4D_MSA(height)                                   \
+  void vpx_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+                                  const uint8_t *const refs[],            \
+                                  int32_t ref_stride, uint32_t *sads) {   \
+    sad_8width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
+  }
 
-#define VPX_SAD_16xHEIGHTx4D_MSA(height)                                  \
-void vpx_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
-                                 const uint8_t *const refs[],             \
-                                 int32_t ref_stride, uint32_t *sads) {    \
-  sad_16width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);   \
-}
+#define VPX_SAD_16xHEIGHTx4D_MSA(height)                                   \
+  void vpx_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+                                   const uint8_t *const refs[],            \
+                                   int32_t ref_stride, uint32_t *sads) {   \
+    sad_16width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
+  }
 
-#define VPX_SAD_32xHEIGHTx4D_MSA(height)                                  \
-void vpx_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
-                                 const uint8_t *const refs[],             \
-                                 int32_t ref_stride, uint32_t *sads) {    \
-  sad_32width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);   \
-}
+#define VPX_SAD_32xHEIGHTx4D_MSA(height)                                   \
+  void vpx_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+                                   const uint8_t *const refs[],            \
+                                   int32_t ref_stride, uint32_t *sads) {   \
+    sad_32width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
+  }
 
-#define VPX_SAD_64xHEIGHTx4D_MSA(height)                                  \
-void vpx_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
-                                 const uint8_t *const refs[],             \
-                                 int32_t ref_stride, uint32_t *sads) {    \
-  sad_64width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);   \
-}
+#define VPX_SAD_64xHEIGHTx4D_MSA(height)                                   \
+  void vpx_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \
+                                   const uint8_t *const refs[],            \
+                                   int32_t ref_stride, uint32_t *sads) {   \
+    sad_64width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
+  }
 
-#define VPX_AVGSAD_4xHEIGHT_MSA(height)                                       \
-uint32_t vpx_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
-                                     const uint8_t *ref, int32_t ref_stride,  \
-                                     const uint8_t *second_pred) {            \
-  return avgsad_4width_msa(src, src_stride, ref, ref_stride,                  \
-                           height, second_pred);                              \
-}
+#define VPX_AVGSAD_4xHEIGHT_MSA(height)                                        \
+  uint32_t vpx_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
+                                       const uint8_t *ref, int32_t ref_stride, \
+                                       const uint8_t *second_pred) {           \
+    return avgsad_4width_msa(src, src_stride, ref, ref_stride, height,         \
+                             second_pred);                                     \
+  }
 
-#define VPX_AVGSAD_8xHEIGHT_MSA(height)                                       \
-uint32_t vpx_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
-                                     const uint8_t *ref, int32_t ref_stride,  \
-                                     const uint8_t *second_pred) {            \
-  return avgsad_8width_msa(src, src_stride, ref, ref_stride,                  \
-                           height, second_pred);                              \
-}
+#define VPX_AVGSAD_8xHEIGHT_MSA(height)                                        \
+  uint32_t vpx_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \
+                                       const uint8_t *ref, int32_t ref_stride, \
+                                       const uint8_t *second_pred) {           \
+    return avgsad_8width_msa(src, src_stride, ref, ref_stride, height,         \
+                             second_pred);                                     \
+  }
 
-#define VPX_AVGSAD_16xHEIGHT_MSA(height)                                       \
-uint32_t vpx_sad16x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
-                                      const uint8_t *ref, int32_t ref_stride,  \
-                                      const uint8_t *second_pred) {            \
-  return avgsad_16width_msa(src, src_stride, ref, ref_stride,                  \
-                            height, second_pred);                              \
-}
+#define VPX_AVGSAD_16xHEIGHT_MSA(height)                                \
+  uint32_t vpx_sad16x##height##_avg_msa(                                \
+      const uint8_t *src, int32_t src_stride, const uint8_t *ref,       \
+      int32_t ref_stride, const uint8_t *second_pred) {                 \
+    return avgsad_16width_msa(src, src_stride, ref, ref_stride, height, \
+                              second_pred);                             \
+  }
 
-#define VPX_AVGSAD_32xHEIGHT_MSA(height)                                       \
-uint32_t vpx_sad32x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
-                                      const uint8_t *ref, int32_t ref_stride,  \
-                                      const uint8_t *second_pred) {            \
-  return avgsad_32width_msa(src, src_stride, ref, ref_stride,                  \
-                            height, second_pred);                              \
-}
+#define VPX_AVGSAD_32xHEIGHT_MSA(height)                                \
+  uint32_t vpx_sad32x##height##_avg_msa(                                \
+      const uint8_t *src, int32_t src_stride, const uint8_t *ref,       \
+      int32_t ref_stride, const uint8_t *second_pred) {                 \
+    return avgsad_32width_msa(src, src_stride, ref, ref_stride, height, \
+                              second_pred);                             \
+  }
 
-#define VPX_AVGSAD_64xHEIGHT_MSA(height)                                       \
-uint32_t vpx_sad64x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
-                                      const uint8_t *ref, int32_t ref_stride,  \
-                                      const uint8_t *second_pred) {            \
-  return avgsad_64width_msa(src, src_stride, ref, ref_stride,                  \
-                            height, second_pred);                              \
-}
+#define VPX_AVGSAD_64xHEIGHT_MSA(height)                                \
+  uint32_t vpx_sad64x##height##_avg_msa(                                \
+      const uint8_t *src, int32_t src_stride, const uint8_t *ref,       \
+      int32_t ref_stride, const uint8_t *second_pred) {                 \
+    return avgsad_64width_msa(src, src_stride, ref, ref_stride, height, \
+                              second_pred);                             \
+  }
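
One detail in this hunk is not just reformatting: the instantiation lines below drop their trailing semicolons. Each VPX_SAD_* / VPX_AVGSAD_* macro now expands to a complete function definition ending in '}', so a following ';' would leave a stray empty declaration at file scope (a -pedantic warning in C90). The clang-format off/on guard keeps the one-instantiation-per-line layout from being rewrapped. Sketching the expansion of one line:

/* VPX_SAD_4xHEIGHT_MSA(4) expands to a full definition; no ';' needed: */
uint32_t vpx_sad4x4_msa(const uint8_t *src, int32_t src_stride,
                        const uint8_t *ref, int32_t ref_stride) {
  return sad_4width_msa(src, src_stride, ref, ref_stride, 4);
}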
 
+/* clang-format off */
 // 64x64
-VPX_SAD_64xHEIGHT_MSA(64);
-VPX_SAD_64xHEIGHTx3_MSA(64);
-VPX_SAD_64xHEIGHTx8_MSA(64);
-VPX_SAD_64xHEIGHTx4D_MSA(64);
-VPX_AVGSAD_64xHEIGHT_MSA(64);
+VPX_SAD_64xHEIGHT_MSA(64)
+VPX_SAD_64xHEIGHTx3_MSA(64)
+VPX_SAD_64xHEIGHTx8_MSA(64)
+VPX_SAD_64xHEIGHTx4D_MSA(64)
+VPX_AVGSAD_64xHEIGHT_MSA(64)
 
 // 64x32
-VPX_SAD_64xHEIGHT_MSA(32);
-VPX_SAD_64xHEIGHTx3_MSA(32);
-VPX_SAD_64xHEIGHTx8_MSA(32);
-VPX_SAD_64xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_64xHEIGHT_MSA(32);
+VPX_SAD_64xHEIGHT_MSA(32)
+VPX_SAD_64xHEIGHTx3_MSA(32)
+VPX_SAD_64xHEIGHTx8_MSA(32)
+VPX_SAD_64xHEIGHTx4D_MSA(32)
+VPX_AVGSAD_64xHEIGHT_MSA(32)
 
 // 32x64
-VPX_SAD_32xHEIGHT_MSA(64);
-VPX_SAD_32xHEIGHTx3_MSA(64);
-VPX_SAD_32xHEIGHTx8_MSA(64);
-VPX_SAD_32xHEIGHTx4D_MSA(64);
-VPX_AVGSAD_32xHEIGHT_MSA(64);
+VPX_SAD_32xHEIGHT_MSA(64)
+VPX_SAD_32xHEIGHTx3_MSA(64)
+VPX_SAD_32xHEIGHTx8_MSA(64)
+VPX_SAD_32xHEIGHTx4D_MSA(64)
+VPX_AVGSAD_32xHEIGHT_MSA(64)
 
 // 32x32
-VPX_SAD_32xHEIGHT_MSA(32);
-VPX_SAD_32xHEIGHTx3_MSA(32);
-VPX_SAD_32xHEIGHTx8_MSA(32);
-VPX_SAD_32xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_32xHEIGHT_MSA(32);
+VPX_SAD_32xHEIGHT_MSA(32)
+VPX_SAD_32xHEIGHTx3_MSA(32)
+VPX_SAD_32xHEIGHTx8_MSA(32)
+VPX_SAD_32xHEIGHTx4D_MSA(32)
+VPX_AVGSAD_32xHEIGHT_MSA(32)
 
 // 32x16
-VPX_SAD_32xHEIGHT_MSA(16);
-VPX_SAD_32xHEIGHTx3_MSA(16);
-VPX_SAD_32xHEIGHTx8_MSA(16);
-VPX_SAD_32xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_32xHEIGHT_MSA(16);
+VPX_SAD_32xHEIGHT_MSA(16)
+VPX_SAD_32xHEIGHTx3_MSA(16)
+VPX_SAD_32xHEIGHTx8_MSA(16)
+VPX_SAD_32xHEIGHTx4D_MSA(16)
+VPX_AVGSAD_32xHEIGHT_MSA(16)
 
 // 16x32
-VPX_SAD_16xHEIGHT_MSA(32);
-VPX_SAD_16xHEIGHTx3_MSA(32);
-VPX_SAD_16xHEIGHTx8_MSA(32);
-VPX_SAD_16xHEIGHTx4D_MSA(32);
-VPX_AVGSAD_16xHEIGHT_MSA(32);
+VPX_SAD_16xHEIGHT_MSA(32)
+VPX_SAD_16xHEIGHTx3_MSA(32)
+VPX_SAD_16xHEIGHTx8_MSA(32)
+VPX_SAD_16xHEIGHTx4D_MSA(32)
+VPX_AVGSAD_16xHEIGHT_MSA(32)
 
 // 16x16
-VPX_SAD_16xHEIGHT_MSA(16);
-VPX_SAD_16xHEIGHTx3_MSA(16);
-VPX_SAD_16xHEIGHTx8_MSA(16);
-VPX_SAD_16xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_16xHEIGHT_MSA(16);
+VPX_SAD_16xHEIGHT_MSA(16)
+VPX_SAD_16xHEIGHTx3_MSA(16)
+VPX_SAD_16xHEIGHTx8_MSA(16)
+VPX_SAD_16xHEIGHTx4D_MSA(16)
+VPX_AVGSAD_16xHEIGHT_MSA(16)
 
 // 16x8
-VPX_SAD_16xHEIGHT_MSA(8);
-VPX_SAD_16xHEIGHTx3_MSA(8);
-VPX_SAD_16xHEIGHTx8_MSA(8);
-VPX_SAD_16xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_16xHEIGHT_MSA(8);
+VPX_SAD_16xHEIGHT_MSA(8)
+VPX_SAD_16xHEIGHTx3_MSA(8)
+VPX_SAD_16xHEIGHTx8_MSA(8)
+VPX_SAD_16xHEIGHTx4D_MSA(8)
+VPX_AVGSAD_16xHEIGHT_MSA(8)
 
 // 8x16
-VPX_SAD_8xHEIGHT_MSA(16);
-VPX_SAD_8xHEIGHTx3_MSA(16);
-VPX_SAD_8xHEIGHTx8_MSA(16);
-VPX_SAD_8xHEIGHTx4D_MSA(16);
-VPX_AVGSAD_8xHEIGHT_MSA(16);
+VPX_SAD_8xHEIGHT_MSA(16)
+VPX_SAD_8xHEIGHTx3_MSA(16)
+VPX_SAD_8xHEIGHTx8_MSA(16)
+VPX_SAD_8xHEIGHTx4D_MSA(16)
+VPX_AVGSAD_8xHEIGHT_MSA(16)
 
 // 8x8
-VPX_SAD_8xHEIGHT_MSA(8);
-VPX_SAD_8xHEIGHTx3_MSA(8);
-VPX_SAD_8xHEIGHTx8_MSA(8);
-VPX_SAD_8xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_8xHEIGHT_MSA(8);
+VPX_SAD_8xHEIGHT_MSA(8)
+VPX_SAD_8xHEIGHTx3_MSA(8)
+VPX_SAD_8xHEIGHTx8_MSA(8)
+VPX_SAD_8xHEIGHTx4D_MSA(8)
+VPX_AVGSAD_8xHEIGHT_MSA(8)
 
 // 8x4
-VPX_SAD_8xHEIGHT_MSA(4);
-VPX_SAD_8xHEIGHTx3_MSA(4);
-VPX_SAD_8xHEIGHTx8_MSA(4);
-VPX_SAD_8xHEIGHTx4D_MSA(4);
-VPX_AVGSAD_8xHEIGHT_MSA(4);
+VPX_SAD_8xHEIGHT_MSA(4)
+VPX_SAD_8xHEIGHTx3_MSA(4)
+VPX_SAD_8xHEIGHTx8_MSA(4)
+VPX_SAD_8xHEIGHTx4D_MSA(4)
+VPX_AVGSAD_8xHEIGHT_MSA(4)
 
 // 4x8
-VPX_SAD_4xHEIGHT_MSA(8);
-VPX_SAD_4xHEIGHTx3_MSA(8);
-VPX_SAD_4xHEIGHTx8_MSA(8);
-VPX_SAD_4xHEIGHTx4D_MSA(8);
-VPX_AVGSAD_4xHEIGHT_MSA(8);
+VPX_SAD_4xHEIGHT_MSA(8)
+VPX_SAD_4xHEIGHTx3_MSA(8)
+VPX_SAD_4xHEIGHTx8_MSA(8)
+VPX_SAD_4xHEIGHTx4D_MSA(8)
+VPX_AVGSAD_4xHEIGHT_MSA(8)
 
 // 4x4
-VPX_SAD_4xHEIGHT_MSA(4);
-VPX_SAD_4xHEIGHTx3_MSA(4);
-VPX_SAD_4xHEIGHTx8_MSA(4);
-VPX_SAD_4xHEIGHTx4D_MSA(4);
-VPX_AVGSAD_4xHEIGHT_MSA(4);
+VPX_SAD_4xHEIGHT_MSA(4)
+VPX_SAD_4xHEIGHTx3_MSA(4)
+VPX_SAD_4xHEIGHTx8_MSA(4)
+VPX_SAD_4xHEIGHTx4D_MSA(4)
+VPX_AVGSAD_4xHEIGHT_MSA(4)
+    /* clang-format on */
diff --git a/vpx_dsp/mips/sub_pixel_variance_msa.c b/vpx_dsp/mips/sub_pixel_variance_msa.c
index a592a2d078e80b361a15c759ecda263f5c811b3b..b893d7610f8c0b89ae651eedc591a44386cc738a 100644
--- a/vpx_dsp/mips/sub_pixel_variance_msa.c
+++ b/vpx_dsp/mips/sub_pixel_variance_msa.c
@@ -14,29 +14,23 @@
 #include "vpx_dsp/variance.h"
 
 static const uint8_t bilinear_filters_msa[8][2] = {
-  { 128,   0, },
-  { 112,  16, },
-  {  96,  32, },
-  {  80,  48, },
-  {  64,  64, },
-  {  48,  80, },
-  {  32,  96, },
-  {  16, 112, },
+  { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
+  { 64, 64 }, { 48, 80 },  { 32, 96 }, { 16, 112 },
 };
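
Each row is the 2-tap bilinear kernel for subpel position k/8, and the taps of every row sum to 128 (1 << FILTER_BITS), which is what the rounding shift by FILTER_BITS in the filter kernels normalises away. A quick sanity check (hypothetical snippet, assuming FILTER_BITS == 7):

#include <assert.h>

/* Hypothetical check: every bilinear kernel's taps sum to 1 << FILTER_BITS. */
static void check_bilinear_filters(void) {
  int32_t k;
  for (k = 0; k < 8; ++k) {
    assert(bilinear_filters_msa[k][0] + bilinear_filters_msa[k][1] == 128);
  }
}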
 
-#define CALC_MSE_AVG_B(src, ref, var, sub) {                       \
-  v16u8 src_l0_m, src_l1_m;                                        \
-  v8i16 res_l0_m, res_l1_m;                                        \
-                                                                   \
-  ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m);                       \
-  HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m);             \
-  DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var);  \
-                                                                   \
-  sub += res_l0_m + res_l1_m;                                      \
-}
+#define CALC_MSE_AVG_B(src, ref, var, sub)                          \
+  {                                                                 \
+    v16u8 src_l0_m, src_l1_m;                                       \
+    v8i16 res_l0_m, res_l1_m;                                       \
+                                                                    \
+    ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m);                      \
+    HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m);            \
+    DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
+                                                                    \
+    sub += res_l0_m + res_l1_m;                                     \
+  }
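
Per pixel pair this accumulates d = src - ref into two running sums: 'var' gathers d * d via the widening dot product of the halfword differences with themselves, and 'sub' gathers d itself. A scalar sketch with invented names:

#include <stdint.h>

/* Hypothetical scalar equivalent of CALC_MSE_AVG_B for a single pixel. */
static void calc_mse_avg_ref(uint8_t s, uint8_t r, int64_t *var,
                             int32_t *sub) {
  const int32_t d = (int32_t)s - (int32_t)r;
  *var += (int64_t)d * d; /* sum of squared differences (SSE) */
  *sub += d;              /* sum of differences */
}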
 
-#define VARIANCE_WxH(sse, diff, shift) \
-  sse - (((uint32_t)diff * diff) >> shift)
+#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
 
 #define VARIANCE_LARGE_WxH(sse, diff, shift) \
   sse - (((int64_t)diff * diff) >> shift)
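
Both macros apply the usual shortcut variance = SSE - sum^2 / N, where N = 2^shift is the pixel count of the block; the large variant widens to int64_t because for blocks bigger than 16x16 the squared sum no longer fits in 32 bits. Hypothetical usage for a 16x16 block (N = 256, shift = 8):

/* sse and diff as accumulated by CALC_MSE_AVG_B over a 16x16 block. */
static uint32_t variance_16x16_example(uint32_t sse, int32_t diff) {
  return VARIANCE_WxH(sse, diff, 8); /* sse - (diff * diff) / 256 */
}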
@@ -45,8 +39,7 @@ static uint32_t avg_sse_diff_4width_msa(const uint8_t *src_ptr,
                                         int32_t src_stride,
                                         const uint8_t *ref_ptr,
                                         int32_t ref_stride,
-                                        const uint8_t *sec_pred,
-                                        int32_t height,
+                                        const uint8_t *sec_pred, int32_t height,
                                         int32_t *diff) {
   int32_t ht_cnt;
   uint32_t src0, src1, src2, src3;
@@ -81,8 +74,7 @@ static uint32_t avg_sse_diff_8width_msa(const uint8_t *src_ptr,
                                         int32_t src_stride,
                                         const uint8_t *ref_ptr,
                                         int32_t ref_stride,
-                                        const uint8_t *sec_pred,
-                                        int32_t height,
+                                        const uint8_t *sec_pred, int32_t height,
                                         int32_t *diff) {
   int32_t ht_cnt;
   v16u8 src0, src1, src2, src3;
@@ -99,8 +91,8 @@ static uint32_t avg_sse_diff_8width_msa(const uint8_t *src_ptr,
     LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
     ref_ptr += (4 * ref_stride);
 
-    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,
-                src0, src1, ref0, ref1);
+    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1,
+                ref0, ref1);
     AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
     CALC_MSE_AVG_B(src0, ref0, var, avg);
     CALC_MSE_AVG_B(src1, ref1, var, avg);
@@ -117,8 +109,7 @@ static uint32_t avg_sse_diff_16width_msa(const uint8_t *src_ptr,
                                          const uint8_t *ref_ptr,
                                          int32_t ref_stride,
                                          const uint8_t *sec_pred,
-                                         int32_t height,
-                                         int32_t *diff) {
+                                         int32_t height, int32_t *diff) {
   int32_t ht_cnt;
   v16u8 src, ref, pred;
   v8i16 avg = { 0 };
@@ -173,8 +164,7 @@ static uint32_t avg_sse_diff_32width_msa(const uint8_t *src_ptr,
                                          const uint8_t *ref_ptr,
                                          int32_t ref_stride,
                                          const uint8_t *sec_pred,
-                                         int32_t height,
-                                         int32_t *diff) {
+                                         int32_t height, int32_t *diff) {
   int32_t ht_cnt;
   v16u8 src0, src1, ref0, ref1, pred0, pred1;
   v8i16 avg = { 0 };
@@ -232,8 +222,7 @@ static uint32_t avg_sse_diff_32x64_msa(const uint8_t *src_ptr,
                                        int32_t src_stride,
                                        const uint8_t *ref_ptr,
                                        int32_t ref_stride,
-                                       const uint8_t *sec_pred,
-                                       int32_t *diff) {
+                                       const uint8_t *sec_pred, int32_t *diff) {
   int32_t ht_cnt;
   v16u8 src0, src1, ref0, ref1, pred0, pred1;
   v8i16 avg0 = { 0 };
@@ -293,8 +282,7 @@ static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr,
                                        int32_t src_stride,
                                        const uint8_t *ref_ptr,
                                        int32_t ref_stride,
-                                       const uint8_t *sec_pred,
-                                       int32_t *diff) {
+                                       const uint8_t *sec_pred, int32_t *diff) {
   int32_t ht_cnt;
   v16u8 src0, src1, src2, src3;
   v16u8 ref0, ref1, ref2, ref3;
@@ -310,8 +298,8 @@ static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr,
     src_ptr += src_stride;
     LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
     ref_ptr += ref_stride;
-    AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,
-                src0, src1, src2, src3);
+    AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1,
+                src2, src3);
     CALC_MSE_AVG_B(src0, ref0, var, avg0);
     CALC_MSE_AVG_B(src2, ref2, var, avg0);
     CALC_MSE_AVG_B(src1, ref1, var, avg1);
@@ -323,8 +311,8 @@ static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr,
     src_ptr += src_stride;
     LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
     ref_ptr += ref_stride;
-    AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,
-                src0, src1, src2, src3);
+    AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1,
+                src2, src3);
     CALC_MSE_AVG_B(src0, ref0, var, avg0);
     CALC_MSE_AVG_B(src2, ref2, var, avg0);
     CALC_MSE_AVG_B(src1, ref1, var, avg1);
@@ -343,8 +331,7 @@ static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr,
                                        int32_t src_stride,
                                        const uint8_t *ref_ptr,
                                        int32_t ref_stride,
-                                       const uint8_t *sec_pred,
-                                       int32_t *diff) {
+                                       const uint8_t *sec_pred, int32_t *diff) {
   int32_t ht_cnt;
   v16u8 src0, src1, src2, src3;
   v16u8 ref0, ref1, ref2, ref3;
@@ -362,8 +349,8 @@ static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr,
     src_ptr += src_stride;
     LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
     ref_ptr += ref_stride;
-    AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,
-                src0, src1, src2, src3);
+    AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1,
+                src2, src3);
     CALC_MSE_AVG_B(src0, ref0, var, avg0);
     CALC_MSE_AVG_B(src1, ref1, var, avg1);
     CALC_MSE_AVG_B(src2, ref2, var, avg2);
@@ -375,8 +362,8 @@ static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr,
     src_ptr += src_stride;
     LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
     ref_ptr += ref_stride;
-    AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,
-                src0, src1, src2, src3);
+    AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1,
+                src2, src3);
     CALC_MSE_AVG_B(src0, ref0, var, avg0);
     CALC_MSE_AVG_B(src1, ref1, var, avg1);
     CALC_MSE_AVG_B(src2, ref2, var, avg2);
@@ -392,13 +379,9 @@ static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_4width_h_msa(const uint8_t *src,
-                                                int32_t src_stride,
-                                                const uint8_t *dst,
-                                                int32_t dst_stride,
-                                                const uint8_t *filter,
-                                                int32_t height,
-                                                int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_4width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   uint32_t ref0, ref1, ref2, ref3;
@@ -420,11 +403,11 @@ static uint32_t sub_pixel_sse_diff_4width_h_msa(const uint8_t *src,
     INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
     VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
     VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                vec0, vec1, vec2, vec3);
+    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+                vec2, vec3);
     SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
-                src0, src1, src2, src3);
+    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1,
+                src2, src3);
     ILVEV_W2_SB(src0, src1, src2, src3, src0, src2);
     src0 = (v16i8)__msa_ilvev_d((v2i64)src2, (v2i64)src0);
     CALC_MSE_AVG_B(src0, ref, var, avg);
@@ -436,13 +419,9 @@ static uint32_t sub_pixel_sse_diff_4width_h_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_8width_h_msa(const uint8_t *src,
-                                                int32_t src_stride,
-                                                const uint8_t *dst,
-                                                int32_t dst_stride,
-                                                const uint8_t *filter,
-                                                int32_t height,
-                                                int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_8width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 filt0, out, ref0, ref1, ref2, ref3;
@@ -464,11 +443,11 @@ static uint32_t sub_pixel_sse_diff_8width_h_msa(const uint8_t *src,
     PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
     VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
     VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                vec0, vec1, vec2, vec3);
+    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+                vec2, vec3);
     SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
-                src0, src1, src2, src3);
+    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1,
+                src2, src3);
     out = (v16u8)__msa_ilvev_d((v2i64)src1, (v2i64)src0);
     CALC_MSE_AVG_B(out, ref0, var, avg);
     out = (v16u8)__msa_ilvev_d((v2i64)src3, (v2i64)src2);
@@ -481,13 +460,9 @@ static uint32_t sub_pixel_sse_diff_8width_h_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_16width_h_msa(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 const uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const uint8_t *filter,
-                                                 int32_t height,
-                                                 int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_16width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
@@ -512,14 +487,14 @@ static uint32_t sub_pixel_sse_diff_16width_h_msa(const uint8_t *src,
     VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
     VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);
     VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                out0, out1, out2, out3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0,
-                out4, out5, out6, out7);
+    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
+                out2, out3);
+    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
+                out6, out7);
     SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
     SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
-    PCKEV_B4_SB(out1, out0, out3, out2, out5, out4, out7, out6,
-                src0, src1, src2, src3);
+    PCKEV_B4_SB(out1, out0, out3, out2, out5, out4, out7, out6, src0, src1,
+                src2, src3);
     CALC_MSE_AVG_B(src0, dst0, var, avg);
     CALC_MSE_AVG_B(src1, dst1, var, avg);
     CALC_MSE_AVG_B(src2, dst2, var, avg);
@@ -532,13 +507,9 @@ static uint32_t sub_pixel_sse_diff_16width_h_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_32width_h_msa(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 const uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const uint8_t *filter,
-                                                 int32_t height,
-                                                 int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_32width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[2];
 
@@ -554,13 +525,9 @@ static uint32_t sub_pixel_sse_diff_32width_h_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_sse_diff_64width_h_msa(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 const uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const uint8_t *filter,
-                                                 int32_t height,
-                                                 int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_64width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[4];
 
@@ -576,13 +543,9 @@ static uint32_t sub_pixel_sse_diff_64width_h_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_sse_diff_4width_v_msa(const uint8_t *src,
-                                                int32_t src_stride,
-                                                const uint8_t *dst,
-                                                int32_t dst_stride,
-                                                const uint8_t *filter,
-                                                int32_t height,
-                                                int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_4width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   uint32_t ref0, ref1, ref2, ref3;
@@ -608,8 +571,8 @@ static uint32_t sub_pixel_sse_diff_4width_v_msa(const uint8_t *src,
     dst += (4 * dst_stride);
 
     INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
-    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
-               src10_r, src21_r, src32_r, src43_r);
+    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+               src32_r, src43_r);
     ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
     DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
@@ -624,13 +587,9 @@ static uint32_t sub_pixel_sse_diff_4width_v_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_8width_v_msa(const uint8_t *src,
-                                                int32_t src_stride,
-                                                const uint8_t *dst,
-                                                int32_t dst_stride,
-                                                const uint8_t *filter,
-                                                int32_t height,
-                                                int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_8width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 src0, src1, src2, src3, src4;
@@ -654,10 +613,10 @@ static uint32_t sub_pixel_sse_diff_8width_v_msa(const uint8_t *src,
     dst += (4 * dst_stride);
 
     PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
-    ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3,
-               vec0, vec1, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                tmp0, tmp1, tmp2, tmp3);
+    ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
+               vec3);
+    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
+                tmp2, tmp3);
     SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
     PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
     CALC_MSE_AVG_B(src0, ref0, var, avg);
@@ -671,13 +630,9 @@ static uint32_t sub_pixel_sse_diff_8width_v_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_16width_v_msa(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 const uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const uint8_t *filter,
-                                                 int32_t height,
-                                                 int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_16width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 ref0, ref1, ref2, ref3;
@@ -734,13 +689,9 @@ static uint32_t sub_pixel_sse_diff_16width_v_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_32width_v_msa(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 const uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const uint8_t *filter,
-                                                 int32_t height,
-                                                 int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_32width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[2];
 
@@ -756,13 +707,9 @@ static uint32_t sub_pixel_sse_diff_32width_v_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_sse_diff_64width_v_msa(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 const uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const uint8_t *filter,
-                                                 int32_t height,
-                                                 int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_64width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[4];
 
@@ -778,14 +725,10 @@ static uint32_t sub_pixel_sse_diff_64width_v_msa(const uint8_t *src,
   return sse;
 }
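 /*
  * The 32- and 64-wide wrappers above tile the block into 16-wide
  * strips: each strip returns its own sse and signed diff, the sse
  * values are accumulated, and the per-strip diffs are summed into the
  * caller's diff. Scalar shape of that reduction (illustrative helper,
  * not part of libvpx):
  */
 static uint32_t sum_strip_results(const uint32_t *strip_sse,
                                   const int32_t *strip_diff,
                                   int32_t nstrips, int32_t *diff) {
   uint32_t sse = 0;
   int32_t i;
   *diff = 0;
   for (i = 0; i < nstrips; ++i) {
     sse += strip_sse[i];
     *diff += strip_diff[i];
   }
   return sse;
 }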
 
-static uint32_t sub_pixel_sse_diff_4width_hv_msa(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 const uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const uint8_t *filter_horiz,
-                                                 const uint8_t *filter_vert,
-                                                 int32_t height,
-                                                 int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_4width_hv_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
+    int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   uint32_t ref0, ref1, ref2, ref3;
@@ -831,14 +774,10 @@ static uint32_t sub_pixel_sse_diff_4width_hv_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_8width_hv_msa(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 const uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const uint8_t *filter_horiz,
-                                                 const uint8_t *filter_vert,
-                                                 int32_t height,
-                                                 int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_8width_hv_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
+    int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 ref0, ref1, ref2, ref3;
@@ -892,14 +831,10 @@ static uint32_t sub_pixel_sse_diff_8width_hv_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_16width_hv_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  const uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  const uint8_t *filter_horiz,
-                                                  const uint8_t *filter_vert,
-                                                  int32_t height,
-                                                  int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_16width_hv_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
+    int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
@@ -969,14 +904,10 @@ static uint32_t sub_pixel_sse_diff_16width_hv_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_sse_diff_32width_hv_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  const uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  const uint8_t *filter_horiz,
-                                                  const uint8_t *filter_vert,
-                                                  int32_t height,
-                                                  int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_32width_hv_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
+    int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[2];
 
@@ -993,14 +924,10 @@ static uint32_t sub_pixel_sse_diff_32width_hv_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_sse_diff_64width_hv_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  const uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  const uint8_t *filter_horiz,
-                                                  const uint8_t *filter_vert,
-                                                  int32_t height,
-                                                  int32_t *diff) {
+static uint32_t sub_pixel_sse_diff_64width_hv_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
+    int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[4];
 
@@ -1017,14 +944,10 @@ static uint32_t sub_pixel_sse_diff_64width_hv_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(const uint8_t *src,
-                                                    int32_t src_stride,
-                                                    const uint8_t *dst,
-                                                    int32_t dst_stride,
-                                                    const uint8_t *sec_pred,
-                                                    const uint8_t *filter,
-                                                    int32_t height,
-                                                    int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   uint32_t ref0, ref1, ref2, ref3;
@@ -1049,11 +972,11 @@ static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(const uint8_t *src,
     INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
     VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
     VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                vec0, vec1, vec2, vec3);
+    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+                vec2, vec3);
     SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
-                src0, src1, src2, src3);
+    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1,
+                src2, src3);
     ILVEV_W2_SB(src0, src1, src2, src3, src0, src2);
     out = (v16u8)__msa_ilvev_d((v2i64)src2, (v2i64)src0);
     out = __msa_aver_u_b(out, pred);
@@ -1066,14 +989,10 @@ static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(const uint8_t *src,
-                                                    int32_t src_stride,
-                                                    const uint8_t *dst,
-                                                    int32_t dst_stride,
-                                                    const uint8_t *sec_pred,
-                                                    const uint8_t *filter,
-                                                    int32_t height,
-                                                    int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 out, pred, filt0;
@@ -1096,11 +1015,11 @@ static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(const uint8_t *src,
     PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
     VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
     VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                vec0, vec1, vec2, vec3);
+    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+                vec2, vec3);
     SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
-    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
-                src0, src1, src2, src3);
+    PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1,
+                src2, src3);
     out = (v16u8)__msa_ilvev_d((v2i64)src1, (v2i64)src0);
 
     pred = LD_UB(sec_pred);
@@ -1120,15 +1039,10 @@ static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t subpel_avg_ssediff_16w_h_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             const uint8_t *dst,
-                                             int32_t dst_stride,
-                                             const uint8_t *sec_pred,
-                                             const uint8_t *filter,
-                                             int32_t height,
-                                             int32_t *diff,
-                                             int32_t width) {
+static uint32_t subpel_avg_ssediff_16w_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff, int32_t width) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
@@ -1157,16 +1071,16 @@ static uint32_t subpel_avg_ssediff_16w_h_msa(const uint8_t *src,
     VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
     VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);
     VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                out0, out1, out2, out3);
-    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0,
-                out4, out5, out6, out7);
+    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
+                out2, out3);
+    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
+                out6, out7);
     SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
     SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
-    PCKEV_B4_UB(out1, out0, out3, out2, out5, out4, out7, out6,
-                tmp0, tmp1, tmp2, tmp3);
-    AVER_UB4_UB(tmp0, pred0, tmp1, pred1, tmp2, pred2, tmp3, pred3,
-                tmp0, tmp1, tmp2, tmp3);
+    PCKEV_B4_UB(out1, out0, out3, out2, out5, out4, out7, out6, tmp0, tmp1,
+                tmp2, tmp3);
+    AVER_UB4_UB(tmp0, pred0, tmp1, pred1, tmp2, pred2, tmp3, pred3, tmp0, tmp1,
+                tmp2, tmp3);
 
     CALC_MSE_AVG_B(tmp0, dst0, var, avg);
     CALC_MSE_AVG_B(tmp1, dst1, var, avg);
@@ -1180,33 +1094,25 @@ static uint32_t subpel_avg_ssediff_16w_h_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_avg_sse_diff_16width_h_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const uint8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_16width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   return subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride,
                                       sec_pred, filter, height, diff, 16);
 }
 
-static uint32_t sub_pixel_avg_sse_diff_32width_h_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const uint8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_32width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[2];
 
   for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride,
-                                        sec_pred, filter, height,
-                                        &diff0[loop_cnt], 32);
+    sse +=
+        subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, sec_pred,
+                                     filter, height, &diff0[loop_cnt], 32);
     src += 16;
     dst += 16;
     sec_pred += 16;
@@ -1217,21 +1123,17 @@ static uint32_t sub_pixel_avg_sse_diff_32width_h_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_avg_sse_diff_64width_h_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const uint8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_64width_h_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[4];
 
   for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride,
-                                        sec_pred, filter, height,
-                                        &diff0[loop_cnt], 64);
+    sse +=
+        subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, sec_pred,
+                                     filter, height, &diff0[loop_cnt], 64);
     src += 16;
     dst += 16;
     sec_pred += 16;
@@ -1242,14 +1144,10 @@ static uint32_t sub_pixel_avg_sse_diff_64width_h_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(const uint8_t *src,
-                                                    int32_t src_stride,
-                                                    const uint8_t *dst,
-                                                    int32_t dst_stride,
-                                                    const uint8_t *sec_pred,
-                                                    const uint8_t *filter,
-                                                    int32_t height,
-                                                    int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   uint32_t ref0, ref1, ref2, ref3;
@@ -1276,8 +1174,8 @@ static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(const uint8_t *src,
     dst += (4 * dst_stride);
 
     INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
-    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
-               src10_r, src21_r, src32_r, src43_r);
+    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+               src32_r, src43_r);
     ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
     DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
@@ -1294,14 +1192,10 @@ static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_avg_sse_diff_8width_v_msa(const uint8_t *src,
-                                                    int32_t src_stride,
-                                                    const uint8_t *dst,
-                                                    int32_t dst_stride,
-                                                    const uint8_t *sec_pred,
-                                                    const uint8_t *filter,
-                                                    int32_t height,
-                                                    int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_8width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 src0, src1, src2, src3, src4;
@@ -1326,10 +1220,10 @@ static uint32_t sub_pixel_avg_sse_diff_8width_v_msa(const uint8_t *src,
     LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
     dst += (4 * dst_stride);
     PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
-    ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3,
-               vec0, vec1, vec2, vec3);
-    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
-                tmp0, tmp1, tmp2, tmp3);
+    ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
+               vec3);
+    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
+                tmp2, tmp3);
     SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
     PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
     AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
@@ -1345,15 +1239,10 @@ static uint32_t sub_pixel_avg_sse_diff_8width_v_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t subpel_avg_ssediff_16w_v_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             const uint8_t *dst,
-                                             int32_t dst_stride,
-                                             const uint8_t *sec_pred,
-                                             const uint8_t *filter,
-                                             int32_t height,
-                                             int32_t *diff,
-                                             int32_t width) {
+static uint32_t subpel_avg_ssediff_16w_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff, int32_t width) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 ref0, ref1, ref2, ref3;
@@ -1401,8 +1290,8 @@ static uint32_t subpel_avg_ssediff_16w_v_msa(const uint8_t *src,
     LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
     dst += (4 * dst_stride);
 
-    AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3,
-                out0, out1, out2, out3);
+    AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3, out0, out1,
+                out2, out3);
 
     CALC_MSE_AVG_B(out0, ref0, var, avg);
     CALC_MSE_AVG_B(out1, ref1, var, avg);
@@ -1416,33 +1305,25 @@ static uint32_t subpel_avg_ssediff_16w_v_msa(const uint8_t *src,
   return HADD_SW_S32(var);
 }
 
-static uint32_t sub_pixel_avg_sse_diff_16width_v_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const uint8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_16width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   return subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride,
                                       sec_pred, filter, height, diff, 16);
 }
 
-static uint32_t sub_pixel_avg_sse_diff_32width_v_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const uint8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_32width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[2];
 
   for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride,
-                                        sec_pred, filter, height,
-                                        &diff0[loop_cnt], 32);
+    sse +=
+        subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, sec_pred,
+                                     filter, height, &diff0[loop_cnt], 32);
     src += 16;
     dst += 16;
     sec_pred += 16;
@@ -1453,21 +1334,17 @@ static uint32_t sub_pixel_avg_sse_diff_32width_v_msa(const uint8_t *src,
   return sse;
 }
 
-static uint32_t sub_pixel_avg_sse_diff_64width_v_msa(const uint8_t *src,
-                                                     int32_t src_stride,
-                                                     const uint8_t *dst,
-                                                     int32_t dst_stride,
-                                                     const uint8_t *sec_pred,
-                                                     const uint8_t *filter,
-                                                     int32_t height,
-                                                     int32_t *diff) {
+static uint32_t sub_pixel_avg_sse_diff_64width_v_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
+    int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[4];
 
   for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
-    sse += subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride,
-                                        sec_pred, filter, height,
-                                        &diff0[loop_cnt], 64);
+    sse +=
+        subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, sec_pred,
+                                     filter, height, &diff0[loop_cnt], 64);
     src += 16;
     dst += 16;
     sec_pred += 16;
@@ -1479,11 +1356,9 @@ static uint32_t sub_pixel_avg_sse_diff_64width_v_msa(const uint8_t *src,
 }
 
 static uint32_t sub_pixel_avg_sse_diff_4width_hv_msa(
-  const uint8_t *src, int32_t src_stride,
-  const uint8_t *dst, int32_t dst_stride,
-  const uint8_t *sec_pred,
-  const uint8_t *filter_horiz, const uint8_t *filter_vert,
-  int32_t height, int32_t *diff) {
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
+    const uint8_t *filter_vert, int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   uint32_t ref0, ref1, ref2, ref3;
@@ -1532,11 +1407,9 @@ static uint32_t sub_pixel_avg_sse_diff_4width_hv_msa(
 }
 
 static uint32_t sub_pixel_avg_sse_diff_8width_hv_msa(
-  const uint8_t *src, int32_t src_stride,
-  const uint8_t *dst, int32_t dst_stride,
-  const uint8_t *sec_pred,
-  const uint8_t *filter_horiz, const uint8_t *filter_vert,
-  int32_t height, int32_t *diff) {
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
+    const uint8_t *filter_vert, int32_t height, int32_t *diff) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 ref0, ref1, ref2, ref3;
@@ -1598,16 +1471,10 @@ static uint32_t sub_pixel_avg_sse_diff_8width_hv_msa(
   return HADD_SW_S32(var);
 }
 
-static uint32_t subpel_avg_ssediff_16w_hv_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              const uint8_t *dst,
-                                              int32_t dst_stride,
-                                              const uint8_t *sec_pred,
-                                              const uint8_t *filter_horiz,
-                                              const uint8_t *filter_vert,
-                                              int32_t height,
-                                              int32_t *diff,
-                                              int32_t width) {
+static uint32_t subpel_avg_ssediff_16w_hv_msa(
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
+    const uint8_t *filter_vert, int32_t height, int32_t *diff, int32_t width) {
   int16_t filtval;
   uint32_t loop_cnt;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
@@ -1669,8 +1536,8 @@ static uint32_t subpel_avg_ssediff_16w_hv_msa(const uint8_t *src,
     LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
     dst += (4 * dst_stride);
 
-    AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3,
-                out0, out1, out2, out3);
+    AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3, out0, out1,
+                out2, out3);
 
     CALC_MSE_AVG_B(out0, ref0, var, avg);
     CALC_MSE_AVG_B(out1, ref1, var, avg);
@@ -1685,22 +1552,18 @@ static uint32_t subpel_avg_ssediff_16w_hv_msa(const uint8_t *src,
 }
 
 static uint32_t sub_pixel_avg_sse_diff_16width_hv_msa(
-  const uint8_t *src, int32_t src_stride,
-  const uint8_t *dst, int32_t dst_stride,
-  const uint8_t *sec_pred,
-  const uint8_t *filter_horiz, const uint8_t *filter_vert,
-  int32_t height, int32_t *diff) {
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
+    const uint8_t *filter_vert, int32_t height, int32_t *diff) {
   return subpel_avg_ssediff_16w_hv_msa(src, src_stride, dst, dst_stride,
                                        sec_pred, filter_horiz, filter_vert,
                                        height, diff, 16);
 }
 
 static uint32_t sub_pixel_avg_sse_diff_32width_hv_msa(
-  const uint8_t *src, int32_t src_stride,
-  const uint8_t *dst, int32_t dst_stride,
-  const uint8_t *sec_pred,
-  const uint8_t *filter_horiz, const uint8_t *filter_vert,
-  int32_t height, int32_t *diff) {
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
+    const uint8_t *filter_vert, int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[2];
 
@@ -1719,11 +1582,9 @@ static uint32_t sub_pixel_avg_sse_diff_32width_hv_msa(
 }
 
 static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa(
-  const uint8_t *src, int32_t src_stride,
-  const uint8_t *dst, int32_t dst_stride,
-  const uint8_t *sec_pred,
-  const uint8_t *filter_horiz, const uint8_t *filter_vert,
-  int32_t height, int32_t *diff) {
+    const uint8_t *src, int32_t src_stride, const uint8_t *dst,
+    int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
+    const uint8_t *filter_vert, int32_t height, int32_t *diff) {
   uint32_t loop_cnt, sse = 0;
   int32_t diff0[4];
 
@@ -1756,125 +1617,115 @@ static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa(
 #define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
 #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
 
-#define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht)                         \
-uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa(const uint8_t *src,     \
-                                                 int32_t src_stride,     \
-                                                 int32_t xoffset,        \
-                                                 int32_t yoffset,        \
-                                                 const uint8_t *ref,     \
-                                                 int32_t ref_stride,     \
-                                                 uint32_t *sse) {        \
-  int32_t diff;                                                          \
-  uint32_t var;                                                          \
-  const uint8_t *h_filter = bilinear_filters_msa[xoffset];               \
-  const uint8_t *v_filter = bilinear_filters_msa[yoffset];               \
-                                                                         \
-  if (yoffset) {                                                         \
-    if (xoffset) {                                                       \
-      *sse = sub_pixel_sse_diff_##wd##width_hv_msa(src, src_stride,      \
-                                                   ref, ref_stride,      \
-                                                   h_filter, v_filter,   \
-                                                   ht, &diff);           \
-    } else {                                                             \
-      *sse = sub_pixel_sse_diff_##wd##width_v_msa(src, src_stride,       \
-                                                  ref, ref_stride,       \
-                                                  v_filter, ht, &diff);  \
-    }                                                                    \
-                                                                         \
-    var = VARIANCE_##wd##Wx##ht##H(*sse, diff);                          \
-  } else {                                                               \
-    if (xoffset) {                                                       \
-      *sse = sub_pixel_sse_diff_##wd##width_h_msa(src, src_stride,       \
-                                                  ref, ref_stride,       \
-                                                  h_filter, ht, &diff);  \
-                                                                         \
-      var = VARIANCE_##wd##Wx##ht##H(*sse, diff);                        \
-    } else {                                                             \
-      var = vpx_variance##wd##x##ht##_msa(src, src_stride,               \
-                                          ref, ref_stride, sse);         \
-    }                                                                    \
-  }                                                                      \
-                                                                         \
-  return var;                                                            \
-}
+#define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht)                              \
+  uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa(                           \
+      const uint8_t *src, int32_t src_stride, int32_t xoffset,                \
+      int32_t yoffset, const uint8_t *ref, int32_t ref_stride,                \
+      uint32_t *sse) {                                                        \
+    int32_t diff;                                                             \
+    uint32_t var;                                                             \
+    const uint8_t *h_filter = bilinear_filters_msa[xoffset];                  \
+    const uint8_t *v_filter = bilinear_filters_msa[yoffset];                  \
+                                                                              \
+    if (yoffset) {                                                            \
+      if (xoffset) {                                                          \
+        *sse = sub_pixel_sse_diff_##wd##width_hv_msa(                         \
+            src, src_stride, ref, ref_stride, h_filter, v_filter, ht, &diff); \
+      } else {                                                                \
+        *sse = sub_pixel_sse_diff_##wd##width_v_msa(                          \
+            src, src_stride, ref, ref_stride, v_filter, ht, &diff);           \
+      }                                                                       \
+                                                                              \
+      var = VARIANCE_##wd##Wx##ht##H(*sse, diff);                             \
+    } else {                                                                  \
+      if (xoffset) {                                                          \
+        *sse = sub_pixel_sse_diff_##wd##width_h_msa(                          \
+            src, src_stride, ref, ref_stride, h_filter, ht, &diff);           \
+                                                                              \
+        var = VARIANCE_##wd##Wx##ht##H(*sse, diff);                           \
+      } else {                                                                \
+        var = vpx_variance##wd##x##ht##_msa(src, src_stride, ref, ref_stride, \
+                                            sse);                             \
+      }                                                                       \
+    }                                                                         \
+                                                                              \
+    return var;                                                               \
+  }
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8);
+/* clang-format off */
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4)
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8)
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16);
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4)
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8)
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16)
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32);
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8)
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16)
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32)
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64);
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16)
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32)
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64)
 
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32);
-VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32)
+VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64)
+/* clang-format on */
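 /*
  * Each instantiation above emits one public function, e.g.
  * vpx_sub_pixel_variance16x16_msa. The VARIANCE_##wd##Wx##ht##H helpers
  * finalize the result from the accumulated sse and signed diff; in
  * scalar terms (illustrative sketch, with shift = log2(wd * ht) as in
  * the 11/12 values used for 64x32/64x64 above):
  */
 static uint32_t variance_final(uint32_t sse, int32_t diff, int32_t shift) {
   return sse - (uint32_t)(((int64_t)diff * diff) >> shift);
 }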
 
 #define VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht)                          \
-uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa(                         \
-  const uint8_t *src_ptr, int32_t src_stride,                                 \
-  int32_t xoffset, int32_t yoffset,                                           \
-  const uint8_t *ref_ptr, int32_t ref_stride,                                 \
-  uint32_t *sse, const uint8_t *sec_pred) {                                   \
-  int32_t diff;                                                               \
-  const uint8_t *h_filter = bilinear_filters_msa[xoffset];                    \
-  const uint8_t *v_filter = bilinear_filters_msa[yoffset];                    \
+  uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa(                       \
+      const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset,            \
+      int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride,            \
+      uint32_t *sse, const uint8_t *sec_pred) {                               \
+    int32_t diff;                                                             \
+    const uint8_t *h_filter = bilinear_filters_msa[xoffset];                  \
+    const uint8_t *v_filter = bilinear_filters_msa[yoffset];                  \
                                                                               \
-  if (yoffset) {                                                              \
-    if (xoffset) {                                                            \
-      *sse = sub_pixel_avg_sse_diff_##wd##width_hv_msa(src_ptr, src_stride,   \
-                                                       ref_ptr, ref_stride,   \
-                                                       sec_pred, h_filter,    \
-                                                       v_filter, ht, &diff);  \
-    } else {                                                                  \
-      *sse = sub_pixel_avg_sse_diff_##wd##width_v_msa(src_ptr, src_stride,    \
-                                                      ref_ptr, ref_stride,    \
-                                                      sec_pred, v_filter,     \
-                                                      ht, &diff);             \
-    }                                                                         \
-  } else {                                                                    \
-    if (xoffset) {                                                            \
-      *sse = sub_pixel_avg_sse_diff_##wd##width_h_msa(src_ptr, src_stride,    \
-                                                      ref_ptr, ref_stride,    \
-                                                      sec_pred, h_filter,     \
-                                                      ht, &diff);             \
+    if (yoffset) {                                                            \
+      if (xoffset) {                                                          \
+        *sse = sub_pixel_avg_sse_diff_##wd##width_hv_msa(                     \
+            src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter,     \
+            v_filter, ht, &diff);                                             \
+      } else {                                                                \
+        *sse = sub_pixel_avg_sse_diff_##wd##width_v_msa(                      \
+            src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, v_filter, ht, \
+            &diff);                                                           \
+      }                                                                       \
     } else {                                                                  \
-      *sse = avg_sse_diff_##wd##width_msa(src_ptr, src_stride,                \
-                                          ref_ptr, ref_stride,                \
-                                          sec_pred, ht, &diff);               \
+      if (xoffset) {                                                          \
+        *sse = sub_pixel_avg_sse_diff_##wd##width_h_msa(                      \
+            src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, ht, \
+            &diff);                                                           \
+      } else {                                                                \
+        *sse = avg_sse_diff_##wd##width_msa(src_ptr, src_stride, ref_ptr,     \
+                                            ref_stride, sec_pred, ht, &diff); \
+      }                                                                       \
     }                                                                         \
-  }                                                                           \
                                                                               \
-  return VARIANCE_##wd##Wx##ht##H(*sse, diff);                                \
-}
+    return VARIANCE_##wd##Wx##ht##H(*sse, diff);                              \
+  }
 
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8);
+/* clang-format off */
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4)
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8)
 
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 4);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 8);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 16);
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 4)
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 8)
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 16)
 
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 8);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 16);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 32);
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 8)
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 16)
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 32)
 
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16);
-VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32);
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16)
+VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32)
+/* clang-format on */
 
 uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
                                              int32_t src_stride,
-                                             int32_t xoffset,
-                                             int32_t yoffset,
+                                             int32_t xoffset, int32_t yoffset,
                                              const uint8_t *ref_ptr,
-                                             int32_t ref_stride,
-                                             uint32_t *sse,
+                                             int32_t ref_stride, uint32_t *sse,
                                              const uint8_t *sec_pred) {
   int32_t diff;
   const uint8_t *h_filter = bilinear_filters_msa[xoffset];
@@ -1882,22 +1733,19 @@ uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
 
   if (yoffset) {
     if (xoffset) {
-      *sse = sub_pixel_avg_sse_diff_32width_hv_msa(src_ptr, src_stride,
-                                                   ref_ptr, ref_stride,
-                                                   sec_pred, h_filter,
-                                                   v_filter, 64, &diff);
+      *sse = sub_pixel_avg_sse_diff_32width_hv_msa(
+          src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter,
+          v_filter, 64, &diff);
     } else {
-      *sse = sub_pixel_avg_sse_diff_32width_v_msa(src_ptr, src_stride,
-                                                  ref_ptr, ref_stride,
-                                                  sec_pred, v_filter,
-                                                  64, &diff);
+      *sse = sub_pixel_avg_sse_diff_32width_v_msa(src_ptr, src_stride, ref_ptr,
+                                                  ref_stride, sec_pred,
+                                                  v_filter, 64, &diff);
     }
   } else {
     if (xoffset) {
-      *sse = sub_pixel_avg_sse_diff_32width_h_msa(src_ptr, src_stride,
-                                                  ref_ptr, ref_stride,
-                                                  sec_pred, h_filter,
-                                                  64, &diff);
+      *sse = sub_pixel_avg_sse_diff_32width_h_msa(src_ptr, src_stride, ref_ptr,
+                                                  ref_stride, sec_pred,
+                                                  h_filter, 64, &diff);
     } else {
       *sse = avg_sse_diff_32x64_msa(src_ptr, src_stride, ref_ptr, ref_stride,
                                     sec_pred, &diff);
@@ -1907,46 +1755,40 @@ uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
   return VARIANCE_32Wx64H(*sse, diff);
 }
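 /*
  * Note: 32x64 is written out by hand rather than instantiated from the
  * macro above because its zero-offset path calls avg_sse_diff_32x64_msa,
  * which does not fit the macro's avg_sse_diff_##wd##width_msa naming.
  */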
 
-#define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht)                          \
-uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa(const uint8_t *src_ptr,     \
-                                                 int32_t src_stride,         \
-                                                 int32_t xoffset,            \
-                                                 int32_t yoffset,            \
-                                                 const uint8_t *ref_ptr,     \
-                                                 int32_t ref_stride,         \
-                                                 uint32_t *sse,              \
-                                                 const uint8_t *sec_pred) {  \
-  int32_t diff;                                                              \
-  const uint8_t *h_filter = bilinear_filters_msa[xoffset];                   \
-  const uint8_t *v_filter = bilinear_filters_msa[yoffset];                   \
-                                                                             \
-  if (yoffset) {                                                             \
-    if (xoffset) {                                                           \
-      *sse = sub_pixel_avg_sse_diff_64width_hv_msa(src_ptr, src_stride,      \
-                                                   ref_ptr, ref_stride,      \
-                                                   sec_pred, h_filter,       \
-                                                   v_filter, ht, &diff);     \
-    } else {                                                                 \
-      *sse = sub_pixel_avg_sse_diff_64width_v_msa(src_ptr, src_stride,       \
-                                                  ref_ptr, ref_stride,       \
-                                                  sec_pred, v_filter,        \
-                                                  ht, &diff);                \
-    }                                                                        \
-  } else {                                                                   \
-    if (xoffset) {                                                           \
-      *sse = sub_pixel_avg_sse_diff_64width_h_msa(src_ptr, src_stride,       \
-                                                  ref_ptr, ref_stride,       \
-                                                  sec_pred, h_filter,        \
-                                                  ht, &diff);                \
-    } else {                                                                 \
-      *sse = avg_sse_diff_64x##ht##_msa(src_ptr, src_stride,                 \
-                                        ref_ptr, ref_stride,                 \
-                                        sec_pred, &diff);                    \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  return VARIANCE_64Wx##ht##H(*sse, diff);                                   \
-}
+#define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht)                           \
+  uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa(                           \
+      const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset,            \
+      int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride,            \
+      uint32_t *sse, const uint8_t *sec_pred) {                               \
+    int32_t diff;                                                             \
+    const uint8_t *h_filter = bilinear_filters_msa[xoffset];                  \
+    const uint8_t *v_filter = bilinear_filters_msa[yoffset];                  \
+                                                                              \
+    if (yoffset) {                                                            \
+      if (xoffset) {                                                          \
+        *sse = sub_pixel_avg_sse_diff_64width_hv_msa(                         \
+            src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter,     \
+            v_filter, ht, &diff);                                             \
+      } else {                                                                \
+        *sse = sub_pixel_avg_sse_diff_64width_v_msa(                          \
+            src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, v_filter, ht, \
+            &diff);                                                           \
+      }                                                                       \
+    } else {                                                                  \
+      if (xoffset) {                                                          \
+        *sse = sub_pixel_avg_sse_diff_64width_h_msa(                          \
+            src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, ht, \
+            &diff);                                                           \
+      } else {                                                                \
+        *sse = avg_sse_diff_64x##ht##_msa(src_ptr, src_stride, ref_ptr,       \
+                                          ref_stride, sec_pred, &diff);       \
+      }                                                                       \
+    }                                                                         \
+                                                                              \
+    return VARIANCE_64Wx##ht##H(*sse, diff);                                  \
+  }
 
-VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32);
-VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64);
+/* clang-format off */
+VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32)
+VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64)
+/* clang-format on */
diff --git a/vpx_dsp/mips/subtract_msa.c b/vpx_dsp/mips/subtract_msa.c
index 9ac43c5cd5225c8a579d47f3ccd31f38bb47de66..391a7ebf66251daec485a91e7ca936a4996cc42b 100644
--- a/vpx_dsp/mips/subtract_msa.c
+++ b/vpx_dsp/mips/subtract_msa.c
@@ -68,8 +68,8 @@ static void sub_blk_16x16_msa(const uint8_t *src, int32_t src_stride,
     LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
     src += (8 * src_stride);
 
-    LD_SB8(pred, pred_stride,
-           pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7);
+    LD_SB8(pred, pred_stride, pred0, pred1, pred2, pred3, pred4, pred5, pred6,
+           pred7);
     pred += (8 * pred_stride);
 
     ILVRL_B2_UB(src0, pred0, src_l0, src_l1);
@@ -226,31 +226,31 @@ static void sub_blk_64x64_msa(const uint8_t *src, int32_t src_stride,
   }
 }
 
-void vpx_subtract_block_msa(int32_t rows, int32_t cols,
-                            int16_t *diff_ptr, ptrdiff_t diff_stride,
-                            const uint8_t *src_ptr, ptrdiff_t src_stride,
-                            const uint8_t *pred_ptr, ptrdiff_t pred_stride) {
+void vpx_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr,
+                            ptrdiff_t diff_stride, const uint8_t *src_ptr,
+                            ptrdiff_t src_stride, const uint8_t *pred_ptr,
+                            ptrdiff_t pred_stride) {
   if (rows == cols) {
     switch (rows) {
       case 4:
-        sub_blk_4x4_msa(src_ptr, src_stride, pred_ptr, pred_stride,
-                        diff_ptr, diff_stride);
+        sub_blk_4x4_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr,
+                        diff_stride);
         break;
       case 8:
-        sub_blk_8x8_msa(src_ptr, src_stride, pred_ptr, pred_stride,
-                        diff_ptr, diff_stride);
+        sub_blk_8x8_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr,
+                        diff_stride);
         break;
       case 16:
-        sub_blk_16x16_msa(src_ptr, src_stride, pred_ptr, pred_stride,
-                          diff_ptr, diff_stride);
+        sub_blk_16x16_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr,
+                          diff_stride);
         break;
       case 32:
-        sub_blk_32x32_msa(src_ptr, src_stride, pred_ptr, pred_stride,
-                          diff_ptr, diff_stride);
+        sub_blk_32x32_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr,
+                          diff_stride);
         break;
       case 64:
-        sub_blk_64x64_msa(src_ptr, src_stride, pred_ptr, pred_stride,
-                          diff_ptr, diff_stride);
+        sub_blk_64x64_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr,
+                          diff_stride);
         break;
       default:
         vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr,
diff --git a/vpx_dsp/mips/txfm_macros_msa.h b/vpx_dsp/mips/txfm_macros_msa.h
index 68c63d56f67f524311ff62ae8aca30a7a4758b3c..da100f6a9808af6c3105057a9662159f884fe3d9 100644
--- a/vpx_dsp/mips/txfm_macros_msa.h
+++ b/vpx_dsp/mips/txfm_macros_msa.h
@@ -13,81 +13,84 @@
 
 #include "vpx_dsp/mips/macros_msa.h"
 
-#define DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) {      \
-  v8i16 k0_m = __msa_fill_h(cnst0);                                  \
-  v4i32 s0_m, s1_m, s2_m, s3_m;                                      \
-                                                                     \
-  s0_m = (v4i32)__msa_fill_h(cnst1);                                 \
-  k0_m = __msa_ilvev_h((v8i16)s0_m, k0_m);                           \
-                                                                     \
-  ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m);                            \
-  ILVRL_H2_SW(reg0, reg1, s3_m, s2_m);                               \
-  DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m);                   \
-  SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS);                           \
-  out0 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m);                    \
-                                                                     \
-  DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m);                   \
-  SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS);                           \
-  out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m);                    \
-}
+#define DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) \
+  {                                                           \
+    v8i16 k0_m = __msa_fill_h(cnst0);                         \
+    v4i32 s0_m, s1_m, s2_m, s3_m;                             \
+                                                              \
+    s0_m = (v4i32)__msa_fill_h(cnst1);                        \
+    k0_m = __msa_ilvev_h((v8i16)s0_m, k0_m);                  \
+                                                              \
+    ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m);                   \
+    ILVRL_H2_SW(reg0, reg1, s3_m, s2_m);                      \
+    DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m);          \
+    SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS);                  \
+    out0 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m);           \
+                                                              \
+    DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m);          \
+    SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS);                  \
+    out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m);           \
+  }
 
-#define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7,      \
-                              dst0, dst1, dst2, dst3) {                    \
-  v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m;                                 \
-  v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m;                                 \
-                                                                           \
-  DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5,                      \
-              tp0_m, tp2_m, tp3_m, tp4_m);                                 \
-  DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7,                      \
-              tp5_m, tp6_m, tp7_m, tp8_m);                                 \
-  BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m);     \
-  BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m);     \
-  SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, DCT_CONST_BITS);                 \
-  SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, DCT_CONST_BITS);                 \
-  PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m,      \
-              dst0, dst1, dst2, dst3);                                     \
-}
+#define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, dst0,   \
+                              dst1, dst2, dst3)                               \
+  {                                                                           \
+    v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m;                                  \
+    v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m;                                  \
+                                                                              \
+    DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, tp0_m, tp2_m, tp3_m,  \
+                tp4_m);                                                       \
+    DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7, tp5_m, tp6_m, tp7_m,  \
+                tp8_m);                                                       \
+    BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m);      \
+    BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m);      \
+    SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, DCT_CONST_BITS);                  \
+    SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, DCT_CONST_BITS);                  \
+    PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m, dst0, \
+                dst1, dst2, dst3);                                            \
+  }
 
-#define DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) ({       \
-  v8i16 dst_m;                                        \
-  v4i32 tp0_m, tp1_m;                                 \
-                                                      \
-  DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m);      \
-  SRARI_W2_SW(tp1_m, tp0_m, DCT_CONST_BITS);          \
-  dst_m = __msa_pckev_h((v8i16)tp1_m, (v8i16)tp0_m);  \
-                                                      \
-  dst_m;                                              \
-})
+#define DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2)           \
+  ({                                                   \
+    v8i16 dst_m;                                       \
+    v4i32 tp0_m, tp1_m;                                \
+                                                       \
+    DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m);     \
+    SRARI_W2_SW(tp1_m, tp0_m, DCT_CONST_BITS);         \
+    dst_m = __msa_pckev_h((v8i16)tp1_m, (v8i16)tp0_m); \
+                                                       \
+    dst_m;                                             \
+  })
 
-#define MADD_SHORT(m0, m1, c0, c1, res0, res1) {                    \
-  v4i32 madd0_m, madd1_m, madd2_m, madd3_m;                         \
-  v8i16 madd_s0_m, madd_s1_m;                                       \
-                                                                    \
-  ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m);                        \
-  DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m,           \
-              c0, c0, c1, c1, madd0_m, madd1_m, madd2_m, madd3_m);  \
-  SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, DCT_CONST_BITS);  \
-  PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1);      \
-}
+#define MADD_SHORT(m0, m1, c0, c1, res0, res1)                              \
+  {                                                                         \
+    v4i32 madd0_m, madd1_m, madd2_m, madd3_m;                               \
+    v8i16 madd_s0_m, madd_s1_m;                                             \
+                                                                            \
+    ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m);                              \
+    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m, c0, c0, c1, c1, \
+                madd0_m, madd1_m, madd2_m, madd3_m);                        \
+    SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, DCT_CONST_BITS);        \
+    PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1);            \
+  }
 
-#define MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,         \
-                out0, out1, out2, out3) {                               \
-  v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
-  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m;                     \
-                                                                        \
-  ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m);                        \
-  ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m);                        \
-  DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m,               \
-              cst0, cst0, cst2, cst2, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
-  BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m,                           \
-              m4_m, m5_m, tmp3_m, tmp2_m);                              \
-  SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS);              \
-  PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1);                  \
-  DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m,               \
-              cst1, cst1, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
-  BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m,                           \
-              m4_m, m5_m, tmp3_m, tmp2_m);                              \
-  SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS);              \
-  PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3);                  \
-}
+#define MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1,   \
+                out2, out3)                                                   \
+  {                                                                           \
+    v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                         \
+    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m;                         \
+                                                                              \
+    ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m);                            \
+    ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m);                            \
+    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, cst0, cst0, cst2, \
+                cst2, tmp0_m, tmp1_m, tmp2_m, tmp3_m);                        \
+    BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, m4_m, m5_m, tmp3_m, tmp2_m);  \
+    SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS);                  \
+    PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1);                      \
+    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, cst1, cst1, cst3, \
+                cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);                        \
+    BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, m4_m, m5_m, tmp3_m, tmp2_m);  \
+    SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS);                  \
+    PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3);                      \
+  }
 #endif  // VPX_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_
diff --git a/vpx_dsp/mips/variance_msa.c b/vpx_dsp/mips/variance_msa.c
index 33e175560fd3207fc86917095ca6c03d393a670e..54c5fcab94c00807d5ba5f8da84d4b6634c75b79 100644
--- a/vpx_dsp/mips/variance_msa.c
+++ b/vpx_dsp/mips/variance_msa.c
@@ -11,28 +11,29 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/mips/macros_msa.h"
 
-#define CALC_MSE_B(src, ref, var) {                                \
-  v16u8 src_l0_m, src_l1_m;                                        \
-  v8i16 res_l0_m, res_l1_m;                                        \
-                                                                   \
-  ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m);                       \
-  HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m);             \
-  DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var);  \
-}
+#define CALC_MSE_B(src, ref, var)                                   \
+  {                                                                 \
+    v16u8 src_l0_m, src_l1_m;                                       \
+    v8i16 res_l0_m, res_l1_m;                                       \
+                                                                    \
+    ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m);                      \
+    HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m);            \
+    DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
+  }
 
-#define CALC_MSE_AVG_B(src, ref, var, sub) {                       \
-  v16u8 src_l0_m, src_l1_m;                                        \
-  v8i16 res_l0_m, res_l1_m;                                        \
-                                                                   \
-  ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m);                       \
-  HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m);             \
-  DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var);  \
-                                                                   \
-  sub += res_l0_m + res_l1_m;                                      \
-}
+#define CALC_MSE_AVG_B(src, ref, var, sub)                          \
+  {                                                                 \
+    v16u8 src_l0_m, src_l1_m;                                       \
+    v8i16 res_l0_m, res_l1_m;                                       \
+                                                                    \
+    ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m);                      \
+    HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m);            \
+    DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
+                                                                    \
+    sub += res_l0_m + res_l1_m;                                     \
+  }
 
-#define VARIANCE_WxH(sse, diff, shift) \
-  sse - (((uint32_t)diff * diff) >> shift)
+#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
 
 #define VARIANCE_LARGE_WxH(sse, diff, shift) \
   sse - (((int64_t)diff * diff) >> shift)
@@ -80,8 +81,8 @@ static uint32_t sse_diff_8width_msa(const uint8_t *src_ptr, int32_t src_stride,
     LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
     ref_ptr += (4 * ref_stride);
 
-    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,
-                src0, src1, ref0, ref1);
+    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1,
+                ref0, ref1);
     CALC_MSE_AVG_B(src0, ref0, var, avg);
     CALC_MSE_AVG_B(src1, ref1, var, avg);
   }
@@ -370,8 +371,8 @@ static uint32_t sse_8width_msa(const uint8_t *src_ptr, int32_t src_stride,
     LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
     ref_ptr += (4 * ref_stride);
 
-    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,
-                src0, src1, ref0, ref1);
+    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1,
+                ref0, ref1);
     CALC_MSE_B(src0, ref0, var);
     CALC_MSE_B(src1, ref1, var);
   }
@@ -526,22 +527,21 @@ uint32_t vpx_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride,
 #define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
 #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
 
-#define VPX_VARIANCE_WDXHT_MSA(wd, ht)                               \
-uint32_t vpx_variance##wd##x##ht##_msa(const uint8_t *src,           \
-                                       int32_t src_stride,           \
-                                       const uint8_t *ref,           \
-                                       int32_t ref_stride,           \
-                                       uint32_t *sse) {              \
-  int32_t diff;                                                      \
-                                                                     \
-  *sse = sse_diff_##wd##width_msa(src, src_stride, ref, ref_stride,  \
-                                  ht, &diff);                        \
-                                                                     \
-  return VARIANCE_##wd##Wx##ht##H(*sse, diff);                       \
-}
+#define VPX_VARIANCE_WDXHT_MSA(wd, ht)                                         \
+  uint32_t vpx_variance##wd##x##ht##_msa(                                      \
+      const uint8_t *src, int32_t src_stride, const uint8_t *ref,              \
+      int32_t ref_stride, uint32_t *sse) {                                     \
+    int32_t diff;                                                              \
+                                                                               \
+    *sse =                                                                     \
+        sse_diff_##wd##width_msa(src, src_stride, ref, ref_stride, ht, &diff); \
+                                                                               \
+    return VARIANCE_##wd##Wx##ht##H(*sse, diff);                               \
+  }
 
-VPX_VARIANCE_WDXHT_MSA(4, 4);
-VPX_VARIANCE_WDXHT_MSA(4, 8);
+/* clang-format off */
+VPX_VARIANCE_WDXHT_MSA(4, 4)
+VPX_VARIANCE_WDXHT_MSA(4, 8)
 
 VPX_VARIANCE_WDXHT_MSA(8, 4)
 VPX_VARIANCE_WDXHT_MSA(8, 8)
@@ -553,6 +553,7 @@ VPX_VARIANCE_WDXHT_MSA(16, 32)
 
 VPX_VARIANCE_WDXHT_MSA(32, 16)
 VPX_VARIANCE_WDXHT_MSA(32, 32)
+/* clang-format on */
 
 uint32_t vpx_variance32x64_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t *ref, int32_t ref_stride,
@@ -585,8 +586,7 @@ uint32_t vpx_variance64x64_msa(const uint8_t *src, int32_t src_stride,
 }
 
 uint32_t vpx_mse8x8_msa(const uint8_t *src, int32_t src_stride,
-                        const uint8_t *ref, int32_t ref_stride,
-                        uint32_t *sse) {
+                        const uint8_t *ref, int32_t ref_stride, uint32_t *sse) {
   *sse = sse_8width_msa(src, src_stride, ref, ref_stride, 8);
 
   return *sse;
@@ -617,17 +617,15 @@ uint32_t vpx_mse16x16_msa(const uint8_t *src, int32_t src_stride,
 }
 
 void vpx_get8x8var_msa(const uint8_t *src, int32_t src_stride,
-                       const uint8_t *ref, int32_t ref_stride,
-                       uint32_t *sse, int32_t *sum) {
+                       const uint8_t *ref, int32_t ref_stride, uint32_t *sse,
+                       int32_t *sum) {
   *sse = sse_diff_8width_msa(src, src_stride, ref, ref_stride, 8, sum);
 }
 
 void vpx_get16x16var_msa(const uint8_t *src, int32_t src_stride,
-                         const uint8_t *ref, int32_t ref_stride,
-                         uint32_t *sse, int32_t *sum) {
+                         const uint8_t *ref, int32_t ref_stride, uint32_t *sse,
+                         int32_t *sum) {
   *sse = sse_diff_16width_msa(src, src_stride, ref, ref_stride, 16, sum);
 }
 
-uint32_t vpx_get_mb_ss_msa(const int16_t *src) {
-  return get_mb_ss_msa(src);
-}
+uint32_t vpx_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); }
diff --git a/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c b/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c
index f6244d834b7d4f24acd22c1b961071c62e71e798..ad2af286692155bccbee8df80abbbd41dee655d6 100644
--- a/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c
+++ b/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c
@@ -13,8 +13,7 @@
 #include "vpx_dsp/mips/vpx_convolve_msa.h"
 
 static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               int8_t *filter) {
   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
@@ -48,8 +47,7 @@ static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src,
 }
 
 static void common_hz_8t_and_aver_dst_4x8_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               int8_t *filter) {
   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
@@ -92,10 +90,8 @@ static void common_hz_8t_and_aver_dst_4x8_msa(const uint8_t *src,
 }
 
 static void common_hz_8t_and_aver_dst_4w_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             uint8_t *dst,
-                                             int32_t dst_stride,
-                                             int8_t *filter,
+                                             int32_t src_stride, uint8_t *dst,
+                                             int32_t dst_stride, int8_t *filter,
                                              int32_t height) {
   if (4 == height) {
     common_hz_8t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
@@ -105,10 +101,8 @@ static void common_hz_8t_and_aver_dst_4w_msa(const uint8_t *src,
 }
 
 static void common_hz_8t_and_aver_dst_8w_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             uint8_t *dst,
-                                             int32_t dst_stride,
-                                             int8_t *filter,
+                                             int32_t src_stride, uint8_t *dst,
+                                             int32_t dst_stride, int8_t *filter,
                                              int32_t height) {
   int32_t loop_cnt;
   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
@@ -136,18 +130,16 @@ static void common_hz_8t_and_aver_dst_8w_msa(const uint8_t *src,
     LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
     SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
     SAT_SH4_SH(out0, out1, out2, out3, 7);
-    CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3,
-                            dst, dst_stride);
+    CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3, dst,
+                            dst_stride);
     dst += (4 * dst_stride);
   }
 }
 
 static void common_hz_8t_and_aver_dst_16w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   int32_t loop_cnt;
   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
   v16u8 mask0, mask1, mask2, mask3, dst0, dst1;
@@ -199,11 +191,9 @@ static void common_hz_8t_and_aver_dst_16w_msa(const uint8_t *src,
 }
 
 static void common_hz_8t_and_aver_dst_32w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
   v16u8 dst1, dst2, mask0, mask1, mask2, mask3;
@@ -256,11 +246,9 @@ static void common_hz_8t_and_aver_dst_32w_msa(const uint8_t *src,
 }
 
 static void common_hz_8t_and_aver_dst_64w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   uint32_t loop_cnt, cnt;
   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
   v16u8 dst1, dst2, mask0, mask1, mask2, mask3;
@@ -318,8 +306,7 @@ static void common_hz_8t_and_aver_dst_64w_msa(const uint8_t *src,
 }
 
 static void common_hz_2t_and_aver_dst_4x4_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               int8_t *filter) {
   v16i8 src0, src1, src2, src3, mask;
@@ -344,8 +331,7 @@ static void common_hz_2t_and_aver_dst_4x4_msa(const uint8_t *src,
 }
 
 static void common_hz_2t_and_aver_dst_4x8_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               int8_t *filter) {
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
@@ -378,10 +364,8 @@ static void common_hz_2t_and_aver_dst_4x8_msa(const uint8_t *src,
 }
 
 static void common_hz_2t_and_aver_dst_4w_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             uint8_t *dst,
-                                             int32_t dst_stride,
-                                             int8_t *filter,
+                                             int32_t src_stride, uint8_t *dst,
+                                             int32_t dst_stride, int8_t *filter,
                                              int32_t height) {
   if (4 == height) {
     common_hz_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
@@ -391,8 +375,7 @@ static void common_hz_2t_and_aver_dst_4w_msa(const uint8_t *src,
 }
 
 static void common_hz_2t_and_aver_dst_8x4_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               int8_t *filter) {
   v16i8 src0, src1, src2, src3, mask;
@@ -412,16 +395,13 @@ static void common_hz_2t_and_aver_dst_8x4_msa(const uint8_t *src,
               vec2, vec3);
   SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
   LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
-                     dst, dst_stride);
+  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
+                     dst_stride);
 }
 
-static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  int8_t *filter,
-                                                  int32_t height) {
+static void common_hz_2t_and_aver_dst_8x8mult_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter, int32_t height) {
   v16i8 src0, src1, src2, src3, mask;
   v16u8 filt0, dst0, dst1, dst2, dst3;
   v8u16 vec0, vec1, vec2, vec3, filt;
@@ -442,8 +422,8 @@ static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
   LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
   LD_SB4(src, src_stride, src0, src1, src2, src3);
   src += (4 * src_stride);
-  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
-                     dst, dst_stride);
+  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
+                     dst_stride);
   dst += (4 * dst_stride);
 
   VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
@@ -452,8 +432,8 @@ static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
               vec2, vec3);
   SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
   LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
-                     dst, dst_stride);
+  PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
+                     dst_stride);
   dst += (4 * dst_stride);
 
   if (16 == height) {
@@ -467,8 +447,8 @@ static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
     SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
     LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
     LD_SB4(src, src_stride, src0, src1, src2, src3);
-    PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
-                       dst, dst_stride);
+    PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
+                       dst_stride);
     dst += (4 * dst_stride);
 
     VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
@@ -477,16 +457,14 @@ static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
                 vec2, vec3);
     SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
     LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
-                       dst, dst_stride);
+    PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst,
+                       dst_stride);
   }
 }
 
 static void common_hz_2t_and_aver_dst_8w_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             uint8_t *dst,
-                                             int32_t dst_stride,
-                                             int8_t *filter,
+                                             int32_t src_stride, uint8_t *dst,
+                                             int32_t dst_stride, int8_t *filter,
                                              int32_t height) {
   if (4 == height) {
     common_hz_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter);
@@ -497,11 +475,9 @@ static void common_hz_2t_and_aver_dst_8w_msa(const uint8_t *src,
 }
 
 static void common_hz_2t_and_aver_dst_16w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
   v16u8 filt0, dst0, dst1, dst2, dst3;
@@ -566,11 +542,9 @@ static void common_hz_2t_and_aver_dst_16w_msa(const uint8_t *src,
 }
 
 static void common_hz_2t_and_aver_dst_32w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
   v16u8 filt0, dst0, dst1, dst2, dst3;
@@ -617,11 +591,9 @@ static void common_hz_2t_and_aver_dst_32w_msa(const uint8_t *src,
 }
 
 static void common_hz_2t_and_aver_dst_64w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
   v16u8 filt0, dst0, dst1, dst2, dst3;
@@ -662,8 +634,8 @@ static void common_hz_2t_and_aver_dst_64w_msa(const uint8_t *src,
 void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
-                                 const int16_t *filter_y, int y_step_q4,
-                                 int w, int h) {
+                                 const int16_t *filter_y, int y_step_q4, int w,
+                                 int h) {
   int8_t cnt, filt_hor[8];
 
   assert(x_step_q4 == 16);
@@ -676,67 +648,55 @@ void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
   if (((const int32_t *)filter_x)[0] == 0) {
     switch (w) {
       case 4:
-        common_hz_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride,
-                                         dst, (int32_t)dst_stride,
-                                         &filt_hor[3], h);
+        common_hz_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
+                                         (int32_t)dst_stride, &filt_hor[3], h);
         break;
       case 8:
-        common_hz_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride,
-                                         dst, (int32_t)dst_stride,
-                                         &filt_hor[3], h);
+        common_hz_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
+                                         (int32_t)dst_stride, &filt_hor[3], h);
         break;
       case 16:
-        common_hz_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          &filt_hor[3], h);
+        common_hz_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, &filt_hor[3], h);
         break;
       case 32:
-        common_hz_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          &filt_hor[3], h);
+        common_hz_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, &filt_hor[3], h);
         break;
       case 64:
-        common_hz_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          &filt_hor[3], h);
+        common_hz_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, &filt_hor[3], h);
         break;
       default:
-        vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
-                                  filter_x, x_step_q4, filter_y, y_step_q4,
-                                  w, h);
+        vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+                                  x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   } else {
     switch (w) {
       case 4:
-        common_hz_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride,
-                                         dst, (int32_t)dst_stride,
-                                         filt_hor, h);
+        common_hz_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
+                                         (int32_t)dst_stride, filt_hor, h);
         break;
       case 8:
-        common_hz_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride,
-                                         dst, (int32_t)dst_stride,
-                                         filt_hor, h);
+        common_hz_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
+                                         (int32_t)dst_stride, filt_hor, h);
         break;
       case 16:
-        common_hz_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          filt_hor, h);
+        common_hz_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, filt_hor, h);
         break;
       case 32:
-        common_hz_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          filt_hor, h);
+        common_hz_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, filt_hor, h);
         break;
       case 64:
-        common_hz_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          filt_hor, h);
+        common_hz_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, filt_hor, h);
         break;
       default:
-        vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
-                                  filter_x, x_step_q4, filter_y, y_step_q4,
-                                  w, h);
+        vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+                                  x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
diff --git a/vpx_dsp/mips/vpx_convolve8_avg_msa.c b/vpx_dsp/mips/vpx_convolve8_avg_msa.c
index 2abde6de83c3e14131866426cb21721764d76cdc..1cfa63201c5acfefbf1170ec42d902be0c26edea 100644
--- a/vpx_dsp/mips/vpx_convolve8_avg_msa.c
+++ b/vpx_dsp/mips/vpx_convolve8_avg_msa.c
@@ -12,13 +12,9 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/mips/vpx_convolve_msa.h"
 
-static void common_hv_8ht_8vt_and_aver_dst_4w_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  int8_t *filter_horiz,
-                                                  int8_t *filter_vert,
-                                                  int32_t height) {
+static void common_hv_8ht_8vt_and_aver_dst_4w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
   v16u8 dst0, dst1, dst2, dst3, mask0, mask1, mask2, mask3, tmp0, tmp1;
@@ -64,15 +60,15 @@ static void common_hv_8ht_8vt_and_aver_dst_4w_msa(const uint8_t *src,
     src += (4 * src_stride);
 
     LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8);
     vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
     res0 = FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt_vt0, filt_vt1,
                                filt_vt2, filt_vt3);
 
-    hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8);
     vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8);
     res1 = FILT_8TAP_DPADD_S_H(vec1, vec2, vec3, vec4, filt_vt0, filt_vt1,
@@ -94,13 +90,9 @@ static void common_hv_8ht_8vt_and_aver_dst_4w_msa(const uint8_t *src,
   }
 }
 
-static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  int8_t *filter_horiz,
-                                                  int8_t *filter_vert,
-                                                  int32_t height) {
+static void common_hv_8ht_8vt_and_aver_dst_8w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
   v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3;
@@ -154,20 +146,20 @@ static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src,
 
     LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
 
-    hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
     tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1,
                                filt_vt2, filt_vt3);
 
-    hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7);
     tmp1 = FILT_8TAP_DPADD_S_H(out4, out5, out6, out7, filt_vt0, filt_vt1,
                                filt_vt2, filt_vt3);
 
-    hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     out8 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8);
     tmp2 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out8, filt_vt0, filt_vt1,
                                filt_vt2, filt_vt3);
@@ -180,8 +172,8 @@ static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src,
 
     SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
     SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
-    CONVERT_UB_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst2, dst3,
-                            dst, dst_stride);
+    CONVERT_UB_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst2, dst3, dst,
+                            dst_stride);
     dst += (4 * dst_stride);
 
     hz_out6 = hz_out10;
@@ -194,13 +186,9 @@ static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src,
   }
 }
 
-static void common_hv_8ht_8vt_and_aver_dst_16w_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert,
-                                                   int32_t height) {
+static void common_hv_8ht_8vt_and_aver_dst_16w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   int32_t multiple8_cnt;
   for (multiple8_cnt = 2; multiple8_cnt--;) {
     common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride,
@@ -210,13 +198,9 @@ static void common_hv_8ht_8vt_and_aver_dst_16w_msa(const uint8_t *src,
   }
 }
 
-static void common_hv_8ht_8vt_and_aver_dst_32w_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert,
-                                                   int32_t height) {
+static void common_hv_8ht_8vt_and_aver_dst_32w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   int32_t multiple8_cnt;
   for (multiple8_cnt = 4; multiple8_cnt--;) {
     common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride,
@@ -226,13 +210,9 @@ static void common_hv_8ht_8vt_and_aver_dst_32w_msa(const uint8_t *src,
   }
 }
 
-static void common_hv_8ht_8vt_and_aver_dst_64w_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert,
-                                                   int32_t height) {
+static void common_hv_8ht_8vt_and_aver_dst_64w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   int32_t multiple8_cnt;
   for (multiple8_cnt = 8; multiple8_cnt--;) {
     common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride,
@@ -242,12 +222,9 @@ static void common_hv_8ht_8vt_and_aver_dst_64w_msa(const uint8_t *src,
   }
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_4x4_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert) {
+static void common_hv_2ht_2vt_and_aver_dst_4x4_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert) {
   v16i8 src0, src1, src2, src3, src4, mask;
   v16u8 filt_hz, filt_vt, vec0, vec1;
   v16u8 dst0, dst1, dst2, dst3, res0, res1;
@@ -280,12 +257,9 @@ static void common_hv_2ht_2vt_and_aver_dst_4x4_msa(const uint8_t *src,
   ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_4x8_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert) {
+static void common_hv_2ht_2vt_and_aver_dst_4x8_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert) {
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask;
   v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3, res0, res1, res2, res3;
   v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
@@ -316,29 +290,25 @@ static void common_hv_2ht_2vt_and_aver_dst_4x8_msa(const uint8_t *src,
   hz_out7 = (v8u16)__msa_pckod_d((v2i64)hz_out8, (v2i64)hz_out6);
 
   LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
-  ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2,
-             dst4, dst6);
+  ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2, dst4,
+             dst6);
   ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
   ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt,
-              tmp0, tmp1, tmp2, tmp3);
+  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, tmp0,
+              tmp1, tmp2, tmp3);
   SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_B4_UB(tmp0, tmp0, tmp1, tmp1, tmp2, tmp2, tmp3, tmp3, res0, res1,
-              res2, res3);
-  AVER_UB4_UB(res0, dst0, res1, dst2, res2, dst4, res3, dst6, res0, res1,
-              res2, res3);
+  PCKEV_B4_UB(tmp0, tmp0, tmp1, tmp1, tmp2, tmp2, tmp3, tmp3, res0, res1, res2,
+              res3);
+  AVER_UB4_UB(res0, dst0, res1, dst2, res2, dst4, res3, dst6, res0, res1, res2,
+              res3);
   ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
   dst += (4 * dst_stride);
   ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_4w_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  int8_t *filter_horiz,
-                                                  int8_t *filter_vert,
-                                                  int32_t height) {
+static void common_hv_2ht_2vt_and_aver_dst_4w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   if (4 == height) {
     common_hv_2ht_2vt_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride,
                                            filter_horiz, filter_vert);
@@ -348,12 +318,9 @@ static void common_hv_2ht_2vt_and_aver_dst_4w_msa(const uint8_t *src,
   }
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_8x4_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert) {
+static void common_hv_2ht_2vt_and_aver_dst_8x4_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert) {
   v16i8 src0, src1, src2, src3, src4, mask;
   v16u8 filt_hz, filt_vt, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
   v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3;
@@ -390,17 +357,13 @@ static void common_hv_2ht_2vt_and_aver_dst_8x4_msa(const uint8_t *src,
   tmp3 = __msa_dotp_u_h(vec3, filt_vt);
 
   SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3,
-                     dst, dst_stride);
+  PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst,
+                     dst_stride);
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(const uint8_t *src,
-                                                       int32_t src_stride,
-                                                       uint8_t *dst,
-                                                       int32_t dst_stride,
-                                                       int8_t *filter_horiz,
-                                                       int8_t *filter_vert,
-                                                       int32_t height) {
+static void common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, mask;
   v16u8 filt_hz, filt_vt, vec0, dst0, dst1, dst2, dst3;
@@ -445,36 +408,27 @@ static void common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(const uint8_t *src,
 
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
     LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-    PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3,
-                       dst, dst_stride);
+    PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst,
+                       dst_stride);
     dst += (4 * dst_stride);
   }
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_8w_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  int8_t *filter_horiz,
-                                                  int8_t *filter_vert,
-                                                  int32_t height) {
+static void common_hv_2ht_2vt_and_aver_dst_8w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   if (4 == height) {
     common_hv_2ht_2vt_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride,
                                            filter_horiz, filter_vert);
   } else {
-    common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(src, src_stride, dst, dst_stride,
-                                               filter_horiz, filter_vert,
-                                               height);
+    common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(
+        src, src_stride, dst, dst_stride, filter_horiz, filter_vert, height);
   }
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_16w_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert,
-                                                   int32_t height) {
+static void common_hv_2ht_2vt_and_aver_dst_16w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
   v16u8 filt_hz, filt_vt, vec0, vec1, dst0, dst1, dst2, dst3;
@@ -536,13 +490,9 @@ static void common_hv_2ht_2vt_and_aver_dst_16w_msa(const uint8_t *src,
   }
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_32w_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert,
-                                                   int32_t height) {
+static void common_hv_2ht_2vt_and_aver_dst_32w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   int32_t multiple8_cnt;
   for (multiple8_cnt = 2; multiple8_cnt--;) {
     common_hv_2ht_2vt_and_aver_dst_16w_msa(src, src_stride, dst, dst_stride,
@@ -552,13 +502,9 @@ static void common_hv_2ht_2vt_and_aver_dst_32w_msa(const uint8_t *src,
   }
 }
 
-static void common_hv_2ht_2vt_and_aver_dst_64w_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter_horiz,
-                                                   int8_t *filter_vert,
-                                                   int32_t height) {
+static void common_hv_2ht_2vt_and_aver_dst_64w_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter_horiz, int8_t *filter_vert, int32_t height) {
   int32_t multiple8_cnt;
   for (multiple8_cnt = 4; multiple8_cnt--;) {
     common_hv_2ht_2vt_and_aver_dst_16w_msa(src, src_stride, dst, dst_stride,
@@ -571,8 +517,8 @@ static void common_hv_2ht_2vt_and_aver_dst_64w_msa(const uint8_t *src,
 void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int x_step_q4,
-                           const int16_t *filter_y, int y_step_q4,
-                           int w, int h) {
+                           const int16_t *filter_y, int y_step_q4, int w,
+                           int h) {
   int8_t cnt, filt_hor[8], filt_ver[8];
 
   assert(x_step_q4 == 16);
@@ -589,72 +535,69 @@ void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
       ((const int32_t *)filter_y)[0] == 0) {
     switch (w) {
       case 4:
-        common_hv_2ht_2vt_and_aver_dst_4w_msa(src, (int32_t)src_stride,
-                                              dst, (int32_t)dst_stride,
-                                              &filt_hor[3], &filt_ver[3], h);
+        common_hv_2ht_2vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
+                                              (int32_t)dst_stride, &filt_hor[3],
+                                              &filt_ver[3], h);
         break;
       case 8:
-        common_hv_2ht_2vt_and_aver_dst_8w_msa(src, (int32_t)src_stride,
-                                              dst, (int32_t)dst_stride,
-                                              &filt_hor[3], &filt_ver[3], h);
+        common_hv_2ht_2vt_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
+                                              (int32_t)dst_stride, &filt_hor[3],
+                                              &filt_ver[3], h);
         break;
       case 16:
-        common_hv_2ht_2vt_and_aver_dst_16w_msa(src, (int32_t)src_stride,
-                                               dst, (int32_t)dst_stride,
+        common_hv_2ht_2vt_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
+                                               (int32_t)dst_stride,
                                                &filt_hor[3], &filt_ver[3], h);
         break;
       case 32:
-        common_hv_2ht_2vt_and_aver_dst_32w_msa(src, (int32_t)src_stride,
-                                               dst, (int32_t)dst_stride,
+        common_hv_2ht_2vt_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
+                                               (int32_t)dst_stride,
                                                &filt_hor[3], &filt_ver[3], h);
         break;
       case 64:
-        common_hv_2ht_2vt_and_aver_dst_64w_msa(src, (int32_t)src_stride,
-                                               dst, (int32_t)dst_stride,
+        common_hv_2ht_2vt_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
+                                               (int32_t)dst_stride,
                                                &filt_hor[3], &filt_ver[3], h);
         break;
       default:
-        vpx_convolve8_avg_c(src, src_stride, dst, dst_stride,
-                            filter_x, x_step_q4, filter_y, y_step_q4,
-                            w, h);
+        vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
+                            x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   } else if (((const int32_t *)filter_x)[0] == 0 ||
              ((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve8_avg_c(src, src_stride, dst, dst_stride,
-                        filter_x, x_step_q4, filter_y, y_step_q4,
-                        w, h);
+    vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+                        filter_y, y_step_q4, w, h);
   } else {
     switch (w) {
       case 4:
-        common_hv_8ht_8vt_and_aver_dst_4w_msa(src, (int32_t)src_stride,
-                                              dst, (int32_t)dst_stride,
-                                              filt_hor, filt_ver, h);
+        common_hv_8ht_8vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
+                                              (int32_t)dst_stride, filt_hor,
+                                              filt_ver, h);
         break;
       case 8:
-        common_hv_8ht_8vt_and_aver_dst_8w_msa(src, (int32_t)src_stride,
-                                              dst, (int32_t)dst_stride,
-                                              filt_hor, filt_ver, h);
+        common_hv_8ht_8vt_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
+                                              (int32_t)dst_stride, filt_hor,
+                                              filt_ver, h);
         break;
       case 16:
-        common_hv_8ht_8vt_and_aver_dst_16w_msa(src, (int32_t)src_stride,
-                                               dst, (int32_t)dst_stride,
-                                               filt_hor, filt_ver, h);
+        common_hv_8ht_8vt_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
+                                               (int32_t)dst_stride, filt_hor,
+                                               filt_ver, h);
         break;
       case 32:
-        common_hv_8ht_8vt_and_aver_dst_32w_msa(src, (int32_t)src_stride,
-                                               dst, (int32_t)dst_stride,
-                                               filt_hor, filt_ver, h);
+        common_hv_8ht_8vt_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
+                                               (int32_t)dst_stride, filt_hor,
+                                               filt_ver, h);
         break;
       case 64:
-        common_hv_8ht_8vt_and_aver_dst_64w_msa(src, (int32_t)src_stride,
-                                               dst, (int32_t)dst_stride,
-                                               filt_hor, filt_ver, h);
+        common_hv_8ht_8vt_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
+                                               (int32_t)dst_stride, filt_hor,
+                                               filt_ver, h);
         break;
       default:
-        vpx_convolve8_avg_c(src, src_stride, dst, dst_stride,
-                            filter_x, x_step_q4, filter_y, y_step_q4,
-                            w, h);
+        vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x,
+                            x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
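
A note on the dispatch above: the `((const int32_t *)filter_x)[0] == 0` test reads the first two 16-bit taps at once, which for the vpx kernel banks singles out the bilinear filters of the form {0, 0, 0, a, b, 0, 0, 0}. That is also why the 2-tap helpers receive &filt_hor[3] and &filt_ver[3]: the address of the first active tap within the int8_t tap copies. As a minimal scalar sketch, assuming FILTER_BITS == 7 (rounding constant 64) and with all names below illustrative rather than part of libvpx, one averaged output pixel of the 2-tap horizontal+vertical path works out to:

#include <stdint.h>

/* Illustrative sketch, not part of the patch: one output pixel of the
 * common_hv_2ht_2vt_and_aver_dst_* kernels. fh/fv point at the two
 * active bilinear taps, like &filt_hor[3] / &filt_ver[3] above. */
static uint8_t hv_2t_avg_pixel(const uint8_t *src, int32_t src_stride,
                               uint8_t dst_px, const int8_t *fh,
                               const int8_t *fv) {
  /* horizontal 2-tap pass on two consecutive rows, each rounded to 8 bits */
  const int32_t h0 = (src[0] * fh[0] + src[1] * fh[1] + 64) >> 7;
  const int32_t h1 =
      (src[src_stride] * fh[0] + src[src_stride + 1] * fh[1] + 64) >> 7;
  /* vertical 2-tap pass across the two horizontal results */
  const int32_t v = (h0 * fv[0] + h1 * fv[1] + 64) >> 7;
  /* rounding average with the existing destination pixel (the "avg" part) */
  return (uint8_t)((v + dst_px + 1) >> 1);
}
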
diff --git a/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c b/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
index 0164e41aa161a3c5614b86c2ac71687089a29083..146ce3b2f596d63ffc2bcc01ae03da08cdf9cbc4 100644
--- a/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
+++ b/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
@@ -13,10 +13,8 @@
 #include "vpx_dsp/mips/vpx_convolve_msa.h"
 
 static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             uint8_t *dst,
-                                             int32_t dst_stride,
-                                             int8_t *filter,
+                                             int32_t src_stride, uint8_t *dst,
+                                             int32_t dst_stride, int8_t *filter,
                                              int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
@@ -73,10 +71,8 @@ static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src,
 }
 
 static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             uint8_t *dst,
-                                             int32_t dst_stride,
-                                             int8_t *filter,
+                                             int32_t src_stride, uint8_t *dst,
+                                             int32_t dst_stride, int8_t *filter,
                                              int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
@@ -106,18 +102,18 @@ static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src,
     XORI_B4_128_SB(src7, src8, src9, src10);
     ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
                src87_r, src98_r, src109_r);
-    out0 = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
-                               filt1, filt2, filt3);
-    out1 = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
-                               filt1, filt2, filt3);
-    out2 = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
-                               filt1, filt2, filt3);
+    out0 = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0, filt1,
+                               filt2, filt3);
+    out1 = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0, filt1,
+                               filt2, filt3);
+    out2 = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0, filt1,
+                               filt2, filt3);
     out3 = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
                                filt1, filt2, filt3);
     SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
     SAT_SH4_SH(out0, out1, out2, out3, 7);
-    CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3,
-                            dst, dst_stride);
+    CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3, dst,
+                            dst_stride);
     dst += (4 * dst_stride);
 
     src10_r = src54_r;
@@ -130,13 +126,9 @@ static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src,
   }
 }
 
-static void common_vt_8t_and_aver_dst_16w_mult_msa(const uint8_t *src,
-                                                   int32_t src_stride,
-                                                   uint8_t *dst,
-                                                   int32_t dst_stride,
-                                                   int8_t *filter,
-                                                   int32_t height,
-                                                   int32_t width) {
+static void common_vt_8t_and_aver_dst_16w_mult_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter, int32_t height, int32_t width) {
   const uint8_t *src_tmp;
   uint8_t *dst_tmp;
   uint32_t loop_cnt, cnt;
@@ -227,38 +219,31 @@ static void common_vt_8t_and_aver_dst_16w_mult_msa(const uint8_t *src,
 }
 
 static void common_vt_8t_and_aver_dst_16w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
                                          filter, height, 16);
 }
 
 static void common_vt_8t_and_aver_dst_32w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
                                          filter, height, 32);
 }
 
 static void common_vt_8t_and_aver_dst_64w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
                                          filter, height, 64);
 }
 
 static void common_vt_2t_and_aver_dst_4x4_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               int8_t *filter) {
   v16i8 src0, src1, src2, src3, src4;
@@ -292,8 +277,7 @@ static void common_vt_2t_and_aver_dst_4x4_msa(const uint8_t *src,
 }
 
 static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               int8_t *filter) {
   v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
@@ -311,15 +295,15 @@ static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src,
   src8 = LD_SB(src);
 
   LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
-  ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst1,
-             dst2, dst3);
+  ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst1, dst2,
+             dst3);
   ILVR_D2_UB(dst1, dst0, dst3, dst2, dst0, dst1);
   ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
              src32_r, src43_r);
   ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
              src76_r, src87_r);
-  ILVR_D4_UB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
-             src87_r, src76_r, src2110, src4332, src6554, src8776);
+  ILVR_D4_UB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r,
+             src76_r, src2110, src4332, src6554, src8776);
   DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
               tmp0, tmp1, tmp2, tmp3);
   SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
@@ -331,10 +315,8 @@ static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src,
 }
 
 static void common_vt_2t_and_aver_dst_4w_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             uint8_t *dst,
-                                             int32_t dst_stride,
-                                             int8_t *filter,
+                                             int32_t src_stride, uint8_t *dst,
+                                             int32_t dst_stride, int8_t *filter,
                                              int32_t height) {
   if (4 == height) {
     common_vt_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
@@ -344,8 +326,7 @@ static void common_vt_2t_and_aver_dst_4w_msa(const uint8_t *src,
 }
 
 static void common_vt_2t_and_aver_dst_8x4_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
                                               int8_t *filter) {
   v16u8 src0, src1, src2, src3, src4;
@@ -364,16 +345,13 @@ static void common_vt_2t_and_aver_dst_8x4_msa(const uint8_t *src,
   DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
               tmp2, tmp3);
   SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3,
-                     dst, dst_stride);
+  PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst,
+                     dst_stride);
 }
 
-static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  int8_t *filter,
-                                                  int32_t height) {
+static void common_vt_2t_and_aver_dst_8x8mult_msa(
+    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
+    int8_t *filter, int32_t height) {
   uint32_t loop_cnt;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
   v16u8 dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
@@ -393,22 +371,22 @@ static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
     src += (8 * src_stride);
     LD_UB8(dst, dst_stride, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8);
 
-    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1,
-               vec2, vec3);
-    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5,
-               vec6, vec7);
+    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
+               vec3);
+    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6,
+               vec7);
     DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
                 tmp2, tmp3);
     SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST8x4_UB(tmp0, dst1, tmp1, dst2, tmp2, dst3, tmp3, dst4,
-                       dst, dst_stride);
+    PCKEV_AVG_ST8x4_UB(tmp0, dst1, tmp1, dst2, tmp2, dst3, tmp3, dst4, dst,
+                       dst_stride);
     dst += (4 * dst_stride);
 
     DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,
                 tmp2, tmp3);
     SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    PCKEV_AVG_ST8x4_UB(tmp0, dst5, tmp1, dst6, tmp2, dst7, tmp3, dst8,
-                       dst, dst_stride);
+    PCKEV_AVG_ST8x4_UB(tmp0, dst5, tmp1, dst6, tmp2, dst7, tmp3, dst8, dst,
+                       dst_stride);
     dst += (4 * dst_stride);
 
     src0 = src8;
@@ -416,10 +394,8 @@ static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
 }
 
 static void common_vt_2t_and_aver_dst_8w_msa(const uint8_t *src,
-                                             int32_t src_stride,
-                                             uint8_t *dst,
-                                             int32_t dst_stride,
-                                             int8_t *filter,
+                                             int32_t src_stride, uint8_t *dst,
+                                             int32_t dst_stride, int8_t *filter,
                                              int32_t height) {
   if (4 == height) {
     common_vt_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter);
@@ -430,11 +406,9 @@ static void common_vt_2t_and_aver_dst_8w_msa(const uint8_t *src,
 }
 
 static void common_vt_2t_and_aver_dst_16w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   uint32_t loop_cnt;
   v16u8 src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, filt0;
   v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
@@ -481,11 +455,9 @@ static void common_vt_2t_and_aver_dst_16w_msa(const uint8_t *src,
 }
 
 static void common_vt_2t_and_aver_dst_32w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   uint32_t loop_cnt;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
   v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
@@ -554,11 +526,9 @@ static void common_vt_2t_and_aver_dst_32w_msa(const uint8_t *src,
 }
 
 static void common_vt_2t_and_aver_dst_64w_msa(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
+                                              int32_t src_stride, uint8_t *dst,
                                               int32_t dst_stride,
-                                              int8_t *filter,
-                                              int32_t height) {
+                                              int8_t *filter, int32_t height) {
   uint32_t loop_cnt;
   v16u8 src0, src1, src2, src3, src4, src5;
   v16u8 src6, src7, src8, src9, src10, src11, filt0;
@@ -636,8 +606,8 @@ static void common_vt_2t_and_aver_dst_64w_msa(const uint8_t *src,
 void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 const int16_t *filter_x, int x_step_q4,
-                                const int16_t *filter_y, int y_step_q4,
-                                int w, int h) {
+                                const int16_t *filter_y, int y_step_q4, int w,
+                                int h) {
   int8_t cnt, filt_ver[8];
 
   assert(y_step_q4 == 16);
@@ -650,68 +620,56 @@ void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
   if (((const int32_t *)filter_y)[0] == 0) {
     switch (w) {
       case 4:
-        common_vt_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride,
-                                         dst, (int32_t)dst_stride,
-                                         &filt_ver[3], h);
+        common_vt_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
+                                         (int32_t)dst_stride, &filt_ver[3], h);
         break;
       case 8:
-        common_vt_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride,
-                                         dst, (int32_t)dst_stride,
-                                         &filt_ver[3], h);
+        common_vt_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
+                                         (int32_t)dst_stride, &filt_ver[3], h);
         break;
       case 16:
-        common_vt_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          &filt_ver[3], h);
+        common_vt_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, &filt_ver[3], h);
         break;
       case 32:
-        common_vt_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          &filt_ver[3], h);
+        common_vt_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, &filt_ver[3], h);
         break;
       case 64:
-        common_vt_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          &filt_ver[3], h);
+        common_vt_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, &filt_ver[3], h);
         break;
       default:
-        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
-                                 filter_x, x_step_q4, filter_y, y_step_q4,
-                                 w, h);
+        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+                                 x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   } else {
     switch (w) {
       case 4:
-        common_vt_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride,
-                                         dst, (int32_t)dst_stride,
-                                         filt_ver, h);
+        common_vt_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
+                                         (int32_t)dst_stride, filt_ver, h);
         break;
       case 8:
-        common_vt_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride,
-                                         dst, (int32_t)dst_stride,
-                                         filt_ver, h);
+        common_vt_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
+                                         (int32_t)dst_stride, filt_ver, h);
         break;
       case 16:
-        common_vt_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          filt_ver, h);
+        common_vt_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, filt_ver, h);
 
         break;
       case 32:
-        common_vt_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          filt_ver, h);
+        common_vt_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, filt_ver, h);
         break;
       case 64:
-        common_vt_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride,
-                                          dst, (int32_t)dst_stride,
-                                          filt_ver, h);
+        common_vt_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
+                                          (int32_t)dst_stride, filt_ver, h);
         break;
       default:
-        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
-                                 filter_x, x_step_q4, filter_y, y_step_q4,
-                                 w, h);
+        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
+                                 x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
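
The PCKEV_AVG_ST8x4_UB and CONVERT_UB_AVG_ST8x4_UB stores used throughout this file pack the filtered halfwords back to bytes and blend them with the destination in one step. Per pixel, the net effect should match the scalar form below, a sketch assuming the clip-then-round-average order of vpx_convolve8_avg_c; the function name is illustrative:

#include <stdint.h>

/* Sketch of the per-pixel effect of the *_AVG_ST8x4_UB macros: clamp
 * the filtered value to [0, 255], then take a rounding average with
 * the pixel already present in dst. */
static uint8_t avg_store_model(int32_t filtered, uint8_t dst_px) {
  if (filtered < 0) filtered = 0;
  if (filtered > 255) filtered = 255;
  return (uint8_t)((filtered + dst_px + 1) >> 1);
}
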
diff --git a/vpx_dsp/mips/vpx_convolve8_horiz_msa.c b/vpx_dsp/mips/vpx_convolve8_horiz_msa.c
index dbd120b0d5c546629f1b1175147e8c9169b45947..9e8bf7b5194374ac0d384bb690e8649c4fb353ec 100644
--- a/vpx_dsp/mips/vpx_convolve8_horiz_msa.c
+++ b/vpx_dsp/mips/vpx_convolve8_horiz_msa.c
@@ -325,7 +325,7 @@ static void common_hz_2t_4x4_msa(const uint8_t *src, int32_t src_stride,
 
   /* rearranging filter */
   filt = LD_UH(filter);
-  filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
 
   LD_SB4(src, src_stride, src0, src1, src2, src3);
   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
@@ -347,7 +347,7 @@ static void common_hz_2t_4x8_msa(const uint8_t *src, int32_t src_stride,
 
   /* rearranging filter */
   filt = LD_UH(filter);
-  filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
 
   LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
@@ -355,8 +355,8 @@ static void common_hz_2t_4x8_msa(const uint8_t *src, int32_t src_stride,
   DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,
               vec6, vec7);
   SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);
-  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1,
-              res2, res3);
+  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,
+              res3);
   ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
   dst += (4 * dst_stride);
   ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
@@ -383,7 +383,7 @@ static void common_hz_2t_8x4_msa(const uint8_t *src, int32_t src_stride,
 
   /* rearranging filter */
   filt = LD_UH(filter);
-  filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
 
   LD_SB4(src, src_stride, src0, src1, src2, src3);
   VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
@@ -406,7 +406,7 @@ static void common_hz_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
 
   /* rearranging filter */
   filt = LD_UH(filter);
-  filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
 
   LD_SB4(src, src_stride, src0, src1, src2, src3);
   src += (4 * src_stride);
@@ -482,7 +482,7 @@ static void common_hz_2t_16w_msa(const uint8_t *src, int32_t src_stride,
 
   /* rearranging filter */
   filt = LD_UH(filter);
-  filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
 
   LD_SB4(src, src_stride, src0, src2, src4, src6);
   LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
@@ -545,7 +545,7 @@ static void common_hz_2t_32w_msa(const uint8_t *src, int32_t src_stride,
 
   /* rearranging filter */
   filt = LD_UH(filter);
-  filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
 
   for (loop_cnt = height >> 1; loop_cnt--;) {
     src0 = LD_SB(src);
@@ -590,7 +590,7 @@ static void common_hz_2t_64w_msa(const uint8_t *src, int32_t src_stride,
 
   /* rearranging filter */
   filt = LD_UH(filter);
-  filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
 
   for (loop_cnt = height; loop_cnt--;) {
     src0 = LD_SB(src);
@@ -622,8 +622,8 @@ static void common_hz_2t_64w_msa(const uint8_t *src, int32_t src_stride,
 void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
-                             const int16_t *filter_y, int y_step_q4,
-                             int w, int h) {
+                             const int16_t *filter_y, int y_step_q4, int w,
+                             int h) {
   int8_t cnt, filt_hor[8];
 
   assert(x_step_q4 == 16);
@@ -636,67 +636,55 @@ void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
   if (((const int32_t *)filter_x)[0] == 0) {
     switch (w) {
       case 4:
-        common_hz_2t_4w_msa(src, (int32_t)src_stride,
-                            dst, (int32_t)dst_stride,
+        common_hz_2t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                             &filt_hor[3], h);
         break;
       case 8:
-        common_hz_2t_8w_msa(src, (int32_t)src_stride,
-                            dst, (int32_t)dst_stride,
+        common_hz_2t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                             &filt_hor[3], h);
         break;
       case 16:
-        common_hz_2t_16w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_hz_2t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              &filt_hor[3], h);
         break;
       case 32:
-        common_hz_2t_32w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_hz_2t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              &filt_hor[3], h);
         break;
       case 64:
-        common_hz_2t_64w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_hz_2t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              &filt_hor[3], h);
         break;
       default:
-        vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride,
-                              filter_x, x_step_q4, filter_y, y_step_q4,
-                              w, h);
+        vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+                              x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   } else {
     switch (w) {
       case 4:
-        common_hz_8t_4w_msa(src, (int32_t)src_stride,
-                            dst, (int32_t)dst_stride,
+        common_hz_8t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                             filt_hor, h);
         break;
       case 8:
-        common_hz_8t_8w_msa(src, (int32_t)src_stride,
-                            dst, (int32_t)dst_stride,
+        common_hz_8t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                             filt_hor, h);
         break;
       case 16:
-        common_hz_8t_16w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_hz_8t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              filt_hor, h);
         break;
       case 32:
-        common_hz_8t_32w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_hz_8t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              filt_hor, h);
         break;
       case 64:
-        common_hz_8t_64w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_hz_8t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              filt_hor, h);
         break;
       default:
-        vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride,
-                              filter_x, x_step_q4, filter_y, y_step_q4,
-                              w, h);
+        vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x,
+                              x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
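
The recurring `/* rearranging filter */` idiom above loads the two active taps as a single halfword and broadcasts it with __msa_splati_h, so every DOTP_UB lane multiplies an adjacent source-byte pair (arranged by the VSHF_B mask) by the tap pair, and SRARI_H then rounds by FILTER_BITS. In scalar terms, one 2-tap horizontal row reduces to the sketch below, assuming FILTER_BITS == 7 and `filter` pointing at the two bilinear taps; the helper name is illustrative:

#include <stdint.h>

/* Scalar model of one row of the common_hz_2t_* kernels: each 16-bit
 * lane of DOTP_UB holds a*src[x] + b*src[x+1], and SRARI_H rounds it
 * back to 8 bits. */
static void hz_2t_row_model(const uint8_t *src, uint8_t *dst, int32_t w,
                            const int8_t *filter) {
  int32_t x;
  for (x = 0; x < w; ++x) {
    dst[x] =
        (uint8_t)((src[x] * filter[0] + src[x + 1] * filter[1] + 64) >> 7);
  }
}
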
diff --git a/vpx_dsp/mips/vpx_convolve8_msa.c b/vpx_dsp/mips/vpx_convolve8_msa.c
index 7546f13150095c355b8fd1a200071e668b96978e..b16ec57886a533a1283c7f440f5bd24b2b2f34d5 100644
--- a/vpx_dsp/mips/vpx_convolve8_msa.c
+++ b/vpx_dsp/mips/vpx_convolve8_msa.c
@@ -69,15 +69,15 @@ static void common_hv_8ht_8vt_4w_msa(const uint8_t *src, int32_t src_stride,
     XORI_B4_128_SB(src7, src8, src9, src10);
     src += (4 * src_stride);
 
-    hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8);
     out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
     tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1,
                                filt_vt2, filt_vt3);
 
-    hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8);
     out4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8);
     tmp1 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out4, filt_vt0, filt_vt1,
@@ -151,20 +151,20 @@ static void common_hv_8ht_8vt_8w_msa(const uint8_t *src, int32_t src_stride,
 
     XORI_B4_128_SB(src7, src8, src9, src10);
 
-    hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
     tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1,
                                filt_vt2, filt_vt3);
 
-    hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7);
     tmp1 = FILT_8TAP_DPADD_S_H(out4, out5, out6, out7, filt_vt0, filt_vt1,
                                filt_vt2, filt_vt3);
 
-    hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3,
-                              filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+    hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, filt_hz0,
+                              filt_hz1, filt_hz2, filt_hz3);
     out8 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8);
     tmp2 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out8, filt_vt0, filt_vt1,
                                filt_vt2, filt_vt3);
@@ -295,11 +295,11 @@ static void common_hv_2ht_2vt_4x8_msa(const uint8_t *src, int32_t src_stride,
 
   ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
   ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3);
-  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt,
-              vec4, vec5, vec6, vec7);
+  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, vec4,
+              vec5, vec6, vec7);
   SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);
-  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1,
-              res2, res3);
+  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,
+              res3);
   ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
   dst += (4 * dst_stride);
   ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
@@ -361,12 +361,10 @@ static void common_hv_2ht_2vt_8x4_msa(const uint8_t *src, int32_t src_stride,
 }
 
 static void common_hv_2ht_2vt_8x8mult_msa(const uint8_t *src,
-                                          int32_t src_stride,
-                                          uint8_t *dst,
+                                          int32_t src_stride, uint8_t *dst,
                                           int32_t dst_stride,
                                           int8_t *filter_horiz,
-                                          int8_t *filter_vert,
-                                          int32_t height) {
+                                          int8_t *filter_vert, int32_t height) {
   uint32_t loop_cnt;
   v16i8 src0, src1, src2, src3, src4, mask, out0, out1;
   v16u8 filt_hz, filt_vt, vec0;
@@ -542,11 +540,10 @@ static void common_hv_2ht_2vt_64w_msa(const uint8_t *src, int32_t src_stride,
   }
 }
 
-void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride,
-                       uint8_t *dst, ptrdiff_t dst_stride,
-                       const int16_t *filter_x, int32_t x_step_q4,
-                       const int16_t *filter_y, int32_t y_step_q4,
-                       int32_t w, int32_t h) {
+void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                       ptrdiff_t dst_stride, const int16_t *filter_x,
+                       int32_t x_step_q4, const int16_t *filter_y,
+                       int32_t y_step_q4, int32_t w, int32_t h) {
   int8_t cnt, filt_hor[8], filt_ver[8];
 
   assert(x_step_q4 == 16);
@@ -563,72 +560,69 @@ void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride,
       ((const int32_t *)filter_y)[0] == 0) {
     switch (w) {
       case 4:
-        common_hv_2ht_2vt_4w_msa(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 &filt_hor[3], &filt_ver[3], (int32_t)h);
+        common_hv_2ht_2vt_4w_msa(src, (int32_t)src_stride, dst,
+                                 (int32_t)dst_stride, &filt_hor[3],
+                                 &filt_ver[3], (int32_t)h);
         break;
       case 8:
-        common_hv_2ht_2vt_8w_msa(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 &filt_hor[3], &filt_ver[3], (int32_t)h);
+        common_hv_2ht_2vt_8w_msa(src, (int32_t)src_stride, dst,
+                                 (int32_t)dst_stride, &filt_hor[3],
+                                 &filt_ver[3], (int32_t)h);
         break;
       case 16:
-        common_hv_2ht_2vt_16w_msa(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  &filt_hor[3], &filt_ver[3], (int32_t)h);
+        common_hv_2ht_2vt_16w_msa(src, (int32_t)src_stride, dst,
+                                  (int32_t)dst_stride, &filt_hor[3],
+                                  &filt_ver[3], (int32_t)h);
         break;
       case 32:
-        common_hv_2ht_2vt_32w_msa(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  &filt_hor[3], &filt_ver[3], (int32_t)h);
+        common_hv_2ht_2vt_32w_msa(src, (int32_t)src_stride, dst,
+                                  (int32_t)dst_stride, &filt_hor[3],
+                                  &filt_ver[3], (int32_t)h);
         break;
       case 64:
-        common_hv_2ht_2vt_64w_msa(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  &filt_hor[3], &filt_ver[3], (int32_t)h);
+        common_hv_2ht_2vt_64w_msa(src, (int32_t)src_stride, dst,
+                                  (int32_t)dst_stride, &filt_hor[3],
+                                  &filt_ver[3], (int32_t)h);
         break;
       default:
-        vpx_convolve8_c(src, src_stride, dst, dst_stride,
-                        filter_x, x_step_q4, filter_y, y_step_q4,
-                        w, h);
+        vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+                        filter_y, y_step_q4, w, h);
         break;
     }
   } else if (((const int32_t *)filter_x)[0] == 0 ||
              ((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve8_c(src, src_stride, dst, dst_stride,
-                    filter_x, x_step_q4, filter_y, y_step_q4,
-                    w, h);
+    vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+                    filter_y, y_step_q4, w, h);
   } else {
     switch (w) {
       case 4:
-        common_hv_8ht_8vt_4w_msa(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 filt_hor, filt_ver, (int32_t)h);
+        common_hv_8ht_8vt_4w_msa(src, (int32_t)src_stride, dst,
+                                 (int32_t)dst_stride, filt_hor, filt_ver,
+                                 (int32_t)h);
         break;
       case 8:
-        common_hv_8ht_8vt_8w_msa(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 filt_hor, filt_ver, (int32_t)h);
+        common_hv_8ht_8vt_8w_msa(src, (int32_t)src_stride, dst,
+                                 (int32_t)dst_stride, filt_hor, filt_ver,
+                                 (int32_t)h);
         break;
       case 16:
-        common_hv_8ht_8vt_16w_msa(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  filt_hor, filt_ver, (int32_t)h);
+        common_hv_8ht_8vt_16w_msa(src, (int32_t)src_stride, dst,
+                                  (int32_t)dst_stride, filt_hor, filt_ver,
+                                  (int32_t)h);
         break;
       case 32:
-        common_hv_8ht_8vt_32w_msa(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  filt_hor, filt_ver, (int32_t)h);
+        common_hv_8ht_8vt_32w_msa(src, (int32_t)src_stride, dst,
+                                  (int32_t)dst_stride, filt_hor, filt_ver,
+                                  (int32_t)h);
         break;
       case 64:
-        common_hv_8ht_8vt_64w_msa(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  filt_hor, filt_ver, (int32_t)h);
+        common_hv_8ht_8vt_64w_msa(src, (int32_t)src_stride, dst,
+                                  (int32_t)dst_stride, filt_hor, filt_ver,
+                                  (int32_t)h);
         break;
       default:
-        vpx_convolve8_c(src, src_stride, dst, dst_stride,
-                        filter_x, x_step_q4, filter_y, y_step_q4,
-                        w, h);
+        vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
+                        filter_y, y_step_q4, w, h);
         break;
     }
   }
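
For the 8-tap branches dispatched above, HORIZ_8TAP_FILT and FILT_8TAP_DPADD_S_H boil down to an ordinary 8-tap dot product, rounded by FILTER_BITS and clipped to 8 bits, mirroring what vpx_convolve8_c computes. A scalar sketch, with `src` pointing at the first tap's sample and the name illustrative:

#include <stdint.h>

/* Sketch of one 8-tap filtered output sample; step is 1 for the
 * horizontal pass and the source stride for the vertical pass. */
static uint8_t filt_8tap_model(const uint8_t *src, int32_t step,
                               const int16_t *filter) {
  int32_t k, sum = 0;
  for (k = 0; k < 8; ++k) sum += src[k * step] * filter[k];
  sum = (sum + 64) >> 7; /* round by FILTER_BITS == 7 */
  if (sum < 0) sum = 0;
  if (sum > 255) sum = 255;
  return (uint8_t)sum;
}
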
diff --git a/vpx_dsp/mips/vpx_convolve8_vert_msa.c b/vpx_dsp/mips/vpx_convolve8_vert_msa.c
index 527d4571991cd5ba56b5505f15bf74620ec3bcc2..410682271f5872f29eb53736db964568e2a5cb58 100644
--- a/vpx_dsp/mips/vpx_convolve8_vert_msa.c
+++ b/vpx_dsp/mips/vpx_convolve8_vert_msa.c
@@ -222,11 +222,11 @@ static void common_vt_8t_16w_mult_msa(const uint8_t *src, int32_t src_stride,
     LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
     XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
     src_tmp += (7 * src_stride);
-    ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r,
-               src32_r, src54_r, src21_r);
+    ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
+               src54_r, src21_r);
     ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
-    ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l,
-               src32_l, src54_l, src21_l);
+    ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, src32_l,
+               src54_l, src21_l);
     ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
 
     for (loop_cnt = (height >> 2); loop_cnt--;) {
@@ -344,8 +344,8 @@ static void common_vt_2t_4x8_msa(const uint8_t *src, int32_t src_stride,
              src32_r, src43_r);
   ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
              src76_r, src87_r);
-  ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
-             src87_r, src76_r, src2110, src4332, src6554, src8776);
+  ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r,
+             src76_r, src2110, src4332, src6554, src8776);
   DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
               tmp0, tmp1, tmp2, tmp3);
   SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
@@ -407,10 +407,10 @@ static void common_vt_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
     LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8);
     src += (8 * src_stride);
 
-    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1,
-               vec2, vec3);
-    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5,
-               vec6, vec7);
+    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
+               vec3);
+    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6,
+               vec7);
     DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
                 tmp2, tmp3);
     SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
@@ -629,8 +629,8 @@ static void common_vt_2t_64w_msa(const uint8_t *src, int32_t src_stride,
 void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
-                            const int16_t *filter_y, int y_step_q4,
-                            int w, int h) {
+                            const int16_t *filter_y, int y_step_q4, int w,
+                            int h) {
   int8_t cnt, filt_ver[8];
 
   assert(y_step_q4 == 16);
@@ -643,67 +643,55 @@ void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
   if (((const int32_t *)filter_y)[0] == 0) {
     switch (w) {
       case 4:
-        common_vt_2t_4w_msa(src, (int32_t)src_stride,
-                            dst, (int32_t)dst_stride,
+        common_vt_2t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                             &filt_ver[3], h);
         break;
       case 8:
-        common_vt_2t_8w_msa(src, (int32_t)src_stride,
-                            dst, (int32_t)dst_stride,
+        common_vt_2t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                             &filt_ver[3], h);
         break;
       case 16:
-        common_vt_2t_16w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_vt_2t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              &filt_ver[3], h);
         break;
       case 32:
-        common_vt_2t_32w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_vt_2t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              &filt_ver[3], h);
         break;
       case 64:
-        common_vt_2t_64w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_vt_2t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              &filt_ver[3], h);
         break;
       default:
-        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride,
-                             filter_x, x_step_q4, filter_y, y_step_q4,
-                             w, h);
+        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+                             x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   } else {
     switch (w) {
       case 4:
-        common_vt_8t_4w_msa(src, (int32_t)src_stride,
-                            dst, (int32_t)dst_stride,
+        common_vt_8t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                             filt_ver, h);
         break;
       case 8:
-        common_vt_8t_8w_msa(src, (int32_t)src_stride,
-                            dst, (int32_t)dst_stride,
+        common_vt_8t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                             filt_ver, h);
         break;
       case 16:
-        common_vt_8t_16w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_vt_8t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              filt_ver, h);
         break;
       case 32:
-        common_vt_8t_32w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_vt_8t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              filt_ver, h);
         break;
       case 64:
-        common_vt_8t_64w_msa(src, (int32_t)src_stride,
-                             dst, (int32_t)dst_stride,
+        common_vt_8t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
                              filt_ver, h);
         break;
       default:
-        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride,
-                             filter_x, x_step_q4, filter_y, y_step_q4,
-                             w, h);
+        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x,
+                             x_step_q4, filter_y, y_step_q4, w, h);
         break;
     }
   }
diff --git a/vpx_dsp/mips/vpx_convolve_avg_msa.c b/vpx_dsp/mips/vpx_convolve_avg_msa.c
index 4c3d978031a57e008e5903e471e9ebd86b227035..45399bad852c552722f969c9c92a8c18995227b7 100644
--- a/vpx_dsp/mips/vpx_convolve_avg_msa.c
+++ b/vpx_dsp/mips/vpx_convolve_avg_msa.c
@@ -10,8 +10,8 @@
 
 #include "vpx_dsp/mips/macros_msa.h"
 
-static void avg_width4_msa(const uint8_t *src, int32_t src_stride,
-                           uint8_t *dst, int32_t dst_stride, int32_t height) {
+static void avg_width4_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst,
+                           int32_t dst_stride, int32_t height) {
   int32_t cnt;
   uint32_t out0, out1, out2, out3;
   v16u8 src0, src1, src2, src3;
@@ -24,8 +24,8 @@ static void avg_width4_msa(const uint8_t *src, int32_t src_stride,
 
       LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
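+      // Rounding per-byte average of the four src rows with the four dst
+      // rows: each lane becomes (src + dst + 1) >> 1.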
 
-      AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
-                  dst0, dst1, dst2, dst3);
+      AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
+                  dst2, dst3);
 
       out0 = __msa_copy_u_w((v4i32)dst0, 0);
       out1 = __msa_copy_u_w((v4i32)dst1, 0);
@@ -53,8 +53,8 @@ static void avg_width4_msa(const uint8_t *src, int32_t src_stride,
   }
 }
 
-static void avg_width8_msa(const uint8_t *src, int32_t src_stride,
-                           uint8_t *dst, int32_t dst_stride, int32_t height) {
+static void avg_width8_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst,
+                           int32_t dst_stride, int32_t height) {
   int32_t cnt;
   uint64_t out0, out1, out2, out3;
   v16u8 src0, src1, src2, src3;
@@ -65,8 +65,8 @@ static void avg_width8_msa(const uint8_t *src, int32_t src_stride,
     src += (4 * src_stride);
     LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
 
-    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
-                dst0, dst1, dst2, dst3);
+    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
+                dst2, dst3);
 
     out0 = __msa_copy_u_d((v2i64)dst0, 0);
     out1 = __msa_copy_u_d((v2i64)dst1, 0);
@@ -88,10 +88,10 @@ static void avg_width16_msa(const uint8_t *src, int32_t src_stride,
     src += (8 * src_stride);
     LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
 
-    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
-                dst0, dst1, dst2, dst3);
-    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7,
-                dst4, dst5, dst6, dst7);
+    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
+                dst2, dst3);
+    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,
+                dst6, dst7);
     ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, dst_stride);
     dst += (8 * dst_stride);
   }
@@ -120,14 +120,14 @@ static void avg_width32_msa(const uint8_t *src, int32_t src_stride,
     LD_UB4(dst_dup + 16, dst_stride, dst9, dst11, dst13, dst15);
     dst_dup += (4 * dst_stride);
 
-    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
-                dst0, dst1, dst2, dst3);
-    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7,
-                dst4, dst5, dst6, dst7);
-    AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11,
-                dst8, dst9, dst10, dst11);
-    AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15,
-                dst12, dst13, dst14, dst15);
+    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
+                dst2, dst3);
+    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,
+                dst6, dst7);
+    AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, dst8, dst9,
+                dst10, dst11);
+    AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, dst12,
+                dst13, dst14, dst15);
 
     ST_UB4(dst0, dst2, dst4, dst6, dst, dst_stride);
     ST_UB4(dst1, dst3, dst5, dst7, dst + 16, dst_stride);
@@ -166,14 +166,14 @@ static void avg_width64_msa(const uint8_t *src, int32_t src_stride,
     LD_UB4(dst_dup, 16, dst12, dst13, dst14, dst15);
     dst_dup += dst_stride;
 
-    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
-                dst0, dst1, dst2, dst3);
-    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7,
-                dst4, dst5, dst6, dst7);
-    AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11,
-                dst8, dst9, dst10, dst11);
-    AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15,
-                dst12, dst13, dst14, dst15);
+    AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
+                dst2, dst3);
+    AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,
+                dst6, dst7);
+    AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, dst8, dst9,
+                dst10, dst11);
+    AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, dst12,
+                dst13, dst14, dst15);
 
     ST_UB4(dst0, dst1, dst2, dst3, dst, 16);
     dst += dst_stride;
diff --git a/vpx_dsp/mips/vpx_convolve_copy_msa.c b/vpx_dsp/mips/vpx_convolve_copy_msa.c
index ba4012281e84ae9402cff599fabc65b65f81de18..c3d87a4ab8112a3ceec5d64cd7356c2bfdec2dc7 100644
--- a/vpx_dsp/mips/vpx_convolve_copy_msa.c
+++ b/vpx_dsp/mips/vpx_convolve_copy_msa.c
@@ -105,12 +105,12 @@ static void copy_16multx8mult_msa(const uint8_t *src, int32_t src_stride,
     dst_tmp = dst;
 
     for (loop_cnt = (height >> 3); loop_cnt--;) {
-      LD_UB8(src_tmp, src_stride,
-             src0, src1, src2, src3, src4, src5, src6, src7);
+      LD_UB8(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6,
+             src7);
       src_tmp += (8 * src_stride);
 
-      ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7,
-             dst_tmp, dst_stride);
+      ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst_tmp,
+             dst_stride);
       dst_tmp += (8 * dst_stride);
     }
 
diff --git a/vpx_dsp/mips/vpx_convolve_msa.h b/vpx_dsp/mips/vpx_convolve_msa.h
index e0013983ae6ae4c0635819753def290e94917dac..198c21ed20a5d9624dd91aa702e49e76698fcdf3 100644
--- a/vpx_dsp/mips/vpx_convolve_msa.h
+++ b/vpx_dsp/mips/vpx_convolve_msa.h
@@ -16,104 +16,109 @@
 
 extern const uint8_t mc_filt_mask_arr[16 * 3];
 
-#define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3,         \
-                            filt0, filt1, filt2, filt3) ({  \
-  v8i16 tmp0, tmp1;                                         \
-                                                            \
-  tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0);         \
-  tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1);  \
-  tmp1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2);         \
-  tmp1 = __msa_dpadd_s_h(tmp1, (v16i8)vec3, (v16i8)filt3);  \
-  tmp0 = __msa_adds_s_h(tmp0, tmp1);                        \
-                                                            \
-  tmp0;                                                     \
-})
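+// Apply the 8-tap filter as four 2-tap dot products: each DOTP/DPADD pair
+// consumes two taps, and a saturating add merges the two partial sums.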
+#define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt0, filt1, filt2, \
+                            filt3)                                       \
+  ({                                                                     \
+    v8i16 tmp0, tmp1;                                                    \
+                                                                         \
+    tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0);                    \
+    tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1);             \
+    tmp1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2);                    \
+    tmp1 = __msa_dpadd_s_h(tmp1, (v16i8)vec3, (v16i8)filt3);             \
+    tmp0 = __msa_adds_s_h(tmp0, tmp1);                                   \
+                                                                         \
+    tmp0;                                                                \
+  })
 
-#define HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3,        \
-                        filt_h0, filt_h1, filt_h2, filt_h3) ({         \
-  v16i8 vec0_m, vec1_m, vec2_m, vec3_m;                                \
-  v8i16 hz_out_m;                                                      \
-                                                                       \
-  VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3,                   \
-             vec0_m, vec1_m, vec2_m, vec3_m);                          \
-  hz_out_m = FILT_8TAP_DPADD_S_H(vec0_m, vec1_m, vec2_m, vec3_m,       \
-                                 filt_h0, filt_h1, filt_h2, filt_h3);  \
-                                                                       \
-  hz_out_m = __msa_srari_h(hz_out_m, FILTER_BITS);                     \
-  hz_out_m = __msa_sat_s_h(hz_out_m, 7);                               \
-                                                                       \
-  hz_out_m;                                                            \
-})
+#define HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, filt_h0,       \
+                        filt_h1, filt_h2, filt_h3)                             \
+  ({                                                                           \
+    v16i8 vec0_m, vec1_m, vec2_m, vec3_m;                                      \
+    v8i16 hz_out_m;                                                            \
+                                                                               \
+    VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3, vec0_m, vec1_m, vec2_m, \
+               vec3_m);                                                        \
+    hz_out_m = FILT_8TAP_DPADD_S_H(vec0_m, vec1_m, vec2_m, vec3_m, filt_h0,    \
+                                   filt_h1, filt_h2, filt_h3);                 \
+                                                                               \
+    hz_out_m = __msa_srari_h(hz_out_m, FILTER_BITS);                           \
+    hz_out_m = __msa_sat_s_h(hz_out_m, 7);                                     \
+                                                                               \
+    hz_out_m;                                                                  \
+  })
 
-#define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3,               \
-                                   mask0, mask1, mask2, mask3,           \
-                                   filt0, filt1, filt2, filt3,           \
-                                   out0, out1) {                         \
-  v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;  \
-  v8i16 res0_m, res1_m, res2_m, res3_m;                                  \
-                                                                         \
-  VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m);      \
-  DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, res0_m, res1_m);             \
-  VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m);      \
-  DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, res0_m, res1_m);            \
-  VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m);      \
-  DOTP_SB2_SH(vec4_m, vec5_m, filt2, filt2, res2_m, res3_m);             \
-  VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m);      \
-  DPADD_SB2_SH(vec6_m, vec7_m, filt3, filt3, res2_m, res3_m);            \
-  ADDS_SH2_SH(res0_m, res2_m, res1_m, res3_m, out0, out1);               \
-}
+#define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,     \
+                                   mask2, mask3, filt0, filt1, filt2, filt3, \
+                                   out0, out1)                               \
+  {                                                                          \
+    v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;    \
+    v8i16 res0_m, res1_m, res2_m, res3_m;                                    \
+                                                                             \
+    VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m);        \
+    DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, res0_m, res1_m);               \
+    VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m);        \
+    DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, res0_m, res1_m);              \
+    VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m);        \
+    DOTP_SB2_SH(vec4_m, vec5_m, filt2, filt2, res2_m, res3_m);               \
+    VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m);        \
+    DPADD_SB2_SH(vec6_m, vec7_m, filt3, filt3, res2_m, res3_m);              \
+    ADDS_SH2_SH(res0_m, res2_m, res1_m, res3_m, out0, out1);                 \
+  }
 
-#define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3,                  \
-                                   mask0, mask1, mask2, mask3,              \
-                                   filt0, filt1, filt2, filt3,              \
-                                   out0, out1, out2, out3) {                \
-  v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;     \
-  v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m;     \
-                                                                            \
-  VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m);         \
-  VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m);         \
-  DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0,   \
-              res0_m, res1_m, res2_m, res3_m);                              \
-  VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0_m, vec1_m);         \
-  VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m);         \
-  DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt2, filt2, filt2, filt2,   \
-              res4_m, res5_m, res6_m, res7_m);                              \
-  VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec4_m, vec5_m);         \
-  VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m);         \
-  DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt1, filt1, filt1, filt1,  \
-               res0_m, res1_m, res2_m, res3_m);                             \
-  VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec4_m, vec5_m);         \
-  VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m);         \
-  DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt3, filt3, filt3, filt3,  \
-               res4_m, res5_m, res6_m, res7_m);                             \
-  ADDS_SH4_SH(res0_m, res4_m, res1_m, res5_m, res2_m, res6_m, res3_m,       \
-              res7_m, out0, out1, out2, out3);                              \
-}
+#define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,     \
+                                   mask2, mask3, filt0, filt1, filt2, filt3, \
+                                   out0, out1, out2, out3)                   \
+  {                                                                          \
+    v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;    \
+    v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m;    \
+                                                                             \
+    VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m);        \
+    VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m);        \
+    DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0,  \
+                res0_m, res1_m, res2_m, res3_m);                             \
+    VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0_m, vec1_m);        \
+    VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m);        \
+    DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt2, filt2, filt2, filt2,  \
+                res4_m, res5_m, res6_m, res7_m);                             \
+    VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec4_m, vec5_m);        \
+    VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m);        \
+    DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt1, filt1, filt1, filt1, \
+                 res0_m, res1_m, res2_m, res3_m);                            \
+    VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec4_m, vec5_m);        \
+    VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m);        \
+    DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt3, filt3, filt3, filt3, \
+                 res4_m, res5_m, res6_m, res7_m);                            \
+    ADDS_SH4_SH(res0_m, res4_m, res1_m, res5_m, res2_m, res6_m, res3_m,      \
+                res7_m, out0, out1, out2, out3);                             \
+  }
 
-#define PCKEV_XORI128_AVG_ST_UB(in0, in1, dst, pdst) {  \
-  v16u8 tmp_m;                                          \
-                                                        \
-  tmp_m = PCKEV_XORI128_UB(in1, in0);                   \
-  tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst);            \
-  ST_UB(tmp_m, (pdst));                                 \
-}
+#define PCKEV_XORI128_AVG_ST_UB(in0, in1, dst, pdst) \
+  {                                                  \
+    v16u8 tmp_m;                                     \
+                                                     \
+    tmp_m = PCKEV_XORI128_UB(in1, in0);              \
+    tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst);       \
+    ST_UB(tmp_m, (pdst));                            \
+  }
 
-#define PCKEV_AVG_ST_UB(in0, in1, dst, pdst) {           \
-  v16u8 tmp_m;                                           \
-                                                         \
-  tmp_m = (v16u8)__msa_pckev_b((v16i8)in0, (v16i8)in1);  \
-  tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst);             \
-  ST_UB(tmp_m, (pdst));                                  \
-}
+#define PCKEV_AVG_ST_UB(in0, in1, dst, pdst)              \
+  {                                                       \
+    v16u8 tmp_m;                                          \
+                                                          \
+    tmp_m = (v16u8)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
+    tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst);            \
+    ST_UB(tmp_m, (pdst));                                 \
+  }
 
-#define PCKEV_AVG_ST8x4_UB(in1, dst0, in2, dst1, in3, dst2, in4, dst3,  \
-                           pdst, stride) {                              \
-  v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
-  uint8_t *pdst_m = (uint8_t *)(pdst);                                  \
-                                                                        \
-  PCKEV_B2_UB(in2, in1, in4, in3, tmp0_m, tmp1_m);                      \
-  PCKEV_D2_UB(dst1, dst0, dst3, dst2, tmp2_m, tmp3_m);                  \
-  AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m);          \
-  ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride);                             \
-}
-#endif  /* VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_ */
+#define PCKEV_AVG_ST8x4_UB(in1, dst0, in2, dst1, in3, dst2, in4, dst3, pdst, \
+                           stride)                                           \
+  {                                                                          \
+    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                    \
+    uint8_t *pdst_m = (uint8_t *)(pdst);                                     \
+                                                                             \
+    PCKEV_B2_UB(in2, in1, in4, in3, tmp0_m, tmp1_m);                         \
+    PCKEV_D2_UB(dst1, dst0, dst3, dst2, tmp2_m, tmp3_m);                     \
+    AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m);             \
+    ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride);                                \
+  }
+#endif /* VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_ */
diff --git a/vpx_dsp/prob.c b/vpx_dsp/prob.c
index 639d24dd2f0158c1f119df9c2f50cf4f7b3bf5fc..819e95062e9fea6d4791d283ce3138fd9235ddcb 100644
--- a/vpx_dsp/prob.c
+++ b/vpx_dsp/prob.c
@@ -11,22 +11,16 @@
 #include "./prob.h"
 
 const uint8_t vpx_norm[256] = {
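+  // For v > 0, entry v is the number of leading zeros of v as an 8-bit value,
+  // i.e. the shift needed to renormalize the boolean coder's range.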
-  0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
-  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+  0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+  3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 };
 
 static unsigned int tree_merge_probs_impl(unsigned int i,
@@ -35,13 +29,13 @@ static unsigned int tree_merge_probs_impl(unsigned int i,
                                           const unsigned int *counts,
                                           vpx_prob *probs) {
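+  // Entries <= 0 are leaves, indexing counts[] by their negation; positive
+  // entries are internal nodes whose subtree counts are accumulated
+  // recursively.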
   const int l = tree[i];
-  const unsigned int left_count = (l <= 0)
-                 ? counts[-l]
-                 : tree_merge_probs_impl(l, tree, pre_probs, counts, probs);
+  const unsigned int left_count =
+      (l <= 0) ? counts[-l]
+               : tree_merge_probs_impl(l, tree, pre_probs, counts, probs);
   const int r = tree[i + 1];
-  const unsigned int right_count = (r <= 0)
-                 ? counts[-r]
-                 : tree_merge_probs_impl(r, tree, pre_probs, counts, probs);
+  const unsigned int right_count =
+      (r <= 0) ? counts[-r]
+               : tree_merge_probs_impl(r, tree, pre_probs, counts, probs);
   const unsigned int ct[2] = { left_count, right_count };
   probs[i >> 1] = mode_mv_merge_probs(pre_probs[i >> 1], ct);
   return left_count + right_count;
diff --git a/vpx_dsp/prob.h b/vpx_dsp/prob.h
index c3cb103ffb5e8ca77f234eddbe01f3d304631176..148116ed08994e5c0db84cd47f7a8acc51ea0077 100644
--- a/vpx_dsp/prob.h
+++ b/vpx_dsp/prob.h
@@ -24,11 +24,11 @@ typedef uint8_t vpx_prob;
 
 #define MAX_PROB 255
 
-#define vpx_prob_half ((vpx_prob) 128)
+#define vpx_prob_half ((vpx_prob)128)
 
 typedef int8_t vpx_tree_index;
 
-#define TREE_SIZE(leaf_count) (2 * (leaf_count) - 2)
+#define TREE_SIZE(leaf_count) (2 * (leaf_count)-2)
 
 #define vpx_complement(x) (255 - x)
 
@@ -60,8 +60,7 @@ static INLINE vpx_prob weighted_prob(int prob1, int prob2, int factor) {
   return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
 }
 
-static INLINE vpx_prob merge_probs(vpx_prob pre_prob,
-                                   const unsigned int ct[2],
+static INLINE vpx_prob merge_probs(vpx_prob pre_prob, const unsigned int ct[2],
                                    unsigned int count_sat,
                                    unsigned int max_update_factor) {
   const vpx_prob prob = get_binary_prob(ct[0], ct[1]);
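+  // Blend pre_prob toward the observed probability, weighting by the sample
+  // count (saturated at count_sat) mapped into [0, max_update_factor].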
@@ -72,7 +71,7 @@ static INLINE vpx_prob merge_probs(vpx_prob pre_prob,
 
 // MODE_MV_MAX_UPDATE_FACTOR (128) * count / MODE_MV_COUNT_SAT;
 static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = {
-  0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64,
+  0,  6,  12, 19, 25, 32,  38,  44,  51,  57, 64,
   70, 76, 83, 89, 96, 102, 108, 115, 121, 128
 };
 
@@ -93,7 +92,6 @@ static INLINE vpx_prob mode_mv_merge_probs(vpx_prob pre_prob,
 void vpx_tree_merge_probs(const vpx_tree_index *tree, const vpx_prob *pre_probs,
                           const unsigned int *counts, vpx_prob *probs);
 
-
 DECLARE_ALIGNED(16, extern const uint8_t, vpx_norm[256]);
 
 #ifdef __cplusplus
diff --git a/vpx_dsp/psnrhvs.c b/vpx_dsp/psnrhvs.c
index 300170579121bd9b8a36a53b21c8421fb3d1bfc4..58ef9f00e884fb680ab931d7871074ef9f401860 100644
--- a/vpx_dsp/psnrhvs.c
+++ b/vpx_dsp/psnrhvs.c
@@ -20,13 +20,13 @@
 #include "vpx_ports/system_state.h"
 
 #if !defined(M_PI)
-# define M_PI (3.141592653589793238462643)
+#define M_PI (3.141592653589793238462643)
 #endif
 #include <string.h>
 
 static void od_bin_fdct8x8(tran_low_t *y, int ystride, const int16_t *x,
                            int xstride) {
-  (void) xstride;
+  (void)xstride;
   vpx_fdct8x8(x, y, ystride);
 }
 
@@ -34,56 +34,59 @@ static void od_bin_fdct8x8(tran_low_t *y, int ystride, const int16_t *x,
  * transparency. This is not the JPEG-based matrix from the paper;
  this one gives a slightly higher MOS agreement.*/
 static const float csf_y[8][8] = {
-    {1.6193873005, 2.2901594831, 2.08509755623, 1.48366094411, 1.00227514334,
-     0.678296995242, 0.466224900598, 0.3265091542},
-    {2.2901594831, 1.94321815382, 2.04793073064, 1.68731108984, 1.2305666963,
-     0.868920337363, 0.61280991668, 0.436405793551},
-    {2.08509755623, 2.04793073064, 1.34329019223, 1.09205635862, 0.875748795257,
-     0.670882927016, 0.501731932449, 0.372504254596},
-    {1.48366094411, 1.68731108984, 1.09205635862, 0.772819797575,
-     0.605636379554, 0.48309405692, 0.380429446972, 0.295774038565},
-    {1.00227514334, 1.2305666963, 0.875748795257, 0.605636379554,
-     0.448996256676, 0.352889268808, 0.283006984131, 0.226951348204},
-    {0.678296995242, 0.868920337363, 0.670882927016, 0.48309405692,
-     0.352889268808, 0.27032073436, 0.215017739696, 0.17408067321},
-    {0.466224900598, 0.61280991668, 0.501731932449, 0.380429446972,
-     0.283006984131, 0.215017739696, 0.168869545842, 0.136153931001},
-    {0.3265091542, 0.436405793551, 0.372504254596, 0.295774038565,
-     0.226951348204, 0.17408067321, 0.136153931001, 0.109083846276}};
+  { 1.6193873005, 2.2901594831, 2.08509755623, 1.48366094411, 1.00227514334,
+    0.678296995242, 0.466224900598, 0.3265091542 },
+  { 2.2901594831, 1.94321815382, 2.04793073064, 1.68731108984, 1.2305666963,
+    0.868920337363, 0.61280991668, 0.436405793551 },
+  { 2.08509755623, 2.04793073064, 1.34329019223, 1.09205635862, 0.875748795257,
+    0.670882927016, 0.501731932449, 0.372504254596 },
+  { 1.48366094411, 1.68731108984, 1.09205635862, 0.772819797575, 0.605636379554,
+    0.48309405692, 0.380429446972, 0.295774038565 },
+  { 1.00227514334, 1.2305666963, 0.875748795257, 0.605636379554, 0.448996256676,
+    0.352889268808, 0.283006984131, 0.226951348204 },
+  { 0.678296995242, 0.868920337363, 0.670882927016, 0.48309405692,
+    0.352889268808, 0.27032073436, 0.215017739696, 0.17408067321 },
+  { 0.466224900598, 0.61280991668, 0.501731932449, 0.380429446972,
+    0.283006984131, 0.215017739696, 0.168869545842, 0.136153931001 },
+  { 0.3265091542, 0.436405793551, 0.372504254596, 0.295774038565,
+    0.226951348204, 0.17408067321, 0.136153931001, 0.109083846276 }
+};
 static const float csf_cb420[8][8] = {
-    {1.91113096927, 2.46074210438, 1.18284184739, 1.14982565193, 1.05017074788,
-     0.898018824055, 0.74725392039, 0.615105596242},
-    {2.46074210438, 1.58529308355, 1.21363250036, 1.38190029285, 1.33100189972,
-     1.17428548929, 0.996404342439, 0.830890433625},
-    {1.18284184739, 1.21363250036, 0.978712413627, 1.02624506078, 1.03145147362,
-     0.960060382087, 0.849823426169, 0.731221236837},
-    {1.14982565193, 1.38190029285, 1.02624506078, 0.861317501629,
-     0.801821139099, 0.751437590932, 0.685398513368, 0.608694761374},
-    {1.05017074788, 1.33100189972, 1.03145147362, 0.801821139099,
-     0.676555426187, 0.605503172737, 0.55002013668, 0.495804539034},
-    {0.898018824055, 1.17428548929, 0.960060382087, 0.751437590932,
-     0.605503172737, 0.514674450957, 0.454353482512, 0.407050308965},
-    {0.74725392039, 0.996404342439, 0.849823426169, 0.685398513368,
-     0.55002013668, 0.454353482512, 0.389234902883, 0.342353999733},
-    {0.615105596242, 0.830890433625, 0.731221236837, 0.608694761374,
-     0.495804539034, 0.407050308965, 0.342353999733, 0.295530605237}};
+  { 1.91113096927, 2.46074210438, 1.18284184739, 1.14982565193, 1.05017074788,
+    0.898018824055, 0.74725392039, 0.615105596242 },
+  { 2.46074210438, 1.58529308355, 1.21363250036, 1.38190029285, 1.33100189972,
+    1.17428548929, 0.996404342439, 0.830890433625 },
+  { 1.18284184739, 1.21363250036, 0.978712413627, 1.02624506078, 1.03145147362,
+    0.960060382087, 0.849823426169, 0.731221236837 },
+  { 1.14982565193, 1.38190029285, 1.02624506078, 0.861317501629, 0.801821139099,
+    0.751437590932, 0.685398513368, 0.608694761374 },
+  { 1.05017074788, 1.33100189972, 1.03145147362, 0.801821139099, 0.676555426187,
+    0.605503172737, 0.55002013668, 0.495804539034 },
+  { 0.898018824055, 1.17428548929, 0.960060382087, 0.751437590932,
+    0.605503172737, 0.514674450957, 0.454353482512, 0.407050308965 },
+  { 0.74725392039, 0.996404342439, 0.849823426169, 0.685398513368,
+    0.55002013668, 0.454353482512, 0.389234902883, 0.342353999733 },
+  { 0.615105596242, 0.830890433625, 0.731221236837, 0.608694761374,
+    0.495804539034, 0.407050308965, 0.342353999733, 0.295530605237 }
+};
 static const float csf_cr420[8][8] = {
-    {2.03871978502, 2.62502345193, 1.26180942886, 1.11019789803, 1.01397751469,
-     0.867069376285, 0.721500455585, 0.593906509971},
-    {2.62502345193, 1.69112867013, 1.17180569821, 1.3342742857, 1.28513006198,
-     1.13381474809, 0.962064122248, 0.802254508198},
-    {1.26180942886, 1.17180569821, 0.944981930573, 0.990876405848,
-     0.995903384143, 0.926972725286, 0.820534991409, 0.706020324706},
-    {1.11019789803, 1.3342742857, 0.990876405848, 0.831632933426, 0.77418706195,
-     0.725539939514, 0.661776842059, 0.587716619023},
-    {1.01397751469, 1.28513006198, 0.995903384143, 0.77418706195,
-     0.653238524286, 0.584635025748, 0.531064164893, 0.478717061273},
-    {0.867069376285, 1.13381474809, 0.926972725286, 0.725539939514,
-     0.584635025748, 0.496936637883, 0.438694579826, 0.393021669543},
-    {0.721500455585, 0.962064122248, 0.820534991409, 0.661776842059,
-     0.531064164893, 0.438694579826, 0.375820256136, 0.330555063063},
-    {0.593906509971, 0.802254508198, 0.706020324706, 0.587716619023,
-     0.478717061273, 0.393021669543, 0.330555063063, 0.285345396658}};
+  { 2.03871978502, 2.62502345193, 1.26180942886, 1.11019789803, 1.01397751469,
+    0.867069376285, 0.721500455585, 0.593906509971 },
+  { 2.62502345193, 1.69112867013, 1.17180569821, 1.3342742857, 1.28513006198,
+    1.13381474809, 0.962064122248, 0.802254508198 },
+  { 1.26180942886, 1.17180569821, 0.944981930573, 0.990876405848,
+    0.995903384143, 0.926972725286, 0.820534991409, 0.706020324706 },
+  { 1.11019789803, 1.3342742857, 0.990876405848, 0.831632933426, 0.77418706195,
+    0.725539939514, 0.661776842059, 0.587716619023 },
+  { 1.01397751469, 1.28513006198, 0.995903384143, 0.77418706195, 0.653238524286,
+    0.584635025748, 0.531064164893, 0.478717061273 },
+  { 0.867069376285, 1.13381474809, 0.926972725286, 0.725539939514,
+    0.584635025748, 0.496936637883, 0.438694579826, 0.393021669543 },
+  { 0.721500455585, 0.962064122248, 0.820534991409, 0.661776842059,
+    0.531064164893, 0.438694579826, 0.375820256136, 0.330555063063 },
+  { 0.593906509971, 0.802254508198, 0.706020324706, 0.587716619023,
+    0.478717061273, 0.393021669543, 0.330555063063, 0.285345396658 }
+};
 
 static double convert_score_db(double _score, double _weight) {
   return 10 * (log10(255 * 255) - log10(_weight * _score));
@@ -100,7 +103,7 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride,
   int pixels;
   int x;
   int y;
-  (void) _par;
+  (void)_par;
   ret = pixels = 0;
   /*In the PSNR-HVS-M paper[1] the authors describe the construction of
    their masking table as "we have used the quantization table for the
@@ -120,8 +123,8 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride,
    Electronics VPQM-07, Scottsdale, Arizona, USA, 25-26 January, 2007, 4 p.*/
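+  /*Each masking weight below is the corresponding CSF entry scaled and
+    squared, following the derivation quoted above.*/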
   for (x = 0; x < 8; x++)
     for (y = 0; y < 8; y++)
-      mask[x][y] = (_csf[x][y] * 0.3885746225901003)
-          * (_csf[x][y] * 0.3885746225901003);
+      mask[x][y] =
+          (_csf[x][y] * 0.3885746225901003) * (_csf[x][y] * 0.3885746225901003);
   for (y = 0; y < _h - 7; y += _step) {
     for (x = 0; x < _w - 7; x += _step) {
       int i;
@@ -151,27 +154,23 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride,
       }
       s_gmean /= 64.f;
       d_gmean /= 64.f;
-      for (i = 0; i < 4; i++)
-        s_means[i] /= 16.f;
-      for (i = 0; i < 4; i++)
-        d_means[i] /= 16.f;
+      for (i = 0; i < 4; i++) s_means[i] /= 16.f;
+      for (i = 0; i < 4; i++) d_means[i] /= 16.f;
       for (i = 0; i < 8; i++) {
         for (j = 0; j < 8; j++) {
           int sub = ((i & 12) >> 2) + ((j & 12) >> 1);
           s_gvar += (dct_s[i * 8 + j] - s_gmean) * (dct_s[i * 8 + j] - s_gmean);
           d_gvar += (dct_d[i * 8 + j] - d_gmean) * (dct_d[i * 8 + j] - d_gmean);
-          s_vars[sub] += (dct_s[i * 8 + j] - s_means[sub])
-              * (dct_s[i * 8 + j] - s_means[sub]);
-          d_vars[sub] += (dct_d[i * 8 + j] - d_means[sub])
-              * (dct_d[i * 8 + j] - d_means[sub]);
+          s_vars[sub] += (dct_s[i * 8 + j] - s_means[sub]) *
+                         (dct_s[i * 8 + j] - s_means[sub]);
+          d_vars[sub] += (dct_d[i * 8 + j] - d_means[sub]) *
+                         (dct_d[i * 8 + j] - d_means[sub]);
         }
       }
       s_gvar *= 1 / 63.f * 64;
       d_gvar *= 1 / 63.f * 64;
-      for (i = 0; i < 4; i++)
-        s_vars[i] *= 1 / 15.f * 16;
-      for (i = 0; i < 4; i++)
-        d_vars[i] *= 1 / 15.f * 16;
+      for (i = 0; i < 4; i++) s_vars[i] *= 1 / 15.f * 16;
+      for (i = 0; i < 4; i++) d_vars[i] *= 1 / 15.f * 16;
       if (s_gvar > 0)
         s_gvar = (s_vars[0] + s_vars[1] + s_vars[2] + s_vars[3]) / s_gvar;
       if (d_gvar > 0)
@@ -186,8 +185,7 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride,
           d_mask += dct_d_coef[i * 8 + j] * dct_d_coef[i * 8 + j] * mask[i][j];
       s_mask = sqrt(s_mask * s_gvar) / 32.f;
       d_mask = sqrt(d_mask * d_gvar) / 32.f;
-      if (d_mask > s_mask)
-        s_mask = d_mask;
+      if (d_mask > s_mask) s_mask = d_mask;
       for (i = 0; i < 8; i++) {
         for (j = 0; j < 8; j++) {
           float err;
diff --git a/vpx_dsp/quantize.c b/vpx_dsp/quantize.c
index 1be9fb7138c63ab8bda14c60ead1e19657943b43..e65f4f3cdd3cc7462dbe856ae99b5da8606bbe9c 100644
--- a/vpx_dsp/quantize.c
+++ b/vpx_dsp/quantize.c
@@ -11,8 +11,7 @@
 #include "vpx_dsp/quantize.h"
 #include "vpx_mem/vpx_mem.h"
 
-void vpx_quantize_dc(const tran_low_t *coeff_ptr,
-                     int n_coeffs, int skip_block,
+void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
                      const int16_t *round_ptr, const int16_t quant,
                      tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                      const int16_t dequant_ptr, uint16_t *eob_ptr) {
@@ -28,20 +27,19 @@ void vpx_quantize_dc(const tran_low_t *coeff_ptr,
   if (!skip_block) {
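+    // Quantize in Q16 fixed point: 'quant' is roughly (1 << 16) / dequant.
+    // coeff_sign is 0 or -1, so (tmp ^ coeff_sign) - coeff_sign restores the
+    // original sign of the coefficient.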
     tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX);
     tmp = (tmp * quant) >> 16;
-    qcoeff_ptr[rc]  = (tmp ^ coeff_sign) - coeff_sign;
+    qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
     dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr;
-    if (tmp)
-      eob = 0;
+    if (tmp) eob = 0;
   }
   *eob_ptr = eob + 1;
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr,
-                            int n_coeffs, int skip_block,
-                            const int16_t *round_ptr, const int16_t quant,
-                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                            const int16_t dequant_ptr, uint16_t *eob_ptr) {
+void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+                            int skip_block, const int16_t *round_ptr,
+                            const int16_t quant, tran_low_t *qcoeff_ptr,
+                            tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+                            uint16_t *eob_ptr) {
   int eob = -1;
 
   memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
@@ -55,8 +53,7 @@ void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr,
     const uint32_t abs_qcoeff = (uint32_t)((tmp * quant) >> 16);
     qcoeff_ptr[0] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
     dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant_ptr;
-    if (abs_qcoeff)
-      eob = 0;
+    if (abs_qcoeff) eob = 0;
   }
   *eob_ptr = eob + 1;
 }
@@ -80,19 +77,16 @@ void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
     tmp = clamp(abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1),
                 INT16_MIN, INT16_MAX);
     tmp = (tmp * quant) >> 15;
-    qcoeff_ptr[rc]  = (tmp ^ coeff_sign) - coeff_sign;
+    qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
     dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr / 2;
-    if (tmp)
-      eob = 0;
+    if (tmp) eob = 0;
   }
   *eob_ptr = eob + 1;
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr,
-                                  int skip_block,
-                                  const int16_t *round_ptr,
-                                  const int16_t quant,
+void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
+                                  const int16_t *round_ptr, const int16_t quant,
                                   tran_low_t *qcoeff_ptr,
                                   tran_low_t *dqcoeff_ptr,
                                   const int16_t dequant_ptr,
@@ -111,24 +105,22 @@ void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr,
     const uint32_t abs_qcoeff = (uint32_t)((tmp * quant) >> 15);
     qcoeff_ptr[0] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
     dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant_ptr / 2;
-    if (abs_qcoeff)
-      eob = 0;
+    if (abs_qcoeff) eob = 0;
   }
   *eob_ptr = eob + 1;
 }
 #endif
 
 void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                      int skip_block,
-                      const int16_t *zbin_ptr, const int16_t *round_ptr,
-                      const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
-                      tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                      const int16_t *dequant_ptr,
-                      uint16_t *eob_ptr,
-                      const int16_t *scan, const int16_t *iscan) {
+                      int skip_block, const int16_t *zbin_ptr,
+                      const int16_t *round_ptr, const int16_t *quant_ptr,
+                      const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+                      tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                      uint16_t *eob_ptr, const int16_t *scan,
+                      const int16_t *iscan) {
   int i, non_zero_count = (int)n_coeffs, eob = -1;
-  const int zbins[2] = {zbin_ptr[0], zbin_ptr[1]};
-  const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
+  const int zbins[2] = { zbin_ptr[0], zbin_ptr[1] };
+  const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 };
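+  // The zbins form a dead zone: trailing coefficients whose magnitude stays
+  // inside (nzbin, zbin) are skipped up front and quantize to zero.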
   (void)iscan;
 
   memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
@@ -157,12 +149,12 @@ void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
       if (abs_coeff >= zbins[rc != 0]) {
         int tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX);
         tmp = ((((tmp * quant_ptr[rc != 0]) >> 16) + tmp) *
-                  quant_shift_ptr[rc != 0]) >> 16;  // quantization
-        qcoeff_ptr[rc]  = (tmp ^ coeff_sign) - coeff_sign;
+               quant_shift_ptr[rc != 0]) >>
+              16;  // quantization
+        qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
         dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
 
-        if (tmp)
-          eob = i;
+        if (tmp) eob = i;
       }
     }
   }
@@ -175,12 +167,11 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              const int16_t *round_ptr, const int16_t *quant_ptr,
                              const int16_t *quant_shift_ptr,
                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                             const int16_t *dequant_ptr,
-                             uint16_t *eob_ptr, const int16_t *scan,
-                             const int16_t *iscan) {
+                             const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                             const int16_t *scan, const int16_t *iscan) {
   int i, non_zero_count = (int)n_coeffs, eob = -1;
-  const int zbins[2] = {zbin_ptr[0], zbin_ptr[1]};
-  const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
+  const int zbins[2] = { zbin_ptr[0], zbin_ptr[1] };
+  const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 };
   (void)iscan;
 
   memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
@@ -213,8 +204,7 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
             (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 16);
         qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
         dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
-        if (abs_qcoeff)
-          eob = i;
+        if (abs_qcoeff) eob = i;
       }
     }
   }
@@ -223,17 +213,15 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
 #endif
 
 void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
-                            int skip_block,
-                            const int16_t *zbin_ptr, const int16_t *round_ptr,
-                            const int16_t *quant_ptr,
+                            int skip_block, const int16_t *zbin_ptr,
+                            const int16_t *round_ptr, const int16_t *quant_ptr,
                             const int16_t *quant_shift_ptr,
                             tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                            const int16_t *dequant_ptr,
-                            uint16_t *eob_ptr,
+                            const int16_t *dequant_ptr, uint16_t *eob_ptr,
                             const int16_t *scan, const int16_t *iscan) {
-  const int zbins[2] = {ROUND_POWER_OF_TWO(zbin_ptr[0], 1),
-                        ROUND_POWER_OF_TWO(zbin_ptr[1], 1)};
-  const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
+  const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0], 1),
+                         ROUND_POWER_OF_TWO(zbin_ptr[1], 1) };
+  const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 };
 
   int idx = 0;
   int idx_arr[1024];
@@ -266,33 +254,28 @@ void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
       abs_coeff += ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
       abs_coeff = clamp(abs_coeff, INT16_MIN, INT16_MAX);
       tmp = ((((abs_coeff * quant_ptr[rc != 0]) >> 16) + abs_coeff) *
-               quant_shift_ptr[rc != 0]) >> 15;
+             quant_shift_ptr[rc != 0]) >>
+            15;
 
       qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
       dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
 
-      if (tmp)
-        eob = idx_arr[i];
+      if (tmp) eob = idx_arr[i];
     }
   }
   *eob_ptr = eob + 1;
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vpx_highbd_quantize_b_32x32_c(const tran_low_t *coeff_ptr,
-                                   intptr_t n_coeffs, int skip_block,
-                                   const int16_t *zbin_ptr,
-                                   const int16_t *round_ptr,
-                                   const int16_t *quant_ptr,
-                                   const int16_t *quant_shift_ptr,
-                                   tran_low_t *qcoeff_ptr,
-                                   tran_low_t *dqcoeff_ptr,
-                                   const int16_t *dequant_ptr,
-                                   uint16_t *eob_ptr,
-                                   const int16_t *scan, const int16_t *iscan) {
-  const int zbins[2] = {ROUND_POWER_OF_TWO(zbin_ptr[0], 1),
-                        ROUND_POWER_OF_TWO(zbin_ptr[1], 1)};
-  const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
+void vpx_highbd_quantize_b_32x32_c(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+    const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan) {
+  const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0], 1),
+                         ROUND_POWER_OF_TWO(zbin_ptr[1], 1) };
+  const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 };
 
   int idx = 0;
   int idx_arr[1024];
@@ -321,15 +304,14 @@ void vpx_highbd_quantize_b_32x32_c(const tran_low_t *coeff_ptr,
       const int coeff = coeff_ptr[rc];
       const int coeff_sign = (coeff >> 31);
       const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
-      const int64_t tmp1 = abs_coeff
-                         + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
+      const int64_t tmp1 =
+          abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
       const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1;
       const uint32_t abs_qcoeff =
           (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 15);
       qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
       dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
-      if (abs_qcoeff)
-        eob = idx_arr[i];
+      if (abs_qcoeff) eob = idx_arr[i];
     }
   }
   *eob_ptr = eob + 1;
diff --git a/vpx_dsp/quantize.h b/vpx_dsp/quantize.h
index 5280b08ff4b7ccdeec871ebebaca8c02341f7cfc..75ab9f28bb4d405792c433768f4defdfd2470fa4 100644
--- a/vpx_dsp/quantize.h
+++ b/vpx_dsp/quantize.h
@@ -18,8 +18,7 @@
 extern "C" {
 #endif
 
-void vpx_quantize_dc(const tran_low_t *coeff_ptr,
-                     int n_coeffs, int skip_block,
+void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
                      const int16_t *round_ptr, const int16_t quant_ptr,
                      tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                      const int16_t dequant_ptr, uint16_t *eob_ptr);
@@ -29,19 +28,17 @@ void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                            const int16_t dequant_ptr, uint16_t *eob_ptr);
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr,
-                            int n_coeffs, int skip_block,
-                            const int16_t *round_ptr, const int16_t quant_ptr,
-                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
-                            const int16_t dequant_ptr, uint16_t *eob_ptr);
-void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr,
-                                  int skip_block,
+void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+                            int skip_block, const int16_t *round_ptr,
+                            const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
+                            tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+                            uint16_t *eob_ptr);
+void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                                   const int16_t *round_ptr,
                                   const int16_t quant_ptr,
                                   tran_low_t *qcoeff_ptr,
                                   tran_low_t *dqcoeff_ptr,
-                                  const int16_t dequant_ptr,
-                                  uint16_t *eob_ptr);
+                                  const int16_t dequant_ptr, uint16_t *eob_ptr);
 #endif
 
 #ifdef __cplusplus
diff --git a/vpx_dsp/sad.c b/vpx_dsp/sad.c
index 20a32cce4e2ebd79e78c48762f4fb17e0f524681..9a4a79fc37c439af69c47ed3e46cbda3b915f69a 100644
--- a/vpx_dsp/sad.c
+++ b/vpx_dsp/sad.c
@@ -17,15 +17,13 @@
 #include "vpx_ports/mem.h"
 
 /* Sum the difference between every corresponding element of the buffers. */
-static INLINE unsigned int sad(const uint8_t *a, int a_stride,
-                               const uint8_t *b, int b_stride,
-                               int width, int height) {
+static INLINE unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b,
+                               int b_stride, int width, int height) {
   int y, x;
   unsigned int sad = 0;
 
   for (y = 0; y < height; y++) {
-    for (x = 0; x < width; x++)
-      sad += abs(a[x] - b[x]);
+    for (x = 0; x < width; x++) sad += abs(a[x] - b[x]);
 
     a += a_stride;
     b += b_stride;
@@ -74,40 +72,43 @@ static INLINE void highbd_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-#define sadMxN(m, n) \
-unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
-                                  const uint8_t *ref, int ref_stride) { \
-  return sad(src, src_stride, ref, ref_stride, m, n); \
-} \
-unsigned int vpx_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
-                                      const uint8_t *ref, int ref_stride, \
-                                      const uint8_t *second_pred) { \
-  uint8_t comp_pred[m * n]; \
-  avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \
-  return sad(src, src_stride, comp_pred, m, m, n); \
-}
+#define sadMxN(m, n)                                                        \
+  unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride,     \
+                                    const uint8_t *ref, int ref_stride) {   \
+    return sad(src, src_stride, ref, ref_stride, m, n);                     \
+  }                                                                         \
+  unsigned int vpx_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
+                                        const uint8_t *ref, int ref_stride, \
+                                        const uint8_t *second_pred) {       \
+    uint8_t comp_pred[m * n];                                               \
+    avg_pred(comp_pred, second_pred, m, n, ref, ref_stride);                \
+    return sad(src, src_stride, comp_pred, m, m, n);                        \
+  }
 
 // Depending on the call sites, pass **ref_array to avoid the & in subsequent
 // calls, and de-dup with the 4D variant below.
-#define sadMxNxK(m, n, k) \
-void vpx_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \
-                                const uint8_t *ref_array, int ref_stride, \
-                                uint32_t *sad_array) { \
-  int i; \
-  for (i = 0; i < k; ++i) \
-    sad_array[i] = vpx_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \
-}
+#define sadMxNxK(m, n, k)                                                   \
+  void vpx_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride,       \
+                                  const uint8_t *ref_array, int ref_stride, \
+                                  uint32_t *sad_array) {                    \
+    int i;                                                                  \
+    for (i = 0; i < k; ++i)                                                 \
+      sad_array[i] =                                                        \
+          vpx_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \
+  }
 
 // This appears to be equivalent to the above when k == 4 and refs is const
-#define sadMxNx4D(m, n) \
-void vpx_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
-                             const uint8_t *const ref_array[], int ref_stride, \
-                             uint32_t *sad_array) { \
-  int i; \
-  for (i = 0; i < 4; ++i) \
-    sad_array[i] = vpx_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \
-}
+#define sadMxNx4D(m, n)                                                    \
+  void vpx_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride,         \
+                               const uint8_t *const ref_array[],           \
+                               int ref_stride, uint32_t *sad_array) {      \
+    int i;                                                                 \
+    for (i = 0; i < 4; ++i)                                                \
+      sad_array[i] =                                                       \
+          vpx_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \
+  }
 
+/* clang-format off */
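+// The semicolon-free macro invocations below defeat clang-format's parser;
+// keep formatting disabled so each block size stays grouped one call per line.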
 // 64x64
 sadMxN(64, 64)
 sadMxNxK(64, 64, 3)
@@ -175,18 +176,18 @@ sadMxN(4, 4)
 sadMxNxK(4, 4, 3)
 sadMxNxK(4, 4, 8)
 sadMxNx4D(4, 4)
+/* clang-format on */
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static INLINE unsigned int highbd_sad(const uint8_t *a8, int a_stride,
                                       const uint8_t *b8, int b_stride,
                                       int width, int height) {
   int y, x;
   unsigned int sad = 0;
   const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
   const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
   for (y = 0; y < height; y++) {
-    for (x = 0; x < width; x++)
-      sad += abs(a[x] - b[x]);
+    for (x = 0; x < width; x++) sad += abs(a[x] - b[x]);
 
     a += a_stride;
     b += b_stride;
@@ -201,8 +202,7 @@ static INLINE unsigned int highbd_sadb(const uint8_t *a8, int a_stride,
   unsigned int sad = 0;
   const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
   for (y = 0; y < height; y++) {
-    for (x = 0; x < width; x++)
-      sad += abs(a[x] - b[x]);
+    for (x = 0; x < width; x++) sad += abs(a[x] - b[x]);
 
     a += a_stride;
     b += b_stride;
@@ -210,43 +210,43 @@ static INLINE unsigned int highbd_sadb(const uint8_t *a8, int a_stride,
   return sad;
 }
 
-#define highbd_sadMxN(m, n) \
-unsigned int vpx_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
-                                         const uint8_t *ref, int ref_stride) { \
-  return highbd_sad(src, src_stride, ref, ref_stride, m, n); \
-} \
-unsigned int vpx_highbd_sad##m##x##n##_avg_c(const uint8_t *src, \
-                                             int src_stride, \
-                                             const uint8_t *ref, \
-                                             int ref_stride, \
-                                             const uint8_t *second_pred) { \
-  uint16_t comp_pred[m * n]; \
-  highbd_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \
-  return highbd_sadb(src, src_stride, comp_pred, m, m, n); \
-}
+#define highbd_sadMxN(m, n)                                                    \
+  unsigned int vpx_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
+                                           const uint8_t *ref,                 \
+                                           int ref_stride) {                   \
+    return highbd_sad(src, src_stride, ref, ref_stride, m, n);                 \
+  }                                                                            \
+  unsigned int vpx_highbd_sad##m##x##n##_avg_c(                                \
+      const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,  \
+      const uint8_t *second_pred) {                                            \
+    uint16_t comp_pred[m * n];                                                 \
+    highbd_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride);            \
+    return highbd_sadb(src, src_stride, comp_pred, m, m, n);                   \
+  }
 
-#define highbd_sadMxNxK(m, n, k) \
-void vpx_highbd_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \
-                                       const uint8_t *ref_array, int ref_stride, \
-                                       uint32_t *sad_array) { \
-  int i; \
-  for (i = 0; i < k; ++i) { \
-    sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, &ref_array[i], \
-                                               ref_stride); \
-  } \
-}
+#define highbd_sadMxNxK(m, n, k)                                             \
+  void vpx_highbd_sad##m##x##n##x##k##_c(                                    \
+      const uint8_t *src, int src_stride, const uint8_t *ref_array,          \
+      int ref_stride, uint32_t *sad_array) {                                 \
+    int i;                                                                   \
+    for (i = 0; i < k; ++i) {                                                \
+      sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride,            \
+                                                 &ref_array[i], ref_stride); \
+    }                                                                        \
+  }
 
-#define highbd_sadMxNx4D(m, n) \
-void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
-                                    const uint8_t *const ref_array[], \
-                                    int ref_stride, uint32_t *sad_array) { \
-  int i; \
-  for (i = 0; i < 4; ++i) { \
-    sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, ref_array[i], \
-                                               ref_stride); \
-  } \
-}
+#define highbd_sadMxNx4D(m, n)                                               \
+  void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride,    \
+                                      const uint8_t *const ref_array[],      \
+                                      int ref_stride, uint32_t *sad_array) { \
+    int i;                                                                   \
+    for (i = 0; i < 4; ++i) {                                                \
+      sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride,            \
+                                                 ref_array[i], ref_stride);  \
+    }                                                                        \
+  }
 
+/* clang-format off */
 // 64x64
 highbd_sadMxN(64, 64)
 highbd_sadMxNxK(64, 64, 3)
@@ -314,5 +314,6 @@ highbd_sadMxN(4, 4)
 highbd_sadMxNxK(4, 4, 3)
 highbd_sadMxNxK(4, 4, 8)
 highbd_sadMxNx4D(4, 4)
+/* clang-format on */
 
 #endif  // CONFIG_VPX_HIGHBITDEPTH
diff --git a/vpx_dsp/ssim.c b/vpx_dsp/ssim.c
index b66cfb4c973124eae5b3d184f68c6a2544bcf10a..b0bd0028b001f69135a2bc21c323680608ba3bfd 100644
--- a/vpx_dsp/ssim.c
+++ b/vpx_dsp/ssim.c
@@ -14,8 +14,8 @@
 #include "vpx_ports/mem.h"
 #include "vpx_ports/system_state.h"
 
-void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r,
-                            int rp, uint32_t *sum_s, uint32_t *sum_r,
+void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
+                            uint32_t *sum_s, uint32_t *sum_r,
                             uint32_t *sum_sq_s, uint32_t *sum_sq_r,
                             uint32_t *sum_sxr) {
   int i, j;
@@ -30,9 +30,8 @@ void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r,
   }
 }
 void vpx_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
-                          uint32_t *sum_s, uint32_t *sum_r,
-                          uint32_t *sum_sq_s, uint32_t *sum_sq_r,
-                          uint32_t *sum_sxr) {
+                          uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s,
+                          uint32_t *sum_sq_r, uint32_t *sum_sxr) {
   int i, j;
   for (i = 0; i < 8; i++, s += sp, r += rp) {
     for (j = 0; j < 8; j++) {
@@ -46,9 +45,8 @@ void vpx_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp,
-                                 const uint16_t *r, int rp,
-                                 uint32_t *sum_s, uint32_t *sum_r,
+void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r,
+                                 int rp, uint32_t *sum_s, uint32_t *sum_r,
                                  uint32_t *sum_sq_s, uint32_t *sum_sq_r,
                                  uint32_t *sum_sxr) {
   int i, j;
@@ -64,12 +62,11 @@ void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp,
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
-static const int64_t cc1 =  26634;  // (64^2*(.01*255)^2
+static const int64_t cc1 = 26634;   // (64^2*(.01*255)^2)
 static const int64_t cc2 = 239708;  // (64^2*(.03*255)^2)
 
-static double similarity(uint32_t sum_s, uint32_t sum_r,
-                         uint32_t sum_sq_s, uint32_t sum_sq_r,
-                         uint32_t sum_sxr, int count) {
+static double similarity(uint32_t sum_s, uint32_t sum_r, uint32_t sum_sq_s,
+                         uint32_t sum_sq_r, uint32_t sum_sxr, int count) {
   int64_t ssim_n, ssim_d;
   int64_t c1, c2;
 
@@ -77,12 +74,12 @@ static double similarity(uint32_t sum_s, uint32_t sum_r,
   c1 = (cc1 * count * count) >> 12;
   c2 = (cc2 * count * count) >> 12;
 
-  ssim_n = (2 * sum_s * sum_r + c1) * ((int64_t) 2 * count * sum_sxr -
-                                       (int64_t) 2 * sum_s * sum_r + c2);
+  ssim_n = (2 * sum_s * sum_r + c1) *
+           ((int64_t)2 * count * sum_sxr - (int64_t)2 * sum_s * sum_r + c2);
 
   ssim_d = (sum_s * sum_s + sum_r * sum_r + c1) *
            ((int64_t)count * sum_sq_s - (int64_t)sum_s * sum_s +
-            (int64_t)count * sum_sq_r - (int64_t) sum_r * sum_r + c2);
+            (int64_t)count * sum_sq_r - (int64_t)sum_r * sum_r + c2);
 
   return ssim_n * 1.0 / ssim_d;
 }
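
The constants above are the standard SSIM stabilizers C1 = (0.01 * 255)^2 and C2 = (0.03 * 255)^2, pre-scaled by 64^2 = 4096 so the >> 12 can cancel that factor after the count * count multiply. Checking the arithmetic: 4096 * (0.01 * 255)^2 = 4096 * 6.5025 = 26634.24, which rounds to cc1 = 26634, and 4096 * (0.03 * 255)^2 = 4096 * 58.5225 = 239708.16, which rounds to cc2 = 239708.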
@@ -101,12 +98,8 @@ static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r,
   const int oshift = bd - 8;
   vpx_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
                             &sum_sxr);
-  return similarity(sum_s >> oshift,
-                    sum_r >> oshift,
-                    sum_sq_s >> (2 * oshift),
-                    sum_sq_r >> (2 * oshift),
-                    sum_sxr >> (2 * oshift),
-                    64);
+  return similarity(sum_s >> oshift, sum_r >> oshift, sum_sq_s >> (2 * oshift),
+                    sum_sq_r >> (2 * oshift), sum_sxr >> (2 * oshift), 64);
 }
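
A note on the shifts (inferred from the bit depths rather than stated in the patch): oshift = bd - 8 rescales the high-bit-depth statistics down to the 8-bit range that cc1/cc2 were derived for. The linear sums scale with the sample values and shift by oshift; sum_sq_s, sum_sq_r and sum_sxr are products of two samples, scale quadratically, and shift by 2 * oshift.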
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
@@ -145,9 +138,9 @@ static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
   for (i = 0; i <= height - 8;
        i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) {
     for (j = 0; j <= width - 8; j += 4) {
-      double v = highbd_ssim_8x8(CONVERT_TO_SHORTPTR(img1 + j), stride_img1,
-                                 CONVERT_TO_SHORTPTR(img2 + j), stride_img2,
-                                 bd);
+      double v =
+          highbd_ssim_8x8(CONVERT_TO_SHORTPTR(img1 + j), stride_img1,
+                          CONVERT_TO_SHORTPTR(img2 + j), stride_img2, bd);
       ssim_total += v;
       samples++;
     }
@@ -158,22 +151,18 @@ static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source,
-                     const YV12_BUFFER_CONFIG *dest,
-                     double *weight) {
+                     const YV12_BUFFER_CONFIG *dest, double *weight) {
   double a, b, c;
   double ssimv;
 
-  a = vpx_ssim2(source->y_buffer, dest->y_buffer,
-                source->y_stride, dest->y_stride,
-                source->y_crop_width, source->y_crop_height);
+  a = vpx_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
+                dest->y_stride, source->y_crop_width, source->y_crop_height);
 
-  b = vpx_ssim2(source->u_buffer, dest->u_buffer,
-                source->uv_stride, dest->uv_stride,
-                source->uv_crop_width, source->uv_crop_height);
+  b = vpx_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
+                dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
 
-  c = vpx_ssim2(source->v_buffer, dest->v_buffer,
-                source->uv_stride, dest->uv_stride,
-                source->uv_crop_width, source->uv_crop_height);
+  c = vpx_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
+                dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
 
   ssimv = a * .8 + .1 * (b + c);
 
@@ -183,22 +172,19 @@ double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source,
 }
 
 double vpx_calc_ssimg(const YV12_BUFFER_CONFIG *source,
-                      const YV12_BUFFER_CONFIG *dest,
-                      double *ssim_y, double *ssim_u, double *ssim_v) {
+                      const YV12_BUFFER_CONFIG *dest, double *ssim_y,
+                      double *ssim_u, double *ssim_v) {
   double ssim_all = 0;
   double a, b, c;
 
-  a = vpx_ssim2(source->y_buffer, dest->y_buffer,
-                source->y_stride, dest->y_stride,
-                source->y_crop_width, source->y_crop_height);
+  a = vpx_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
+                dest->y_stride, source->y_crop_width, source->y_crop_height);
 
-  b = vpx_ssim2(source->u_buffer, dest->u_buffer,
-                source->uv_stride, dest->uv_stride,
-                source->uv_crop_width, source->uv_crop_height);
+  b = vpx_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
+                dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
 
-  c = vpx_ssim2(source->v_buffer, dest->v_buffer,
-                source->uv_stride, dest->uv_stride,
-                source->uv_crop_width, source->uv_crop_height);
+  c = vpx_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
+                dest->uv_stride, source->uv_crop_width, source->uv_crop_height);
   *ssim_y = a;
   *ssim_u = b;
   *ssim_v = c;
@@ -242,13 +228,13 @@ static double ssimv_similarity(const Ssimv *sv, int64_t n) {
   const int64_t c2 = (cc2 * n * n) >> 12;
 
   const double l = 1.0 * (2 * sv->sum_s * sv->sum_r + c1) /
-      (sv->sum_s * sv->sum_s + sv->sum_r * sv->sum_r + c1);
+                   (sv->sum_s * sv->sum_s + sv->sum_r * sv->sum_r + c1);
 
   // Since these variables are unsigned sums, convert to double so
   // math is done in double arithmetic.
-  const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2)
-      / (n * sv->sum_sq_s - sv->sum_s * sv->sum_s + n * sv->sum_sq_r
-         - sv->sum_r * sv->sum_r + c2);
+  const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2) /
+                   (n * sv->sum_sq_s - sv->sum_s * sv->sum_s +
+                    n * sv->sum_sq_r - sv->sum_r * sv->sum_r + c2);
 
   return l * v;
 }
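
This is the usual two-factor SSIM decomposition: l is the luminance term, comparing the window means via (2 * sum_s * sum_r + c1) / (sum_s^2 + sum_r^2 + c1), and v folds the contrast and structure comparisons into a single ratio of (co)variance-style terms; the per-window score is the product l * v.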
@@ -277,24 +263,21 @@ static double ssimv_similarity2(const Ssimv *sv, int64_t n) {
 
   // Since these variables are unsigned sums, convert to double so
   // math is done in double arithmetic.
-  const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2)
-      / (n * sv->sum_sq_s - sv->sum_s * sv->sum_s +
-         n * sv->sum_sq_r - sv->sum_r * sv->sum_r + c2);
+  const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2) /
+                   (n * sv->sum_sq_s - sv->sum_s * sv->sum_s +
+                    n * sv->sum_sq_r - sv->sum_r * sv->sum_r + c2);
 
   return l * v;
 }
 static void ssimv_parms(uint8_t *img1, int img1_pitch, uint8_t *img2,
                         int img2_pitch, Ssimv *sv) {
-  vpx_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch,
-                     &sv->sum_s, &sv->sum_r, &sv->sum_sq_s, &sv->sum_sq_r,
-                     &sv->sum_sxr);
+  vpx_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, &sv->sum_s, &sv->sum_r,
+                     &sv->sum_sq_s, &sv->sum_sq_r, &sv->sum_sxr);
 }
 
-double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch,
-                            uint8_t *img2, int img2_pitch,
-                            int width, int height,
-                            Ssimv *sv2, Metrics *m,
-                            int do_inconsistency) {
+double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
+                            int img2_pitch, int width, int height, Ssimv *sv2,
+                            Metrics *m, int do_inconsistency) {
   double dssim_total = 0;
   double ssim_total = 0;
   double ssim2_total = 0;
@@ -305,10 +288,10 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch,
   double old_ssim_total = 0;
   vpx_clear_system_state();
   // We can sample points as frequently as we like; start with 1 per 4x4.
-  for (i = 0; i < height; i += 4,
-       img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
+  for (i = 0; i < height;
+       i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
     for (j = 0; j < width; j += 4, ++c) {
-      Ssimv sv = {0};
+      Ssimv sv = { 0 };
       double ssim;
       double ssim2;
       double dssim;
@@ -394,27 +377,29 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch,
 
         // This measures how much consistent variance there is in two
         // consecutive source frames. 1.0 means they have exactly the same
         // variance.
-        const double variance_term = (2.0 * var_old * var_new + c1) /
+        const double variance_term =
+            (2.0 * var_old * var_new + c1) /
             (1.0 * var_old * var_old + 1.0 * var_new * var_new + c1);
 
         // This measures how consistent the local means are between two
         // consecutive frames. 1.0 means they have exactly the same mean.
-        const double mean_term = (2.0 * mean_old * mean_new + c2) /
+        const double mean_term =
+            (2.0 * mean_old * mean_new + c2) /
             (1.0 * mean_old * mean_old + 1.0 * mean_new * mean_new + c2);
 
         // This measures how consistent the SSIMs of two
         // consecutive frames are. 1.0 means they are exactly the same.
-        double ssim_term = pow((2.0 * ssim_old * ssim_new + c3) /
-                               (ssim_old * ssim_old + ssim_new * ssim_new + c3),
-                               5);
+        double ssim_term =
+            pow((2.0 * ssim_old * ssim_new + c3) /
+                    (ssim_old * ssim_old + ssim_new * ssim_new + c3),
+                5);
 
         double this_inconsistency;
 
         // Floating point math sometimes makes this > 1 by a tiny bit.
         // We want the metric to scale between 0 and 1.0 so we can convert
         // it to an SNR-scaled value.
-        if (ssim_term > 1)
-          ssim_term = 1;
+        if (ssim_term > 1) ssim_term = 1;
 
         // This converts the consistency metric to an inconsistency metric
         // (so we can scale it like PSNR to something like sum squared error).
@@ -442,8 +427,7 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch,
   ssim2_total *= norm;
   m->ssim2 = ssim2_total;
   m->ssim = ssim_total;
-  if (old_ssim_total == 0)
-    inconsistency_total = 0;
+  if (old_ssim_total == 0) inconsistency_total = 0;
 
   m->ssimc = inconsistency_total;
 
@@ -451,25 +435,24 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch,
   return inconsistency_total;
 }
 
-
 #if CONFIG_VPX_HIGHBITDEPTH
 double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
-                            const YV12_BUFFER_CONFIG *dest,
-                            double *weight, unsigned int bd) {
+                            const YV12_BUFFER_CONFIG *dest, double *weight,
+                            unsigned int bd) {
   double a, b, c;
   double ssimv;
 
-  a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer,
-                       source->y_stride, dest->y_stride,
-                       source->y_crop_width, source->y_crop_height, bd);
+  a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
+                       dest->y_stride, source->y_crop_width,
+                       source->y_crop_height, bd);
 
-  b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer,
-                       source->uv_stride, dest->uv_stride,
-                       source->uv_crop_width, source->uv_crop_height, bd);
+  b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
+                       dest->uv_stride, source->uv_crop_width,
+                       source->uv_crop_height, bd);
 
-  c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer,
-                       source->uv_stride, dest->uv_stride,
-                       source->uv_crop_width, source->uv_crop_height, bd);
+  c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
+                       dest->uv_stride, source->uv_crop_width,
+                       source->uv_crop_height, bd);
 
   ssimv = a * .8 + .1 * (b + c);
 
@@ -484,17 +467,17 @@ double vpx_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source,
   double ssim_all = 0;
   double a, b, c;
 
-  a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer,
-                       source->y_stride, dest->y_stride,
-                       source->y_crop_width, source->y_crop_height, bd);
+  a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride,
+                       dest->y_stride, source->y_crop_width,
+                       source->y_crop_height, bd);
 
-  b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer,
-                       source->uv_stride, dest->uv_stride,
-                       source->uv_crop_width, source->uv_crop_height, bd);
+  b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride,
+                       dest->uv_stride, source->uv_crop_width,
+                       source->uv_crop_height, bd);
 
-  c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer,
-                       source->uv_stride, dest->uv_stride,
-                       source->uv_crop_width, source->uv_crop_height, bd);
+  c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride,
+                       dest->uv_stride, source->uv_crop_width,
+                       source->uv_crop_height, bd);
   *ssim_y = a;
   *ssim_u = b;
   *ssim_v = c;
diff --git a/vpx_dsp/ssim.h b/vpx_dsp/ssim.h
index 431aa4c195ace61a773510bca3a2c30b445e4c0b..cc9e2b44e7098f1a5e52dec340c6affedf01d05f 100644
--- a/vpx_dsp/ssim.h
+++ b/vpx_dsp/ssim.h
@@ -61,37 +61,32 @@ typedef struct {
 } Metrics;
 
 double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
-                      int img2_pitch, int width, int height, Ssimv *sv2,
-                      Metrics *m, int do_inconsistency);
+                            int img2_pitch, int width, int height, Ssimv *sv2,
+                            Metrics *m, int do_inconsistency);
 
 double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source,
-                     const YV12_BUFFER_CONFIG *dest,
-                     double *weight);
+                     const YV12_BUFFER_CONFIG *dest, double *weight);
 
 double vpx_calc_ssimg(const YV12_BUFFER_CONFIG *source,
-                      const YV12_BUFFER_CONFIG *dest,
-                      double *ssim_y, double *ssim_u, double *ssim_v);
+                      const YV12_BUFFER_CONFIG *dest, double *ssim_y,
+                      double *ssim_u, double *ssim_v);
 
 double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source,
-                         const YV12_BUFFER_CONFIG *dest,
-                         double *ssim_y, double *ssim_u, double *ssim_v);
+                         const YV12_BUFFER_CONFIG *dest, double *ssim_y,
+                         double *ssim_u, double *ssim_v);
 
 double vpx_psnrhvs(const YV12_BUFFER_CONFIG *source,
-                   const YV12_BUFFER_CONFIG *dest,
-                   double *ssim_y, double *ssim_u, double *ssim_v);
+                   const YV12_BUFFER_CONFIG *dest, double *ssim_y,
+                   double *ssim_u, double *ssim_v);
 
 #if CONFIG_VPX_HIGHBITDEPTH
 double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
-                            const YV12_BUFFER_CONFIG *dest,
-                            double *weight,
+                            const YV12_BUFFER_CONFIG *dest, double *weight,
                             unsigned int bd);
 
 double vpx_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source,
-                             const YV12_BUFFER_CONFIG *dest,
-                             double *ssim_y,
-                             double *ssim_u,
-                             double *ssim_v,
-                             unsigned int bd);
+                             const YV12_BUFFER_CONFIG *dest, double *ssim_y,
+                             double *ssim_u, double *ssim_v, unsigned int bd);
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #ifdef __cplusplus
diff --git a/vpx_dsp/subtract.c b/vpx_dsp/subtract.c
index 1ba8b8cb0c17749384256d5ba129dc4b4d3f9592..dc4d662e70f2198f46ed0a4fb81fe7d951811ba5 100644
--- a/vpx_dsp/subtract.c
+++ b/vpx_dsp/subtract.c
@@ -16,32 +16,30 @@
 #include "vpx/vpx_integer.h"
 #include "vpx_ports/mem.h"
 
-void vpx_subtract_block_c(int rows, int cols,
-                          int16_t *diff, ptrdiff_t diff_stride,
-                          const uint8_t *src, ptrdiff_t src_stride,
-                          const uint8_t *pred, ptrdiff_t pred_stride) {
+void vpx_subtract_block_c(int rows, int cols, int16_t *diff,
+                          ptrdiff_t diff_stride, const uint8_t *src,
+                          ptrdiff_t src_stride, const uint8_t *pred,
+                          ptrdiff_t pred_stride) {
   int r, c;
 
   for (r = 0; r < rows; r++) {
-    for (c = 0; c < cols; c++)
-      diff[c] = src[c] - pred[c];
+    for (c = 0; c < cols; c++) diff[c] = src[c] - pred[c];
 
     diff += diff_stride;
     pred += pred_stride;
-    src  += src_stride;
+    src += src_stride;
   }
 }
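
The residual buffer is int16_t because the difference of two 8-bit samples spans [-255, 255] and cannot be stored in the unsigned 8-bit source type; for example, src = 3 and pred = 200 give diff = -197.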
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vpx_highbd_subtract_block_c(int rows, int cols,
-                                 int16_t *diff, ptrdiff_t diff_stride,
-                                 const uint8_t *src8, ptrdiff_t src_stride,
-                                 const uint8_t *pred8, ptrdiff_t pred_stride,
-                                 int bd) {
+void vpx_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
+                                 ptrdiff_t diff_stride, const uint8_t *src8,
+                                 ptrdiff_t src_stride, const uint8_t *pred8,
+                                 ptrdiff_t pred_stride, int bd) {
   int r, c;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
-  (void) bd;
+  (void)bd;
 
   for (r = 0; r < rows; r++) {
     for (c = 0; c < cols; c++) {
@@ -50,7 +48,7 @@ void vpx_highbd_subtract_block_c(int rows, int cols,
 
     diff += diff_stride;
     pred += pred_stride;
-    src  += src_stride;
+    src += src_stride;
   }
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
diff --git a/vpx_dsp/txfm_common.h b/vpx_dsp/txfm_common.h
index 442e6a57b5ba5d84a7435674c7ff7f4217b3a0ce..fd27f928ed3f5ff81c1b07a7a5eacc68bf975f35 100644
--- a/vpx_dsp/txfm_common.h
+++ b/vpx_dsp/txfm_common.h
@@ -15,7 +15,7 @@
 
 // Constants and Macros used by all idct/dct functions
 #define DCT_CONST_BITS 14
-#define DCT_CONST_ROUNDING  (1 << (DCT_CONST_BITS - 1))
+#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
 
 #define UNIT_QUANT_SHIFT 2
 #define UNIT_QUANT_FACTOR (1 << UNIT_QUANT_SHIFT)
@@ -25,15 +25,15 @@
 //    printf("static const int cospi_%d_64 = %.0f;\n", i,
 //           round(16384 * cos(i*M_PI/64)));
 // Note: sin(k*Pi/64) = cos((32-k)*Pi/64)
-static const tran_high_t cospi_1_64  = 16364;
-static const tran_high_t cospi_2_64  = 16305;
-static const tran_high_t cospi_3_64  = 16207;
-static const tran_high_t cospi_4_64  = 16069;
-static const tran_high_t cospi_5_64  = 15893;
-static const tran_high_t cospi_6_64  = 15679;
-static const tran_high_t cospi_7_64  = 15426;
-static const tran_high_t cospi_8_64  = 15137;
-static const tran_high_t cospi_9_64  = 14811;
+static const tran_high_t cospi_1_64 = 16364;
+static const tran_high_t cospi_2_64 = 16305;
+static const tran_high_t cospi_3_64 = 16207;
+static const tran_high_t cospi_4_64 = 16069;
+static const tran_high_t cospi_5_64 = 15893;
+static const tran_high_t cospi_6_64 = 15679;
+static const tran_high_t cospi_7_64 = 15426;
+static const tran_high_t cospi_8_64 = 15137;
+static const tran_high_t cospi_9_64 = 14811;
 static const tran_high_t cospi_10_64 = 14449;
 static const tran_high_t cospi_11_64 = 14053;
 static const tran_high_t cospi_12_64 = 13623;
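
Spot-checking one entry against the generator comment above: cospi_8_64 = round(16384 * cos(8 * pi / 64)) = round(16384 * 0.92388) = 15137, matching the table; 16384 is 2^DCT_CONST_BITS, which pairs with DCT_CONST_ROUNDING in the fixed-point butterfly operations.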
diff --git a/vpx_dsp/variance.c b/vpx_dsp/variance.c
index 6b4b384f9d23a8ea81f809fba6fcbf7d4f8f0ee8..32ce102807b3d5d738ae09e048c03182ec5da977 100644
--- a/vpx_dsp/variance.c
+++ b/vpx_dsp/variance.c
@@ -17,18 +17,12 @@
 #include "vpx_dsp/variance.h"
 
 static const uint8_t bilinear_filters[8][2] = {
-  { 128,   0  },
-  { 112,  16  },
-  {  96,  32  },
-  {  80,  48  },
-  {  64,  64  },
-  {  48,  80  },
-  {  32,  96  },
-  {  16, 112  },
+  { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
+  { 64, 64 }, { 48, 80 },  { 32, 96 }, { 16, 112 },
 };
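
Each filter pair sums to 128, i.e. 1 << FILTER_BITS with FILTER_BITS = 7 (an assumption from vpx_filter.h, which this patch does not touch), so the two taps form a fixed-point convex blend indexed by the subpel offset in eighths of a pixel: offset 3, for instance, blends 80/128 of the nearer sample with 48/128 of the next one.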
 
-uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int  a_stride,
-                            const uint8_t *b, int  b_stride) {
+uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
+                            int b_stride) {
   int distortion = 0;
   int r, c;
 
@@ -58,28 +52,23 @@ uint32_t vpx_get_mb_ss_c(const int16_t *a) {
 uint32_t vpx_variance_halfpixvar16x16_h_c(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride,
                                           uint32_t *sse) {
-  return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 0,
-                                       b, b_stride, sse);
+  return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 0, b, b_stride, sse);
 }
 
-
 uint32_t vpx_variance_halfpixvar16x16_v_c(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride,
                                           uint32_t *sse) {
-  return vpx_sub_pixel_variance16x16_c(a, a_stride, 0, 4,
-                                       b, b_stride, sse);
+  return vpx_sub_pixel_variance16x16_c(a, a_stride, 0, 4, b, b_stride, sse);
 }
 
 uint32_t vpx_variance_halfpixvar16x16_hv_c(const uint8_t *a, int a_stride,
                                            const uint8_t *b, int b_stride,
                                            uint32_t *sse) {
-  return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 4,
-                                       b, b_stride, sse);
+  return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 4, b, b_stride, sse);
 }
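
The xoffset/yoffset arguments are in eighths of a pixel, so offset 4 is the half-pel point and the three wrappers above cover the horizontal, vertical and diagonal half-pel cases. A usage sketch with hypothetical frame pointers src/ref and their strides:

uint32_t sse;
/* variance of a 16x16 block against the diagonal half-pel position */
uint32_t var = vpx_variance_halfpixvar16x16_hv_c(src, src_stride,
                                                 ref, ref_stride, &sse);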
 
-static void variance(const uint8_t *a, int  a_stride,
-                     const uint8_t *b, int  b_stride,
-                     int  w, int  h, uint32_t *sse, int *sum) {
+static void variance(const uint8_t *a, int a_stride, const uint8_t *b,
+                     int b_stride, int w, int h, uint32_t *sse, int *sum) {
   int i, j;
 
   *sum = 0;
@@ -115,9 +104,8 @@ static void var_filter_block2d_bil_first_pass(const uint8_t *a, uint16_t *b,
 
   for (i = 0; i < output_height; ++i) {
     for (j = 0; j < output_width; ++j) {
-      b[j] = ROUND_POWER_OF_TWO((int)a[0] * filter[0] +
-                          (int)a[pixel_step] * filter[1],
-                          FILTER_BITS);
+      b[j] = ROUND_POWER_OF_TWO(
+          (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS);
 
       ++a;
     }
@@ -142,13 +130,12 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b,
                                                unsigned int output_height,
                                                unsigned int output_width,
                                                const uint8_t *filter) {
-  unsigned int  i, j;
+  unsigned int i, j;
 
   for (i = 0; i < output_height; ++i) {
     for (j = 0; j < output_width; ++j) {
-      b[j] = ROUND_POWER_OF_TWO((int)a[0] * filter[0] +
-                          (int)a[pixel_step] * filter[1],
-                          FILTER_BITS);
+      b[j] = ROUND_POWER_OF_TWO(
+          (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS);
       ++a;
     }
 
@@ -157,83 +144,80 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b,
   }
 }
 
-#define VAR(W, H) \
-uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
-                                   const uint8_t *b, int b_stride, \
-                                   uint32_t *sse) { \
-  int sum; \
-  variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
-  return *sse - (((int64_t)sum * sum) / (W * H)); \
-}
+#define VAR(W, H)                                                    \
+  uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+                                     const uint8_t *b, int b_stride, \
+                                     uint32_t *sse) {                \
+    int sum;                                                         \
+    variance(a, a_stride, b, b_stride, W, H, sse, &sum);             \
+    return *sse - (((int64_t)sum * sum) / (W * H));                  \
+  }
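
The (int64_t) cast in the return is load-bearing: for a 64x64 block the sum can reach 255 * 4096 = 1044480, and its square (about 1.09e12) overflows 32-bit arithmetic, so the product must be formed in 64 bits before the division by W * H.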
 
-#define SUBPIX_VAR(W, H) \
-uint32_t vpx_sub_pixel_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
-                                             int xoffset, int  yoffset, \
-                                             const uint8_t *b, int b_stride, \
-                                             uint32_t *sse) { \
-  uint16_t fdata3[(H + 1) * W]; \
-  uint8_t temp2[H * W]; \
-\
-  var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
-                                    bilinear_filters[xoffset]); \
-  var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                     bilinear_filters[yoffset]); \
-\
-  return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \
-}
+#define SUBPIX_VAR(W, H)                                                \
+  uint32_t vpx_sub_pixel_variance##W##x##H##_c(                         \
+      const uint8_t *a, int a_stride, int xoffset, int yoffset,         \
+      const uint8_t *b, int b_stride, uint32_t *sse) {                  \
+    uint16_t fdata3[(H + 1) * W];                                       \
+    uint8_t temp2[H * W];                                               \
+                                                                        \
+    var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
+                                      bilinear_filters[xoffset]);       \
+    var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
+                                       bilinear_filters[yoffset]);      \
+                                                                        \
+    return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse);       \
+  }
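
The intermediate fdata3 buffer is (H + 1) rows tall because the second pass filters vertically (pixel_step = W), reading pairs of adjacent rows; producing H output rows therefore needs H + 1 filtered input rows. The first pass runs with pixel_step = 1 to filter horizontally.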
 
-#define SUBPIX_AVG_VAR(W, H) \
-uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c(const uint8_t *a, \
-                                                 int  a_stride, \
-                                                 int xoffset, int  yoffset, \
-                                                 const uint8_t *b, \
-                                                 int b_stride, \
-                                                 uint32_t *sse, \
-                                                 const uint8_t *second_pred) { \
-  uint16_t fdata3[(H + 1) * W]; \
-  uint8_t temp2[H * W]; \
-  DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
-\
-  var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
-                                    bilinear_filters[xoffset]); \
-  var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                     bilinear_filters[yoffset]); \
-\
-  vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \
-\
-  return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \
-}
+#define SUBPIX_AVG_VAR(W, H)                                            \
+  uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c(                     \
+      const uint8_t *a, int a_stride, int xoffset, int yoffset,         \
+      const uint8_t *b, int b_stride, uint32_t *sse,                    \
+      const uint8_t *second_pred) {                                     \
+    uint16_t fdata3[(H + 1) * W];                                       \
+    uint8_t temp2[H * W];                                               \
+    DECLARE_ALIGNED(16, uint8_t, temp3[H * W]);                         \
+                                                                        \
+    var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
+                                      bilinear_filters[xoffset]);       \
+    var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
+                                       bilinear_filters[yoffset]);      \
+                                                                        \
+    vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W);              \
+                                                                        \
+    return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse);       \
+  }
 
 /* Identical to the variance call except it takes an additional parameter, sum,
  * and returns that value using pass-by-reference instead of returning
  * sse - sum^2 / w*h
  */
-#define GET_VAR(W, H) \
-void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride, \
-                             const uint8_t *b, int b_stride, \
-                             uint32_t *sse, int *sum) { \
-  variance(a, a_stride, b, b_stride, W, H, sse, sum); \
-}
+#define GET_VAR(W, H)                                                         \
+  void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride,                \
+                               const uint8_t *b, int b_stride, uint32_t *sse, \
+                               int *sum) {                                    \
+    variance(a, a_stride, b, b_stride, W, H, sse, sum);                       \
+  }
 
 /* Identical to the variance call except it does not calculate the
  * sse - sum^2 / w*h and returns sse in addition to modifying the passed-in
  * variable.
  */
-#define MSE(W, H) \
-uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \
-                              const uint8_t *b, int b_stride, \
-                              uint32_t *sse) { \
-  int sum; \
-  variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
-  return *sse; \
-}
+#define MSE(W, H)                                               \
+  uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \
+                                const uint8_t *b, int b_stride, \
+                                uint32_t *sse) {                \
+    int sum;                                                    \
+    variance(a, a_stride, b, b_stride, W, H, sse, &sum);        \
+    return *sse;                                                \
+  }
 
 /* All three forms of the variance are available in the same sizes. */
 #define VARIANCES(W, H) \
-    VAR(W, H) \
-    SUBPIX_VAR(W, H) \
-    SUBPIX_AVG_VAR(W, H)
+  VAR(W, H)             \
+  SUBPIX_VAR(W, H)      \
+  SUBPIX_AVG_VAR(W, H)
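
Each VARIANCES(W, H) line below therefore instantiates three functions at once; VARIANCES(16, 16), for example, yields vpx_variance16x16_c, vpx_sub_pixel_variance16x16_c and vpx_sub_pixel_avg_variance16x16_c.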
 
+/* clang-format off */
 VARIANCES(64, 64)
 VARIANCES(64, 32)
 VARIANCES(32, 64)
@@ -255,10 +239,10 @@ MSE(16, 16)
 MSE(16, 8)
 MSE(8, 16)
 MSE(8, 8)
+/* clang-format on */
 
-void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred,
-                         int width, int height,
-                         const uint8_t *ref, int ref_stride) {
+void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
+                         int height, const uint8_t *ref, int ref_stride) {
   int i, j;
 
   for (i = 0; i < height; ++i) {
@@ -273,9 +257,9 @@ void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static void highbd_variance64(const uint8_t *a8, int  a_stride,
-                              const uint8_t *b8, int  b_stride,
-                              int w, int h, uint64_t *sse, uint64_t *sum) {
+static void highbd_variance64(const uint8_t *a8, int a_stride,
+                              const uint8_t *b8, int b_stride, int w, int h,
+                              uint64_t *sse, uint64_t *sum) {
   int i, j;
 
   uint16_t *a = CONVERT_TO_SHORTPTR(a8);
@@ -294,9 +278,9 @@ static void highbd_variance64(const uint8_t *a8, int  a_stride,
   }
 }
 
-static void highbd_8_variance(const uint8_t *a8, int  a_stride,
-                              const uint8_t *b8, int  b_stride,
-                              int w, int h, uint32_t *sse, int *sum) {
+static void highbd_8_variance(const uint8_t *a8, int a_stride,
+                              const uint8_t *b8, int b_stride, int w, int h,
+                              uint32_t *sse, int *sum) {
   uint64_t sse_long = 0;
   uint64_t sum_long = 0;
   highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
@@ -304,9 +288,9 @@ static void highbd_8_variance(const uint8_t *a8, int  a_stride,
   *sum = (int)sum_long;
 }
 
-static void highbd_10_variance(const uint8_t *a8, int  a_stride,
-                               const uint8_t *b8, int  b_stride,
-                               int w, int h, uint32_t *sse, int *sum) {
+static void highbd_10_variance(const uint8_t *a8, int a_stride,
+                               const uint8_t *b8, int b_stride, int w, int h,
+                               uint32_t *sse, int *sum) {
   uint64_t sse_long = 0;
   uint64_t sum_long = 0;
   highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
@@ -314,9 +298,9 @@ static void highbd_10_variance(const uint8_t *a8, int  a_stride,
   *sum = (int)ROUND_POWER_OF_TWO(sum_long, 2);
 }
 
-static void highbd_12_variance(const uint8_t *a8, int  a_stride,
-                               const uint8_t *b8, int  b_stride,
-                               int w, int h, uint32_t *sse, int *sum) {
+static void highbd_12_variance(const uint8_t *a8, int a_stride,
+                               const uint8_t *b8, int b_stride, int w, int h,
+                               uint32_t *sse, int *sum) {
   uint64_t sse_long = 0;
   uint64_t sum_long = 0;
   highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
@@ -324,103 +308,87 @@ static void highbd_12_variance(const uint8_t *a8, int  a_stride,
   *sum = (int)ROUND_POWER_OF_TWO(sum_long, 4);
 }
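
The three bit-depth wrappers differ only in how they renormalize the 64-bit accumulators back to an 8-bit scale: 10-bit samples are 4x larger than 8-bit ones, so sums shift right by 2 and squared sums by 4, while at 12 bits the factor is 16x and the shifts become 4 and 8 (the matching sse shifts sit in the elided context). ROUND_POWER_OF_TWO keeps those truncations unbiased.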
 
-#define HIGHBD_VAR(W, H) \
-uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, \
-                                            int a_stride, \
-                                            const uint8_t *b, \
-                                            int b_stride, \
-                                            uint32_t *sse) { \
-  int sum; \
-  highbd_8_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
-  return *sse - (((int64_t)sum * sum) / (W * H)); \
-} \
-\
-uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, \
-                                             int a_stride, \
-                                             const uint8_t *b, \
-                                             int b_stride, \
-                                             uint32_t *sse) { \
-  int sum; \
-  highbd_10_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
-  return *sse - (((int64_t)sum * sum) / (W * H)); \
-} \
-\
-uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, \
-                                             int a_stride, \
-                                             const uint8_t *b, \
-                                             int b_stride, \
-                                             uint32_t *sse) { \
-  int sum; \
-  highbd_12_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
-  return *sse - (((int64_t)sum * sum) / (W * H)); \
-}
+#define HIGHBD_VAR(W, H)                                                       \
+  uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride,  \
+                                              const uint8_t *b, int b_stride,  \
+                                              uint32_t *sse) {                 \
+    int sum;                                                                   \
+    highbd_8_variance(a, a_stride, b, b_stride, W, H, sse, &sum);              \
+    return *sse - (((int64_t)sum * sum) / (W * H));                            \
+  }                                                                            \
+                                                                               \
+  uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+                                               const uint8_t *b, int b_stride, \
+                                               uint32_t *sse) {                \
+    int sum;                                                                   \
+    highbd_10_variance(a, a_stride, b, b_stride, W, H, sse, &sum);             \
+    return *sse - (((int64_t)sum * sum) / (W * H));                            \
+  }                                                                            \
+                                                                               \
+  uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+                                               const uint8_t *b, int b_stride, \
+                                               uint32_t *sse) {                \
+    int sum;                                                                   \
+    highbd_12_variance(a, a_stride, b, b_stride, W, H, sse, &sum);             \
+    return *sse - (((int64_t)sum * sum) / (W * H));                            \
+  }
 
-#define HIGHBD_GET_VAR(S) \
-void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
-                                      const uint8_t *ref, int ref_stride, \
-                                      uint32_t *sse, int *sum) { \
-  highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
-} \
-\
-void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
-                                       const uint8_t *ref, int ref_stride, \
-                                       uint32_t *sse, int *sum) { \
-  highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
-} \
-\
-void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
-                                       const uint8_t *ref, int ref_stride, \
-                                       uint32_t *sse, int *sum) { \
-  highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
-}
+#define HIGHBD_GET_VAR(S)                                                    \
+  void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride,  \
+                                        const uint8_t *ref, int ref_stride,  \
+                                        uint32_t *sse, int *sum) {           \
+    highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);     \
+  }                                                                          \
+                                                                             \
+  void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
+                                         const uint8_t *ref, int ref_stride, \
+                                         uint32_t *sse, int *sum) {          \
+    highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);    \
+  }                                                                          \
+                                                                             \
+  void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
+                                         const uint8_t *ref, int ref_stride, \
+                                         uint32_t *sse, int *sum) {          \
+    highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);    \
+  }
 
-#define HIGHBD_MSE(W, H) \
-uint32_t vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, \
-                                       int src_stride, \
-                                       const uint8_t *ref, \
-                                       int ref_stride, \
-                                       uint32_t *sse) { \
-  int sum; \
-  highbd_8_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \
-  return *sse; \
-} \
-\
-uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, \
-                                        int src_stride, \
-                                        const uint8_t *ref, \
-                                        int ref_stride, \
-                                        uint32_t *sse) { \
-  int sum; \
-  highbd_10_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \
-  return *sse; \
-} \
-\
-uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, \
-                                        int src_stride, \
-                                        const uint8_t *ref, \
-                                        int ref_stride, \
-                                        uint32_t *sse) { \
-  int sum; \
-  highbd_12_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \
-  return *sse; \
-}
+#define HIGHBD_MSE(W, H)                                                      \
+  uint32_t vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride,  \
+                                         const uint8_t *ref, int ref_stride,  \
+                                         uint32_t *sse) {                     \
+    int sum;                                                                  \
+    highbd_8_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum);     \
+    return *sse;                                                              \
+  }                                                                           \
+                                                                              \
+  uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
+                                          const uint8_t *ref, int ref_stride, \
+                                          uint32_t *sse) {                    \
+    int sum;                                                                  \
+    highbd_10_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum);    \
+    return *sse;                                                              \
+  }                                                                           \
+                                                                              \
+  uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
+                                          const uint8_t *ref, int ref_stride, \
+                                          uint32_t *sse) {                    \
+    int sum;                                                                  \
+    highbd_12_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum);    \
+    return *sse;                                                              \
+  }
 
 static void highbd_var_filter_block2d_bil_first_pass(
-    const uint8_t *src_ptr8,
-    uint16_t *output_ptr,
-    unsigned int src_pixels_per_line,
-    int pixel_step,
-    unsigned int output_height,
-    unsigned int output_width,
+    const uint8_t *src_ptr8, uint16_t *output_ptr,
+    unsigned int src_pixels_per_line, int pixel_step,
+    unsigned int output_height, unsigned int output_width,
     const uint8_t *filter) {
   unsigned int i, j;
   uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src_ptr8);
   for (i = 0; i < output_height; ++i) {
     for (j = 0; j < output_width; ++j) {
-      output_ptr[j] =
-          ROUND_POWER_OF_TWO((int)src_ptr[0] * filter[0] +
-                             (int)src_ptr[pixel_step] * filter[1],
-                             FILTER_BITS);
+      output_ptr[j] = ROUND_POWER_OF_TWO(
+          (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1],
+          FILTER_BITS);
 
       ++src_ptr;
     }
@@ -432,21 +400,17 @@ static void highbd_var_filter_block2d_bil_first_pass(
 }
 
 static void highbd_var_filter_block2d_bil_second_pass(
-    const uint16_t *src_ptr,
-    uint16_t *output_ptr,
-    unsigned int src_pixels_per_line,
-    unsigned int pixel_step,
-    unsigned int output_height,
-    unsigned int output_width,
+    const uint16_t *src_ptr, uint16_t *output_ptr,
+    unsigned int src_pixels_per_line, unsigned int pixel_step,
+    unsigned int output_height, unsigned int output_width,
     const uint8_t *filter) {
-  unsigned int  i, j;
+  unsigned int i, j;
 
   for (i = 0; i < output_height; ++i) {
     for (j = 0; j < output_width; ++j) {
-      output_ptr[j] =
-          ROUND_POWER_OF_TWO((int)src_ptr[0] * filter[0] +
-                             (int)src_ptr[pixel_step] * filter[1],
-                             FILTER_BITS);
+      output_ptr[j] = ROUND_POWER_OF_TWO(
+          (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1],
+          FILTER_BITS);
       ++src_ptr;
     }
 
@@ -455,131 +419,120 @@ static void highbd_var_filter_block2d_bil_second_pass(
   }
 }
 
-#define HIGHBD_SUBPIX_VAR(W, H) \
-uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c( \
-  const uint8_t *src, int  src_stride, \
-  int xoffset, int  yoffset, \
-  const uint8_t *dst, int dst_stride, \
-  uint32_t *sse) { \
-  uint16_t fdata3[(H + 1) * W]; \
-  uint16_t temp2[H * W]; \
-\
-  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
-  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
-\
-  return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, dst, \
-                                          dst_stride, sse); \
-} \
-\
-uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c( \
-  const uint8_t *src, int  src_stride, \
-  int xoffset, int  yoffset, \
-  const uint8_t *dst, int dst_stride, \
-  uint32_t *sse) { \
-  uint16_t fdata3[(H + 1) * W]; \
-  uint16_t temp2[H * W]; \
-\
-  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
-  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
-\
-  return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
-                                             W, dst, dst_stride, sse); \
-} \
-\
-uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c( \
-  const uint8_t *src, int  src_stride, \
-  int xoffset, int  yoffset, \
-  const uint8_t *dst, int dst_stride, \
-  uint32_t *sse) { \
-  uint16_t fdata3[(H + 1) * W]; \
-  uint16_t temp2[H * W]; \
-\
-  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
-  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
-\
-  return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
-                                             W, dst, dst_stride, sse); \
-}
+#define HIGHBD_SUBPIX_VAR(W, H)                                              \
+  uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c(                     \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
+    uint16_t fdata3[(H + 1) * W];                                            \
+    uint16_t temp2[H * W];                                                   \
+                                                                             \
+    highbd_var_filter_block2d_bil_first_pass(                                \
+        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
+    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
+                                              bilinear_filters[yoffset]);    \
+                                                                             \
+    return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W,  \
+                                              dst, dst_stride, sse);         \
+  }                                                                          \
+                                                                             \
+  uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c(                    \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
+    uint16_t fdata3[(H + 1) * W];                                            \
+    uint16_t temp2[H * W];                                                   \
+                                                                             \
+    highbd_var_filter_block2d_bil_first_pass(                                \
+        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
+    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
+                                              bilinear_filters[yoffset]);    \
+                                                                             \
+    return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+                                               dst, dst_stride, sse);        \
+  }                                                                          \
+                                                                             \
+  uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c(                    \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
+    uint16_t fdata3[(H + 1) * W];                                            \
+    uint16_t temp2[H * W];                                                   \
+                                                                             \
+    highbd_var_filter_block2d_bil_first_pass(                                \
+        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
+    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
+                                              bilinear_filters[yoffset]);    \
+                                                                             \
+    return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+                                               dst, dst_stride, sse);        \
+  }
 
-#define HIGHBD_SUBPIX_AVG_VAR(W, H) \
-uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \
-  const uint8_t *src, int  src_stride, \
-  int xoffset, int  yoffset, \
-  const uint8_t *dst, int dst_stride, \
-  uint32_t *sse, \
-  const uint8_t *second_pred) { \
-  uint16_t fdata3[(H + 1) * W]; \
-  uint16_t temp2[H * W]; \
-  DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
-\
-  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
-  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
-\
-  vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \
-                           CONVERT_TO_BYTEPTR(temp2), W); \
-\
-  return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, dst, \
-                                          dst_stride, sse); \
-} \
-\
-uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \
-  const uint8_t *src, int  src_stride, \
-  int xoffset, int  yoffset, \
-  const uint8_t *dst, int dst_stride, \
-  uint32_t *sse, \
-  const uint8_t *second_pred) { \
-  uint16_t fdata3[(H + 1) * W]; \
-  uint16_t temp2[H * W]; \
-  DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
-\
-  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
-  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
-\
-  vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \
-                           CONVERT_TO_BYTEPTR(temp2), W); \
-\
-  return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), \
-                                             W, dst, dst_stride, sse); \
-} \
-\
-uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \
-  const uint8_t *src, int  src_stride, \
-  int xoffset, int  yoffset, \
-  const uint8_t *dst, int dst_stride, \
-  uint32_t *sse, \
-  const uint8_t *second_pred) { \
-  uint16_t fdata3[(H + 1) * W]; \
-  uint16_t temp2[H * W]; \
-  DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
-\
-  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
-  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
-\
-  vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \
-                           CONVERT_TO_BYTEPTR(temp2), W); \
-\
-  return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), \
-                                             W, dst, dst_stride, sse); \
-}
+#define HIGHBD_SUBPIX_AVG_VAR(W, H)                                          \
+  uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c(                 \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
+      const uint8_t *second_pred) {                                          \
+    uint16_t fdata3[(H + 1) * W];                                            \
+    uint16_t temp2[H * W];                                                   \
+    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
+                                                                             \
+    highbd_var_filter_block2d_bil_first_pass(                                \
+        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
+    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
+                                              bilinear_filters[yoffset]);    \
+                                                                             \
+    vpx_highbd_comp_avg_pred(temp3, second_pred, W, H,                       \
+                             CONVERT_TO_BYTEPTR(temp2), W);                  \
+                                                                             \
+    return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W,  \
+                                              dst, dst_stride, sse);         \
+  }                                                                          \
+                                                                             \
+  uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c(                \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
+      const uint8_t *second_pred) {                                          \
+    uint16_t fdata3[(H + 1) * W];                                            \
+    uint16_t temp2[H * W];                                                   \
+    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
+                                                                             \
+    highbd_var_filter_block2d_bil_first_pass(                                \
+        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
+    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
+                                              bilinear_filters[yoffset]);    \
+                                                                             \
+    vpx_highbd_comp_avg_pred(temp3, second_pred, W, H,                       \
+                             CONVERT_TO_BYTEPTR(temp2), W);                  \
+                                                                             \
+    return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+                                               dst, dst_stride, sse);        \
+  }                                                                          \
+                                                                             \
+  uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c(                \
+      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
+      const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
+      const uint8_t *second_pred) {                                          \
+    uint16_t fdata3[(H + 1) * W];                                            \
+    uint16_t temp2[H * W];                                                   \
+    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
+                                                                             \
+    highbd_var_filter_block2d_bil_first_pass(                                \
+        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
+    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
+                                              bilinear_filters[yoffset]);    \
+                                                                             \
+    vpx_highbd_comp_avg_pred(temp3, second_pred, W, H,                       \
+                             CONVERT_TO_BYTEPTR(temp2), W);                  \
+                                                                             \
+    return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+                                               dst, dst_stride, sse);        \
+  }
 
 /* All three forms of the variance are available in the same sizes. */
 #define HIGHBD_VARIANCES(W, H) \
-    HIGHBD_VAR(W, H) \
-    HIGHBD_SUBPIX_VAR(W, H) \
-    HIGHBD_SUBPIX_AVG_VAR(W, H)
+  HIGHBD_VAR(W, H)             \
+  HIGHBD_SUBPIX_VAR(W, H)      \
+  HIGHBD_SUBPIX_AVG_VAR(W, H)
 
+/* clang-format off */
 HIGHBD_VARIANCES(64, 64)
 HIGHBD_VARIANCES(64, 32)
 HIGHBD_VARIANCES(32, 64)
@@ -601,6 +554,7 @@ HIGHBD_MSE(16, 16)
 HIGHBD_MSE(16, 8)
 HIGHBD_MSE(8, 16)
 HIGHBD_MSE(8, 8)
+/* clang-format on */
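The reformatted HIGHBD_SUBPIX_VAR and HIGHBD_SUBPIX_AVG_VAR macros above stamp out one function per bit depth (8, 10, 12) via token pasting, and HIGHBD_VARIANCES then instantiates all three forms for each W x H pair; the clang-format off/on guards keep that instantiation list from being re-wrapped. A minimal sketch of the same stamping pattern, with hypothetical names, purely to illustrate the ## mechanics:

/* Sketch only: MAKE_SUM and sum_WxH are hypothetical, not vpx_dsp API. */
#include <stdint.h>

#define MAKE_SUM(W, H)                                          \
  static uint32_t sum_##W##x##H(const uint8_t *p, int stride) { \
    uint32_t s = 0;                                             \
    int r, c;                                                   \
    for (r = 0; r < (H); ++r, p += stride)                      \
      for (c = 0; c < (W); ++c) s += p[c];                      \
    return s;                                                   \
  }

MAKE_SUM(8, 8)   /* expands to sum_8x8()   */
MAKE_SUM(16, 16) /* expands to sum_16x16() */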
 
 void vpx_highbd_comp_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
                               int width, int height, const uint8_t *ref8,
diff --git a/vpx_dsp/variance.h b/vpx_dsp/variance.h
index 17d8abc64ab2557fdb51edd872694a41af7c9141..6a3202054e182861c2cff7a4cb7a4d41ecee19e8 100644
--- a/vpx_dsp/variance.h
+++ b/vpx_dsp/variance.h
@@ -22,15 +22,15 @@ extern "C" {
 #define FILTER_BITS 7
 #define FILTER_WEIGHT 128
 
-typedef unsigned int(*vpx_sad_fn_t)(const uint8_t *a, int a_stride,
-                                    const uint8_t *b_ptr, int b_stride);
+typedef unsigned int (*vpx_sad_fn_t)(const uint8_t *a, int a_stride,
+                                     const uint8_t *b_ptr, int b_stride);
 
-typedef unsigned int(*vpx_sad_avg_fn_t)(const uint8_t *a_ptr, int a_stride,
-                                        const uint8_t *b_ptr, int b_stride,
-                                        const uint8_t *second_pred);
+typedef unsigned int (*vpx_sad_avg_fn_t)(const uint8_t *a_ptr, int a_stride,
+                                         const uint8_t *b_ptr, int b_stride,
+                                         const uint8_t *second_pred);
 
-typedef void (*vp8_copy32xn_fn_t)(const uint8_t *a, int a_stride,
-                                  uint8_t *b, int b_stride, int n);
+typedef void (*vp8_copy32xn_fn_t)(const uint8_t *a, int a_stride, uint8_t *b,
+                                  int b_stride, int n);
 
 typedef void (*vpx_sad_multi_fn_t)(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
@@ -38,8 +38,7 @@ typedef void (*vpx_sad_multi_fn_t)(const uint8_t *a, int a_stride,
 
 typedef void (*vpx_sad_multi_d_fn_t)(const uint8_t *a, int a_stride,
                                      const uint8_t *const b_array[],
-                                     int b_stride,
-                                     unsigned int *sad_array);
+                                     int b_stride, unsigned int *sad_array);
 
 typedef unsigned int (*vpx_variance_fn_t)(const uint8_t *a, int a_stride,
                                           const uint8_t *b, int b_stride,
@@ -50,23 +49,20 @@ typedef unsigned int (*vpx_subpixvariance_fn_t)(const uint8_t *a, int a_stride,
                                                 const uint8_t *b, int b_stride,
                                                 unsigned int *sse);
 
-typedef unsigned int (*vpx_subp_avg_variance_fn_t)(const uint8_t *a_ptr,
-                                                   int a_stride,
-                                                   int xoffset, int yoffset,
-                                                   const uint8_t *b_ptr,
-                                                   int b_stride,
-                                                   unsigned int *sse,
-                                                   const uint8_t *second_pred);
+typedef unsigned int (*vpx_subp_avg_variance_fn_t)(
+    const uint8_t *a_ptr, int a_stride, int xoffset, int yoffset,
+    const uint8_t *b_ptr, int b_stride, unsigned int *sse,
+    const uint8_t *second_pred);
 #if CONFIG_VP10
 typedef struct vpx_variance_vtable {
-  vpx_sad_fn_t               sdf;
-  vpx_sad_avg_fn_t           sdaf;
-  vpx_variance_fn_t          vf;
-  vpx_subpixvariance_fn_t    svf;
+  vpx_sad_fn_t sdf;
+  vpx_sad_avg_fn_t sdaf;
+  vpx_variance_fn_t vf;
+  vpx_subpixvariance_fn_t svf;
   vpx_subp_avg_variance_fn_t svaf;
-  vpx_sad_multi_fn_t         sdx3f;
-  vpx_sad_multi_fn_t         sdx8f;
-  vpx_sad_multi_d_fn_t       sdx4df;
+  vpx_sad_multi_fn_t sdx3f;
+  vpx_sad_multi_fn_t sdx8f;
+  vpx_sad_multi_d_fn_t sdx4df;
 } vpx_variance_fn_ptr_t;
 #endif  // CONFIG_VP10
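vpx_variance_fn_ptr_t gathers the SAD/variance kernels for one block size into a single table, so motion-search code can be written once against the table rather than against per-size symbols. A hedged sketch of filling and using such a table; it assumes the usual rtcd-dispatched names vpx_sad16x16 and vpx_variance16x16 are available in the build, and wires only two of the slots:

/* Sketch under the assumptions above; the remaining slots (sdaf, svf,
 * svaf, sdx3f, sdx8f, sdx4df) would be filled the same way.           */
vpx_variance_fn_ptr_t fn16;
fn16.sdf = vpx_sad16x16;
fn16.vf = vpx_variance16x16;

unsigned int sse;
unsigned int sad = fn16.sdf(src, src_stride, ref, ref_stride);
unsigned int var = fn16.vf(src, src_stride, ref, ref_stride, &sse);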
 
diff --git a/vpx_dsp/vpx_convolve.c b/vpx_dsp/vpx_convolve.c
index 16942bd128a19eafa81286de2233f004929d16f9..8d78dfbfa4b2a186bdd40b6d2b26d4fc6958d9d8 100644
--- a/vpx_dsp/vpx_convolve.c
+++ b/vpx_dsp/vpx_convolve.c
@@ -21,8 +21,8 @@
 
 static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
-                           const InterpKernel *x_filters,
-                           int x0_q4, int x_step_q4, int w, int h) {
+                           const InterpKernel *x_filters, int x0_q4,
+                           int x_step_q4, int w, int h) {
   int x, y;
   src -= SUBPEL_TAPS / 2 - 1;
   for (y = 0; y < h; ++y) {
@@ -31,8 +31,7 @@ static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
       const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
       const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
       int k, sum = 0;
-      for (k = 0; k < SUBPEL_TAPS; ++k)
-        sum += src_x[k] * x_filter[k];
+      for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
       dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
       x_q4 += x_step_q4;
     }
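The horizontal loop above walks the source in Q4 fixed point: x_q4 >> SUBPEL_BITS picks the integer source position for the taps, and x_q4 & SUBPEL_MASK picks one of the 16 subpel kernels. A worked example, assuming SUBPEL_BITS == 4 and SUBPEL_MASK == 15 as defined in vpx_filter.h:

/* x0_q4 = 8, x_step_q4 = 16 (unscaled): every output pixel sits half a
 * pixel to the right of its source position.                           */
int x_q4 = 8;
int whole = x_q4 >> 4; /* 0: taps start at src_x = &src[0]  */
int frac = x_q4 & 15;  /* 8: the half-pel kernel            */
x_q4 += 16;            /* next pixel: whole == 1, frac == 8 */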
@@ -43,8 +42,8 @@ static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
 
 static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
-                               const InterpKernel *x_filters,
-                               int x0_q4, int x_step_q4, int w, int h) {
+                               const InterpKernel *x_filters, int x0_q4,
+                               int x_step_q4, int w, int h) {
   int x, y;
   src -= SUBPEL_TAPS / 2 - 1;
   for (y = 0; y < h; ++y) {
@@ -53,10 +52,9 @@ static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride,
       const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
       const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
       int k, sum = 0;
-      for (k = 0; k < SUBPEL_TAPS; ++k)
-        sum += src_x[k] * x_filter[k];
-      dst[x] = ROUND_POWER_OF_TWO(dst[x] +
-          clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
+      for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
+      dst[x] = ROUND_POWER_OF_TWO(
+          dst[x] + clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
       x_q4 += x_step_q4;
     }
     src += src_stride;
@@ -66,8 +64,8 @@ static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride,
 
 static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
-                          const InterpKernel *y_filters,
-                          int y0_q4, int y_step_q4, int w, int h) {
+                          const InterpKernel *y_filters, int y0_q4,
+                          int y_step_q4, int w, int h) {
   int x, y;
   src -= src_stride * (SUBPEL_TAPS / 2 - 1);
 
@@ -89,8 +87,8 @@ static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride,
 
 static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
-                              const InterpKernel *y_filters,
-                              int y0_q4, int y_step_q4, int w, int h) {
+                              const InterpKernel *y_filters, int y0_q4,
+                              int y_step_q4, int w, int h) {
   int x, y;
   src -= src_stride * (SUBPEL_TAPS / 2 - 1);
 
@@ -102,8 +100,10 @@ static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride,
       int k, sum = 0;
       for (k = 0; k < SUBPEL_TAPS; ++k)
         sum += src_y[k * src_stride] * y_filter[k];
-      dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] +
-          clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
+      dst[y * dst_stride] = ROUND_POWER_OF_TWO(
+          dst[y * dst_stride] +
+              clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)),
+          1);
       y_q4 += y_step_q4;
     }
     ++src;
@@ -111,13 +111,11 @@ static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride,
   }
 }
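Both _avg_ variants blend with rounding rather than truncation: ROUND_POWER_OF_TWO(v, 1) adds half the divisor before shifting, so dst receives the rounded mean of its old value and the newly filtered pixel. The arithmetic, assuming the conventional definition of the macro in vpx_dsp_common.h:

/* ROUND_POWER_OF_TWO(value, n) == ((value) + (1 << ((n) - 1))) >> (n),
 * so averaging 100 and 103 yields 102 rather than the truncated 101.  */
int blended = (100 + 103 + 1) >> 1; /* == 102 */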
 
-static void convolve(const uint8_t *src, ptrdiff_t src_stride,
-                     uint8_t *dst, ptrdiff_t dst_stride,
-                     const InterpKernel *const x_filters,
+static void convolve(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                     ptrdiff_t dst_stride, const InterpKernel *const x_filters,
                      int x0_q4, int x_step_q4,
-                     const InterpKernel *const y_filters,
-                     int y0_q4, int y_step_q4,
-                     int w, int h) {
+                     const InterpKernel *const y_filters, int y0_q4,
+                     int y_step_q4, int w, int h) {
   // Note: Fixed size intermediate buffer, temp, places limits on parameters.
   // 2d filtering proceeds in 2 steps:
   //   (1) Interpolate horizontally into an intermediate buffer, temp.
@@ -132,7 +130,7 @@ static void convolve(const uint8_t *src, ptrdiff_t src_stride,
   // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
   uint8_t temp[135 * 64];
   int intermediate_height =
-          (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;
+      (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;
 
   assert(w <= 64);
   assert(h <= 64);
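intermediate_height sizes the horizontal pass so the vertical pass never reads outside temp: the last output row samples source row ((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS, plus SUBPEL_TAPS rows of filter context. Plugging in the worst case the asserts permit:

/* h = 64, y_step_q4 = 32, y0_q4 at most 15, SUBPEL_BITS = 4, TAPS = 8. */
int rows = (((64 - 1) * 32 + 15) >> 4) + 8;
/* rows == (2031 >> 4) + 8 == 126 + 8 == 134, which fits inside the
 * 135-row temp[135 * 64] buffer declared above.                        */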
@@ -158,67 +156,66 @@ static int get_filter_offset(const int16_t *f, const InterpKernel *base) {
 void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int x_step_q4,
-                           const int16_t *filter_y, int y_step_q4,
-                           int w, int h) {
+                           const int16_t *filter_y, int y_step_q4, int w,
+                           int h) {
   const InterpKernel *const filters_x = get_filter_base(filter_x);
   const int x0_q4 = get_filter_offset(filter_x, filters_x);
 
   (void)filter_y;
   (void)y_step_q4;
 
-  convolve_horiz(src, src_stride, dst, dst_stride, filters_x,
-                 x0_q4, x_step_q4, w, h);
+  convolve_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4, x_step_q4,
+                 w, h);
 }
 
 void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4,
-                               int w, int h) {
+                               const int16_t *filter_y, int y_step_q4, int w,
+                               int h) {
   const InterpKernel *const filters_x = get_filter_base(filter_x);
   const int x0_q4 = get_filter_offset(filter_x, filters_x);
 
   (void)filter_y;
   (void)y_step_q4;
 
-  convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x,
-                     x0_q4, x_step_q4, w, h);
+  convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4,
+                     x_step_q4, w, h);
 }
 
 void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const int16_t *filter_x, int x_step_q4,
-                          const int16_t *filter_y, int y_step_q4,
-                          int w, int h) {
+                          const int16_t *filter_y, int y_step_q4, int w,
+                          int h) {
   const InterpKernel *const filters_y = get_filter_base(filter_y);
   const int y0_q4 = get_filter_offset(filter_y, filters_y);
 
   (void)filter_x;
   (void)x_step_q4;
 
-  convolve_vert(src, src_stride, dst, dst_stride, filters_y,
-                y0_q4, y_step_q4, w, h);
+  convolve_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4, y_step_q4,
+                w, h);
 }
 
 void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4,
-                              int w, int h) {
+                              const int16_t *filter_y, int y_step_q4, int w,
+                              int h) {
   const InterpKernel *const filters_y = get_filter_base(filter_y);
   const int y0_q4 = get_filter_offset(filter_y, filters_y);
 
   (void)filter_x;
   (void)x_step_q4;
 
-  convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y,
-                    y0_q4, y_step_q4, w, h);
+  convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4,
+                    y_step_q4, w, h);
 }
 
-void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
-                     uint8_t *dst, ptrdiff_t dst_stride,
-                     const int16_t *filter_x, int x_step_q4,
-                     const int16_t *filter_y, int y_step_q4,
+void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                     ptrdiff_t dst_stride, const int16_t *filter_x,
+                     int x_step_q4, const int16_t *filter_y, int y_step_q4,
                      int w, int h) {
   const InterpKernel *const filters_x = get_filter_base(filter_x);
   const int x0_q4 = get_filter_offset(filter_x, filters_x);
@@ -226,35 +223,34 @@ void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
   const InterpKernel *const filters_y = get_filter_base(filter_y);
   const int y0_q4 = get_filter_offset(filter_y, filters_y);
 
-  convolve(src, src_stride, dst, dst_stride,
-           filters_x, x0_q4, x_step_q4,
+  convolve(src, src_stride, dst, dst_stride, filters_x, x0_q4, x_step_q4,
            filters_y, y0_q4, y_step_q4, w, h);
 }
 
-void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter_x, int x_step_q4,
-                         const int16_t *filter_y, int y_step_q4,
+void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                         ptrdiff_t dst_stride, const int16_t *filter_x,
+                         int x_step_q4, const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
   /* Fixed size intermediate buffer places limits on parameters. */
   DECLARE_ALIGNED(16, uint8_t, temp[64 * 64]);
   assert(w <= 64);
   assert(h <= 64);
 
-  vpx_convolve8_c(src, src_stride, temp, 64,
-                  filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+  vpx_convolve8_c(src, src_stride, temp, 64, filter_x, x_step_q4, filter_y,
+                  y_step_q4, w, h);
   vpx_convolve_avg_c(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h);
 }
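vpx_convolve8_avg_c is composition rather than a fused loop: filter into a 64x64 scratch block, then fold that block into dst with vpx_convolve_avg_c, whose filter arguments are unused (hence the NULL, 0 pairs). The same two-step shape works for any convolve function with this signature; a sketch with a placeholder kernel:

/* my_convolve is a placeholder for any convolve_fn_t-shaped function. */
DECLARE_ALIGNED(16, uint8_t, tmp[64 * 64]);
my_convolve(src, src_stride, tmp, 64, filter_x, 16, filter_y, 16, w, h);
vpx_convolve_avg_c(tmp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h);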
 
-void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter_x, int filter_x_stride,
-                         const int16_t *filter_y, int filter_y_stride,
-                         int w, int h) {
+void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                         ptrdiff_t dst_stride, const int16_t *filter_x,
+                         int filter_x_stride, const int16_t *filter_y,
+                         int filter_y_stride, int w, int h) {
   int r;
 
-  (void)filter_x;  (void)filter_x_stride;
-  (void)filter_y;  (void)filter_y_stride;
+  (void)filter_x;
+  (void)filter_x_stride;
+  (void)filter_y;
+  (void)filter_y_stride;
 
   for (r = h; r > 0; --r) {
     memcpy(dst, src, w);
@@ -263,47 +259,44 @@ void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
   }
 }
 
-void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride,
-                        uint8_t *dst, ptrdiff_t dst_stride,
-                        const int16_t *filter_x, int filter_x_stride,
-                        const int16_t *filter_y, int filter_y_stride,
-                        int w, int h) {
+void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                        ptrdiff_t dst_stride, const int16_t *filter_x,
+                        int filter_x_stride, const int16_t *filter_y,
+                        int filter_y_stride, int w, int h) {
   int x, y;
 
-  (void)filter_x;  (void)filter_x_stride;
-  (void)filter_y;  (void)filter_y_stride;
+  (void)filter_x;
+  (void)filter_x_stride;
+  (void)filter_y;
+  (void)filter_y_stride;
 
   for (y = 0; y < h; ++y) {
-    for (x = 0; x < w; ++x)
-      dst[x] = ROUND_POWER_OF_TWO(dst[x] + src[x], 1);
+    for (x = 0; x < w; ++x) dst[x] = ROUND_POWER_OF_TWO(dst[x] + src[x], 1);
 
     src += src_stride;
     dst += dst_stride;
   }
 }
 
-void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
-                        uint8_t *dst, ptrdiff_t dst_stride,
-                        const int16_t *filter_x, int x_step_q4,
-                        const int16_t *filter_y, int y_step_q4,
+void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                        ptrdiff_t dst_stride, const int16_t *filter_x,
+                        int x_step_q4, const int16_t *filter_y, int y_step_q4,
                         int w, int h) {
   vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                         filter_y, y_step_q4, w, h);
 }
 
-void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride,
-                       uint8_t *dst, ptrdiff_t dst_stride,
-                       const int16_t *filter_x, int x_step_q4,
-                       const int16_t *filter_y, int y_step_q4,
+void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                       ptrdiff_t dst_stride, const int16_t *filter_x,
+                       int x_step_q4, const int16_t *filter_y, int y_step_q4,
                        int w, int h) {
   vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                        filter_y, y_step_q4, w, h);
 }
 
-void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride,
-                     uint8_t *dst, ptrdiff_t dst_stride,
-                     const int16_t *filter_x, int x_step_q4,
-                     const int16_t *filter_y, int y_step_q4,
+void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                     ptrdiff_t dst_stride, const int16_t *filter_x,
+                     int x_step_q4, const int16_t *filter_y, int y_step_q4,
                      int w, int h) {
   vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                   filter_y, y_step_q4, w, h);
@@ -312,8 +305,8 @@ void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride,
 void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
-                            const int16_t *filter_y, int y_step_q4,
-                            int w, int h) {
+                            const int16_t *filter_y, int y_step_q4, int w,
+                            int h) {
   vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x,
                             x_step_q4, filter_y, y_step_q4, w, h);
 }
@@ -321,17 +314,16 @@ void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
 void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int x_step_q4,
-                           const int16_t *filter_y, int y_step_q4,
-                           int w, int h) {
+                           const int16_t *filter_y, int y_step_q4, int w,
+                           int h) {
   vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
                            x_step_q4, filter_y, y_step_q4, w, h);
 }
 
-void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride,
-                     uint8_t *dst, ptrdiff_t dst_stride,
-                     const int16_t *filter_x, int x_step_q4,
-                     const int16_t *filter_y, int y_step_q4,
-                     int w, int h) {
+void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                         ptrdiff_t dst_stride, const int16_t *filter_x,
+                         int x_step_q4, const int16_t *filter_y, int y_step_q4,
+                         int w, int h) {
   vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4,
                       filter_y, y_step_q4, w, h);
 }
@@ -339,9 +331,8 @@ void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride,
 #if CONFIG_VPX_HIGHBITDEPTH
 static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
                                   uint8_t *dst8, ptrdiff_t dst_stride,
-                                  const InterpKernel *x_filters,
-                                  int x0_q4, int x_step_q4,
-                                  int w, int h, int bd) {
+                                  const InterpKernel *x_filters, int x0_q4,
+                                  int x_step_q4, int w, int h, int bd) {
   int x, y;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
@@ -352,8 +343,7 @@ static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
       const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
       const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
       int k, sum = 0;
-      for (k = 0; k < SUBPEL_TAPS; ++k)
-        sum += src_x[k] * x_filter[k];
+      for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
       dst[x] = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
       x_q4 += x_step_q4;
     }
@@ -364,9 +354,8 @@ static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
 
 static void highbd_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride,
                                       uint8_t *dst8, ptrdiff_t dst_stride,
-                                      const InterpKernel *x_filters,
-                                      int x0_q4, int x_step_q4,
-                                      int w, int h, int bd) {
+                                      const InterpKernel *x_filters, int x0_q4,
+                                      int x_step_q4, int w, int h, int bd) {
   int x, y;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
@@ -377,10 +366,10 @@ static void highbd_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride,
       const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
       const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
       int k, sum = 0;
-      for (k = 0; k < SUBPEL_TAPS; ++k)
-        sum += src_x[k] * x_filter[k];
-      dst[x] = ROUND_POWER_OF_TWO(dst[x] +
-          clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), 1);
+      for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
+      dst[x] = ROUND_POWER_OF_TWO(
+          dst[x] + clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd),
+          1);
       x_q4 += x_step_q4;
     }
     src += src_stride;
@@ -390,9 +379,8 @@ static void highbd_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride,
 
 static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride,
                                  uint8_t *dst8, ptrdiff_t dst_stride,
-                                 const InterpKernel *y_filters,
-                                 int y0_q4, int y_step_q4, int w, int h,
-                                 int bd) {
+                                 const InterpKernel *y_filters, int y0_q4,
+                                 int y_step_q4, int w, int h, int bd) {
   int x, y;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
@@ -405,8 +393,8 @@ static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride,
       int k, sum = 0;
       for (k = 0; k < SUBPEL_TAPS; ++k)
         sum += src_y[k * src_stride] * y_filter[k];
-      dst[y * dst_stride] = clip_pixel_highbd(
-          ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
+      dst[y * dst_stride] =
+          clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
       y_q4 += y_step_q4;
     }
     ++src;
@@ -416,9 +404,8 @@ static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride,
 
 static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride,
                                      uint8_t *dst8, ptrdiff_t dst_stride,
-                                     const InterpKernel *y_filters,
-                                     int y0_q4, int y_step_q4, int w, int h,
-                                     int bd) {
+                                     const InterpKernel *y_filters, int y0_q4,
+                                     int y_step_q4, int w, int h, int bd) {
   int x, y;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
@@ -431,8 +418,10 @@ static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride,
       int k, sum = 0;
       for (k = 0; k < SUBPEL_TAPS; ++k)
         sum += src_y[k * src_stride] * y_filter[k];
-      dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] +
-          clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), 1);
+      dst[y * dst_stride] = ROUND_POWER_OF_TWO(
+          dst[y * dst_stride] +
+              clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd),
+          1);
       y_q4 += y_step_q4;
     }
     ++src;
@@ -442,11 +431,9 @@ static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride,
 
 static void highbd_convolve(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
-                            const InterpKernel *const x_filters,
-                            int x0_q4, int x_step_q4,
-                            const InterpKernel *const y_filters,
-                            int y0_q4, int y_step_q4,
-                            int w, int h, int bd) {
+                            const InterpKernel *const x_filters, int x0_q4,
+                            int x_step_q4, const InterpKernel *const y_filters,
+                            int y0_q4, int y_step_q4, int w, int h, int bd) {
   // Note: Fixed size intermediate buffer, temp, places limits on parameters.
   // 2d filtering proceeds in 2 steps:
   //   (1) Interpolate horizontally into an intermediate buffer, temp.
@@ -461,35 +448,33 @@ static void highbd_convolve(const uint8_t *src, ptrdiff_t src_stride,
   // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
   uint16_t temp[64 * 135];
   int intermediate_height =
-          (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;
+      (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;
 
   assert(w <= 64);
   assert(h <= 64);
   assert(y_step_q4 <= 32);
   assert(x_step_q4 <= 32);
 
-  highbd_convolve_horiz(src - src_stride * (SUBPEL_TAPS / 2 - 1),
-                        src_stride, CONVERT_TO_BYTEPTR(temp), 64,
-                        x_filters, x0_q4, x_step_q4, w,
-                        intermediate_height, bd);
+  highbd_convolve_horiz(src - src_stride * (SUBPEL_TAPS / 2 - 1), src_stride,
+                        CONVERT_TO_BYTEPTR(temp), 64, x_filters, x0_q4,
+                        x_step_q4, w, intermediate_height, bd);
   highbd_convolve_vert(CONVERT_TO_BYTEPTR(temp) + 64 * (SUBPEL_TAPS / 2 - 1),
-                       64, dst, dst_stride, y_filters, y0_q4, y_step_q4,
-                       w, h, bd);
+                       64, dst, dst_stride, y_filters, y0_q4, y_step_q4, w, h,
+                       bd);
 }
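The high-bitdepth path reuses the 8-bit prototypes by passing uint16_t buffers through uint8_t * handles: CONVERT_TO_BYTEPTR wraps a short pointer for the call, and CONVERT_TO_SHORTPTR (used at the top of each highbd kernel) unwraps it. The encoding lives in vpx_ports/mem.h and should be treated as opaque; a sketch of the intended round trip:

/* The wrapped uint8_t * is only valid as an argument to code that will
 * unwrap it; never dereference it directly.                            */
uint16_t temp[64 * 135];
uint8_t *handle = CONVERT_TO_BYTEPTR(temp);     /* opaque handle     */
uint16_t *pixels = CONVERT_TO_SHORTPTR(handle); /* == temp again     */
pixels[0] = 1023;                               /* e.g. 10-bit white */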
 
-
 void vpx_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4,
-                                  int w, int h, int bd) {
+                                  const int16_t *filter_y, int y_step_q4, int w,
+                                  int h, int bd) {
   const InterpKernel *const filters_x = get_filter_base(filter_x);
   const int x0_q4 = get_filter_offset(filter_x, filters_x);
   (void)filter_y;
   (void)y_step_q4;
 
-  highbd_convolve_horiz(src, src_stride, dst, dst_stride, filters_x,
-                        x0_q4, x_step_q4, w, h, bd);
+  highbd_convolve_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4,
+                        x_step_q4, w, h, bd);
 }
 
 void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
@@ -502,22 +487,22 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
   (void)filter_y;
   (void)y_step_q4;
 
-  highbd_convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x,
-                            x0_q4, x_step_q4, w, h, bd);
+  highbd_convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4,
+                            x_step_q4, w, h, bd);
 }
 
 void vpx_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
-                                 const int16_t *filter_y, int y_step_q4,
-                                 int w, int h, int bd) {
+                                 const int16_t *filter_y, int y_step_q4, int w,
+                                 int h, int bd) {
   const InterpKernel *const filters_y = get_filter_base(filter_y);
   const int y0_q4 = get_filter_offset(filter_y, filters_y);
   (void)filter_x;
   (void)x_step_q4;
 
-  highbd_convolve_vert(src, src_stride, dst, dst_stride, filters_y,
-                       y0_q4, y_step_q4, w, h, bd);
+  highbd_convolve_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4,
+                       y_step_q4, w, h, bd);
 }
 
 void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
@@ -530,31 +515,30 @@ void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
   (void)filter_x;
   (void)x_step_q4;
 
-  highbd_convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y,
-                           y0_q4, y_step_q4, w, h, bd);
+  highbd_convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4,
+                           y_step_q4, w, h, bd);
 }
 
 void vpx_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
-                            const int16_t *filter_y, int y_step_q4,
-                            int w, int h, int bd) {
+                            const int16_t *filter_y, int y_step_q4, int w,
+                            int h, int bd) {
   const InterpKernel *const filters_x = get_filter_base(filter_x);
   const int x0_q4 = get_filter_offset(filter_x, filters_x);
 
   const InterpKernel *const filters_y = get_filter_base(filter_y);
   const int y0_q4 = get_filter_offset(filter_y, filters_y);
 
-  highbd_convolve(src, src_stride, dst, dst_stride,
-                  filters_x, x0_q4, x_step_q4,
+  highbd_convolve(src, src_stride, dst, dst_stride, filters_x, x0_q4, x_step_q4,
                   filters_y, y0_q4, y_step_q4, w, h, bd);
 }
 
 void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 const int16_t *filter_x, int x_step_q4,
-                                const int16_t *filter_y, int y_step_q4,
-                                int w, int h, int bd) {
+                                const int16_t *filter_y, int y_step_q4, int w,
+                                int h, int bd) {
   // Fixed size intermediate buffer places limits on parameters.
   DECLARE_ALIGNED(16, uint16_t, temp[64 * 64]);
   assert(w <= 64);
@@ -562,8 +546,8 @@ void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
 
   vpx_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), 64,
                          filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd);
-  vpx_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride,
-                            NULL, 0, NULL, 0, w, h, bd);
+  vpx_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride, NULL,
+                            0, NULL, 0, w, h, bd);
 }
 
 void vpx_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
diff --git a/vpx_dsp/vpx_convolve.h b/vpx_dsp/vpx_convolve.h
index aadffa8a1f89eb682d5c5f00654e1e5e99caf4d3..00ba535a26a69342e0e5b7526f691e2a1b6f80fb 100644
--- a/vpx_dsp/vpx_convolve.h
+++ b/vpx_dsp/vpx_convolve.h
@@ -20,8 +20,8 @@ extern "C" {
 typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4,
-                              int w, int h);
+                              const int16_t *filter_y, int y_step_q4, int w,
+                              int h);
 
 #if CONFIG_VPX_HIGHBITDEPTH
 typedef void (*highbd_convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
diff --git a/vpx_dsp/vpx_dsp_common.h b/vpx_dsp/vpx_dsp_common.h
index eaaa4a3e5e1163a33ab493242fcf920cc669f0c2..d12b4efc02f35d35e4e9a155669d9aadc7204317 100644
--- a/vpx_dsp/vpx_dsp_common.h
+++ b/vpx_dsp/vpx_dsp_common.h
@@ -53,12 +53,9 @@ static INLINE double fclamp(double value, double low, double high) {
 static INLINE uint16_t clip_pixel_highbd(int val, int bd) {
   switch (bd) {
     case 8:
-    default:
-      return (uint16_t)clamp(val, 0, 255);
-    case 10:
-      return (uint16_t)clamp(val, 0, 1023);
-    case 12:
-      return (uint16_t)clamp(val, 0, 4095);
+    default: return (uint16_t)clamp(val, 0, 255);
+    case 10: return (uint16_t)clamp(val, 0, 1023);
+    case 12: return (uint16_t)clamp(val, 0, 4095);
   }
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
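Each case of clip_pixel_highbd now fits on one line; all three are clamp(val, 0, (1 << bd) - 1) with the bound spelled out as a constant (255, 1023, 4095). An equivalent closed form, shown only to make that bound explicit; the switch in the source keeps the limits as compile-time literals:

static INLINE uint16_t clip_pixel_highbd_alt(int val, int bd) {
  const int max = (1 << bd) - 1; /* 255, 1023 or 4095 for bd 8/10/12 */
  return (uint16_t)clamp(val, 0, max);
}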
diff --git a/vpx_dsp/vpx_dsp_rtcd.c b/vpx_dsp/vpx_dsp_rtcd.c
index 5fe27b614bdcd7be4df2ea700f16190d1258ab27..030c456d391777b0ed3b365f7a13f81648a09a5d 100644
--- a/vpx_dsp/vpx_dsp_rtcd.c
+++ b/vpx_dsp/vpx_dsp_rtcd.c
@@ -12,6 +12,4 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_ports/vpx_once.h"
 
-void vpx_dsp_rtcd() {
-  once(setup_rtcd_internal);
-}
+void vpx_dsp_rtcd() { once(setup_rtcd_internal); }
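vpx_dsp_rtcd() is the run-time CPU detection hook: once() runs setup_rtcd_internal exactly one time, even with concurrent callers, after which the vpx_dsp entry points resolve to the best kernels for the host CPU. The same idiom, sketched with a hypothetical init function:

/* init_tables is hypothetical; vpx_ports/vpx_once.h provides once(). */
static void init_tables(void) { /* populate function-pointer tables */ }

void my_component_rtcd(void) { once(init_tables); }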
diff --git a/vpx_dsp/vpx_filter.h b/vpx_dsp/vpx_filter.h
index 2617febf3b3d506bf77b90df47f58ed30b09766c..6cea251bccaad06ec3982df06eca70fafc031c3e 100644
--- a/vpx_dsp/vpx_filter.h
+++ b/vpx_dsp/vpx_filter.h
@@ -13,7 +13,6 @@
 
 #include "vpx/vpx_integer.h"
 
-
 #ifdef __cplusplus
 extern "C" {
 #endif
diff --git a/vpx_dsp/x86/avg_intrin_sse2.c b/vpx_dsp/x86/avg_intrin_sse2.c
index f9af6cf97490237263b0ea6a02d514e7c29c7a4a..b0a104bad066a3c2272fae7eca40e390d8b32053 100644
--- a/vpx_dsp/x86/avg_intrin_sse2.c
+++ b/vpx_dsp/x86/avg_intrin_sse2.c
@@ -16,7 +16,7 @@
 void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
                          int *min, int *max) {
   __m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, negdiff, absdiff0, absdiff;
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   // Row 0
   s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
   d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d)), u0);
@@ -94,7 +94,7 @@ void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
 unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) {
   __m128i s0, s1, u0;
   unsigned int avg = 0;
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
   s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
   s0 = _mm_adds_epu16(s0, s1);
@@ -121,7 +121,7 @@ unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) {
 unsigned int vpx_avg_4x4_sse2(const uint8_t *s, int p) {
   __m128i s0, s1, u0;
   unsigned int avg = 0;
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
   s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
   s0 = _mm_adds_epu16(s0, s1);
@@ -248,8 +248,8 @@ void vpx_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride,
                              int16_t *coeff) {
   int idx;
   for (idx = 0; idx < 4; ++idx) {
-    int16_t const *src_ptr = src_diff + (idx >> 1) * 8 * src_stride
-                                + (idx & 0x01) * 8;
+    int16_t const *src_ptr =
+        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
     vpx_hadamard_8x8_sse2(src_ptr, src_stride, coeff + idx * 64);
   }
 
@@ -309,7 +309,7 @@ int vpx_satd_sse2(const int16_t *coeff, int length) {
   return _mm_cvtsi128_si32(accum);
 }
 
-void vpx_int_pro_row_sse2(int16_t *hbuf, uint8_t const*ref,
+void vpx_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref,
                           const int ref_stride, const int height) {
   int idx;
   __m128i zero = _mm_setzero_si128();
@@ -378,8 +378,7 @@ int16_t vpx_int_pro_col_sse2(uint8_t const *ref, const int width) {
   return _mm_extract_epi16(s0, 0);
 }
 
-int vpx_vector_var_sse2(int16_t const *ref, int16_t const *src,
-                        const int bwl) {
+int vpx_vector_var_sse2(int16_t const *ref, int16_t const *src, const int bwl) {
   int idx;
   int width = 4 << bwl;
   int16_t mean;
@@ -398,23 +397,23 @@ int vpx_vector_var_sse2(int16_t const *ref, int16_t const *src,
     diff = _mm_subs_epi16(v0, v1);
 
     sum = _mm_add_epi16(sum, diff);
-    v0  = _mm_madd_epi16(diff, diff);
+    v0 = _mm_madd_epi16(diff, diff);
     sse = _mm_add_epi32(sse, v0);
 
     ref += 8;
     src += 8;
   }
 
-  v0  = _mm_srli_si128(sum, 8);
+  v0 = _mm_srli_si128(sum, 8);
   sum = _mm_add_epi16(sum, v0);
-  v0  = _mm_srli_epi64(sum, 32);
+  v0 = _mm_srli_epi64(sum, 32);
   sum = _mm_add_epi16(sum, v0);
-  v0  = _mm_srli_epi32(sum, 16);
+  v0 = _mm_srli_epi32(sum, 16);
   sum = _mm_add_epi16(sum, v0);
 
-  v1  = _mm_srli_si128(sse, 8);
+  v1 = _mm_srli_si128(sse, 8);
   sse = _mm_add_epi32(sse, v1);
-  v1  = _mm_srli_epi64(sse, 32);
+  v1 = _mm_srli_epi64(sse, 32);
   sse = _mm_add_epi32(sse, v1);
 
   mean = _mm_extract_epi16(sum, 0);
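The tail of vpx_vector_var_sse2 is a standard SSE2 horizontal reduction: each shift (_mm_srli_si128, _mm_srli_epi64, _mm_srli_epi32) halves the distance between live lanes and the following add folds them together, leaving the total in lane 0 for extraction. The same ladder in isolation, as a sketch (valid while the running sums fit in 16 bits, as they do above):

#include <emmintrin.h>

/* Sum the eight int16 lanes of v into lane 0. */
static short hsum_epi16(__m128i v) {
  v = _mm_add_epi16(v, _mm_srli_si128(v, 8));  /* fold upper 64 bits */
  v = _mm_add_epi16(v, _mm_srli_epi64(v, 32)); /* fold upper 32 bits */
  v = _mm_add_epi16(v, _mm_srli_epi32(v, 16)); /* fold upper 16 bits */
  return (short)_mm_extract_epi16(v, 0);
}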
diff --git a/vpx_dsp/x86/convolve.h b/vpx_dsp/x86/convolve.h
index d15c67d57d74692fd10a2f36e802b24499740eb2..75b55cd626023fb8b2ae3cd7e759e0058f61ab2c 100644
--- a/vpx_dsp/x86/convolve.h
+++ b/vpx_dsp/x86/convolve.h
@@ -16,275 +16,195 @@
 #include "vpx/vpx_integer.h"
 #include "vpx_ports/mem.h"
 
-typedef void filter8_1dfunction (
-  const uint8_t *src_ptr,
-  ptrdiff_t src_pitch,
-  uint8_t *output_ptr,
-  ptrdiff_t out_pitch,
-  uint32_t output_height,
-  const int16_t *filter
-);
+typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch,
+                                uint8_t *output_ptr, ptrdiff_t out_pitch,
+                                uint32_t output_height, const int16_t *filter);
 
-#define FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \
-  void vpx_convolve8_##name##_##opt(const uint8_t *src, ptrdiff_t src_stride, \
-                                    uint8_t *dst, ptrdiff_t dst_stride, \
-                                    const int16_t *filter_x, int x_step_q4, \
-                                    const int16_t *filter_y, int y_step_q4, \
-                                    int w, int h) { \
-  assert(filter[3] != 128); \
-  assert(step_q4 == 16); \
-  if (filter[0] || filter[1] || filter[2]) { \
-    while (w >= 16) { \
-      vpx_filter_block1d16_##dir##8_##avg##opt(src_start, \
-                                               src_stride, \
-                                               dst, \
-                                               dst_stride, \
-                                               h, \
-                                               filter); \
-      src += 16; \
-      dst += 16; \
-      w -= 16; \
-    } \
-    while (w >= 8) { \
-      vpx_filter_block1d8_##dir##8_##avg##opt(src_start, \
-                                              src_stride, \
-                                              dst, \
-                                              dst_stride, \
-                                              h, \
-                                              filter); \
-      src += 8; \
-      dst += 8; \
-      w -= 8; \
-    } \
-    while (w >= 4) { \
-      vpx_filter_block1d4_##dir##8_##avg##opt(src_start, \
-                                              src_stride, \
-                                              dst, \
-                                              dst_stride, \
-                                              h, \
-                                              filter); \
-      src += 4; \
-      dst += 4; \
-      w -= 4; \
-    } \
-  } else { \
-    while (w >= 16) { \
-      vpx_filter_block1d16_##dir##2_##avg##opt(src, \
-                                               src_stride, \
-                                               dst, \
-                                               dst_stride, \
-                                               h, \
-                                               filter); \
-      src += 16; \
-      dst += 16; \
-      w -= 16; \
-    } \
-    while (w >= 8) { \
-      vpx_filter_block1d8_##dir##2_##avg##opt(src, \
-                                              src_stride, \
-                                              dst, \
-                                              dst_stride, \
-                                              h, \
-                                              filter); \
-      src += 8; \
-      dst += 8; \
-      w -= 8; \
-    } \
-    while (w >= 4) { \
-      vpx_filter_block1d4_##dir##2_##avg##opt(src, \
-                                              src_stride, \
-                                              dst, \
-                                              dst_stride, \
-                                              h, \
-                                              filter); \
-      src += 4; \
-      dst += 4; \
-      w -= 4; \
-    } \
-  } \
-}
+#define FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt)         \
+  void vpx_convolve8_##name##_##opt(                                         \
+      const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                \
+      ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,          \
+      const int16_t *filter_y, int y_step_q4, int w, int h) {                \
+    assert(filter[3] != 128);                                                \
+    assert(step_q4 == 16);                                                   \
+    if (filter[0] || filter[1] || filter[2]) {                               \
+      while (w >= 16) {                                                      \
+        vpx_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \
+                                                 dst_stride, h, filter);     \
+        src += 16;                                                           \
+        dst += 16;                                                           \
+        w -= 16;                                                             \
+      }                                                                      \
+      while (w >= 8) {                                                       \
+        vpx_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst,  \
+                                                dst_stride, h, filter);      \
+        src += 8;                                                            \
+        dst += 8;                                                            \
+        w -= 8;                                                              \
+      }                                                                      \
+      while (w >= 4) {                                                       \
+        vpx_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst,  \
+                                                dst_stride, h, filter);      \
+        src += 4;                                                            \
+        dst += 4;                                                            \
+        w -= 4;                                                              \
+      }                                                                      \
+    } else {                                                                 \
+      while (w >= 16) {                                                      \
+        vpx_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst,       \
+                                                 dst_stride, h, filter);     \
+        src += 16;                                                           \
+        dst += 16;                                                           \
+        w -= 16;                                                             \
+      }                                                                      \
+      while (w >= 8) {                                                       \
+        vpx_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst,        \
+                                                dst_stride, h, filter);      \
+        src += 8;                                                            \
+        dst += 8;                                                            \
+        w -= 8;                                                              \
+      }                                                                      \
+      while (w >= 4) {                                                       \
+        vpx_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst,        \
+                                                dst_stride, h, filter);      \
+        src += 4;                                                            \
+        dst += 4;                                                            \
+        w -= 4;                                                              \
+      }                                                                      \
+    }                                                                        \
+  }
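FUN_CONV_1D expands to a complete vpx_convolve8_{horiz,vert}_<opt> wrapper around the per-width block kernels: it tiles the block in 16-, 8- and 4-wide columns, using the 8-tap kernels when any outer tap is non-zero and the cheaper 2-tap kernels otherwise; src_start is a macro argument, so the same body serves both filter directions. The dispatch, reduced to a sketch (run_w16/run_w8/run_w4 stand in for the vpx_filter_block1d* kernels):

while (w >= 16) { run_w16(src, dst); src += 16; dst += 16; w -= 16; }
while (w >= 8)  { run_w8(src, dst);  src += 8;  dst += 8;  w -= 8;  }
while (w >= 4)  { run_w4(src, dst);  src += 4;  dst += 4;  w -= 4;  }
/* Any residual w < 4 is left untouched; callers only pass widths that
 * are multiples of 4.                                                 */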
 
-#define FUN_CONV_2D(avg, opt) \
-void vpx_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
-                              uint8_t *dst, ptrdiff_t dst_stride, \
-                              const int16_t *filter_x, int x_step_q4, \
-                              const int16_t *filter_y, int y_step_q4, \
-                              int w, int h) { \
-  assert(filter_x[3] != 128); \
-  assert(filter_y[3] != 128); \
-  assert(w <= 64); \
-  assert(h <= 64); \
-  assert(x_step_q4 == 16); \
-  assert(y_step_q4 == 16); \
-  if (filter_x[0] || filter_x[1] || filter_x[2]|| \
-      filter_y[0] || filter_y[1] || filter_y[2]) { \
-    DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 71]); \
-    vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \
-                              filter_x, x_step_q4, filter_y, y_step_q4, \
-                              w, h + 7); \
-    vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \
-                                    filter_x, x_step_q4, filter_y, \
-                                    y_step_q4, w, h); \
-  } else { \
-    DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 65]); \
-    vpx_convolve8_horiz_##opt(src, src_stride, fdata2, 64, \
-                              filter_x, x_step_q4, filter_y, y_step_q4, \
-                              w, h + 1); \
-    vpx_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, \
-                                    filter_x, x_step_q4, filter_y, \
-                                    y_step_q4, w, h); \
-  } \
-}
+#define FUN_CONV_2D(avg, opt)                                                 \
+  void vpx_convolve8_##avg##opt(                                              \
+      const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                 \
+      ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,           \
+      const int16_t *filter_y, int y_step_q4, int w, int h) {                 \
+    assert(filter_x[3] != 128);                                               \
+    assert(filter_y[3] != 128);                                               \
+    assert(w <= 64);                                                          \
+    assert(h <= 64);                                                          \
+    assert(x_step_q4 == 16);                                                  \
+    assert(y_step_q4 == 16);                                                  \
+    if (filter_x[0] || filter_x[1] || filter_x[2] || filter_y[0] ||           \
+        filter_y[1] || filter_y[2]) {                                         \
+      DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 71]);                          \
+      vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \
+                                filter_x, x_step_q4, filter_y, y_step_q4, w,  \
+                                h + 7);                                       \
+      vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride,   \
+                                      filter_x, x_step_q4, filter_y,          \
+                                      y_step_q4, w, h);                       \
+    } else {                                                                  \
+      DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 65]);                          \
+      vpx_convolve8_horiz_##opt(src, src_stride, fdata2, 64, filter_x,        \
+                                x_step_q4, filter_y, y_step_q4, w, h + 1);    \
+      vpx_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, filter_x,  \
+                                      x_step_q4, filter_y, y_step_q4, w, h);  \
+    }                                                                         \
+  }
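/*
 * A minimal sketch of the two-pass scheme FUN_CONV_2D encodes, valid under
 * the macro's own asserts (w <= 64, h <= 64, both steps == 16). An 8-tap
 * vertical filter needs 3 rows of context above the block and 4 below,
 * hence the 64 * 71 intermediate buffer and the h + 7 horizontal rows; when
 * the outer taps are all zero, h + 1 rows suffice. horiz and vert are
 * hypothetical stand-ins for the vpx_convolve8_horiz_* and
 * vpx_convolve8_*vert_* specializations the macro pastes together.
 */
typedef void (*conv2d_fn)(const uint8_t *src, ptrdiff_t src_stride,
                          uint8_t *dst, ptrdiff_t dst_stride,
                          const int16_t *filter_x, int x_step_q4,
                          const int16_t *filter_y, int y_step_q4, int w,
                          int h);

static void conv_2d_sketch(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride,
                           const int16_t *filter_x, const int16_t *filter_y,
                           int w, int h, conv2d_fn horiz, conv2d_fn vert) {
  DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 71]); /* macro as used above */
  if (filter_x[0] || filter_x[1] || filter_x[2] || filter_y[0] ||
      filter_y[1] || filter_y[2]) {
    /* 8-tap: start 3 rows early, produce h + 7 rows, then run the vertical
     * pass from row 3 of the intermediate buffer. */
    horiz(src - 3 * src_stride, src_stride, fdata2, 64, filter_x, 16,
          filter_y, 16, w, h + 7);
    vert(fdata2 + 3 * 64, 64, dst, dst_stride, filter_x, 16, filter_y, 16,
         w, h);
  } else {
    horiz(src, src_stride, fdata2, 64, filter_x, 16, filter_y, 16, w, h + 1);
    vert(fdata2, 64, dst, dst_stride, filter_x, 16, filter_y, 16, w, h);
  }
}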
 
 #if CONFIG_VPX_HIGHBITDEPTH
 
-typedef void highbd_filter8_1dfunction (
-  const uint16_t *src_ptr,
-  const ptrdiff_t src_pitch,
-  uint16_t *output_ptr,
-  ptrdiff_t out_pitch,
-  unsigned int output_height,
-  const int16_t *filter,
-  int bd
-);
+typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr,
+                                       const ptrdiff_t src_pitch,
+                                       uint16_t *output_ptr,
+                                       ptrdiff_t out_pitch,
+                                       unsigned int output_height,
+                                       const int16_t *filter, int bd);
 
 #define HIGH_FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \
-  void vpx_highbd_convolve8_##name##_##opt(const uint8_t *src8, \
-                                           ptrdiff_t src_stride, \
-                                           uint8_t *dst8, \
-                                           ptrdiff_t dst_stride, \
-                                           const int16_t *filter_x, \
-                                           int x_step_q4, \
-                                           const int16_t *filter_y, \
-                                           int y_step_q4, \
-                                           int w, int h, int bd) { \
-  if (step_q4 == 16 && filter[3] != 128) { \
-    uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-    uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
-    if (filter[0] || filter[1] || filter[2]) { \
-      while (w >= 16) { \
-        vpx_highbd_filter_block1d16_##dir##8_##avg##opt(src_start, \
-                                                        src_stride, \
-                                                        dst, \
-                                                        dst_stride, \
-                                                        h, \
-                                                        filter, \
-                                                        bd); \
-        src += 16; \
-        dst += 16; \
-        w -= 16; \
-      } \
-      while (w >= 8) { \
-        vpx_highbd_filter_block1d8_##dir##8_##avg##opt(src_start, \
-                                                       src_stride, \
-                                                       dst, \
-                                                       dst_stride, \
-                                                       h, \
-                                                       filter, \
-                                                       bd); \
-        src += 8; \
-        dst += 8; \
-        w -= 8; \
-      } \
-      while (w >= 4) { \
-        vpx_highbd_filter_block1d4_##dir##8_##avg##opt(src_start, \
-                                                       src_stride, \
-                                                       dst, \
-                                                       dst_stride, \
-                                                       h, \
-                                                       filter, \
-                                                       bd); \
-        src += 4; \
-        dst += 4; \
-        w -= 4; \
-      } \
-    } else { \
-      while (w >= 16) { \
-        vpx_highbd_filter_block1d16_##dir##2_##avg##opt(src, \
-                                                        src_stride, \
-                                                        dst, \
-                                                        dst_stride, \
-                                                        h, \
-                                                        filter, \
-                                                        bd); \
-        src += 16; \
-        dst += 16; \
-        w -= 16; \
-      } \
-      while (w >= 8) { \
-        vpx_highbd_filter_block1d8_##dir##2_##avg##opt(src, \
-                                                       src_stride, \
-                                                       dst, \
-                                                       dst_stride, \
-                                                       h, \
-                                                       filter, \
-                                                       bd); \
-        src += 8; \
-        dst += 8; \
-        w -= 8; \
-      } \
-      while (w >= 4) { \
-        vpx_highbd_filter_block1d4_##dir##2_##avg##opt(src, \
-                                                       src_stride, \
-                                                       dst, \
-                                                       dst_stride, \
-                                                       h, \
-                                                       filter, \
-                                                       bd); \
-        src += 4; \
-        dst += 4; \
-        w -= 4; \
-      } \
-    } \
-  } \
-  if (w) { \
-    vpx_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \
-                                    filter_x, x_step_q4, filter_y, y_step_q4, \
-                                    w, h, bd); \
-  } \
-}
+  void vpx_highbd_convolve8_##name##_##opt(                               \
+      const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8,           \
+      ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,       \
+      const int16_t *filter_y, int y_step_q4, int w, int h, int bd) {     \
+    if (step_q4 == 16 && filter[3] != 128) {                              \
+      uint16_t *src = CONVERT_TO_SHORTPTR(src8);                          \
+      uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                          \
+      if (filter[0] || filter[1] || filter[2]) {                          \
+        while (w >= 16) {                                                 \
+          vpx_highbd_filter_block1d16_##dir##8_##avg##opt(                \
+              src_start, src_stride, dst, dst_stride, h, filter, bd);     \
+          src += 16;                                                      \
+          dst += 16;                                                      \
+          w -= 16;                                                        \
+        }                                                                 \
+        while (w >= 8) {                                                  \
+          vpx_highbd_filter_block1d8_##dir##8_##avg##opt(                 \
+              src_start, src_stride, dst, dst_stride, h, filter, bd);     \
+          src += 8;                                                       \
+          dst += 8;                                                       \
+          w -= 8;                                                         \
+        }                                                                 \
+        while (w >= 4) {                                                  \
+          vpx_highbd_filter_block1d4_##dir##8_##avg##opt(                 \
+              src_start, src_stride, dst, dst_stride, h, filter, bd);     \
+          src += 4;                                                       \
+          dst += 4;                                                       \
+          w -= 4;                                                         \
+        }                                                                 \
+      } else {                                                            \
+        while (w >= 16) {                                                 \
+          vpx_highbd_filter_block1d16_##dir##2_##avg##opt(                \
+              src, src_stride, dst, dst_stride, h, filter, bd);           \
+          src += 16;                                                      \
+          dst += 16;                                                      \
+          w -= 16;                                                        \
+        }                                                                 \
+        while (w >= 8) {                                                  \
+          vpx_highbd_filter_block1d8_##dir##2_##avg##opt(                 \
+              src, src_stride, dst, dst_stride, h, filter, bd);           \
+          src += 8;                                                       \
+          dst += 8;                                                       \
+          w -= 8;                                                         \
+        }                                                                 \
+        while (w >= 4) {                                                  \
+          vpx_highbd_filter_block1d4_##dir##2_##avg##opt(                 \
+              src, src_stride, dst, dst_stride, h, filter, bd);           \
+          src += 4;                                                       \
+          dst += 4;                                                       \
+          w -= 4;                                                         \
+        }                                                                 \
+      }                                                                   \
+    }                                                                     \
+    if (w) {                                                              \
+      vpx_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \
+                                      filter_x, x_step_q4, filter_y,      \
+                                      y_step_q4, w, h, bd);               \
+    }                                                                     \
+  }
 
-#define HIGH_FUN_CONV_2D(avg, opt) \
-void vpx_highbd_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
-                                     uint8_t *dst, ptrdiff_t dst_stride, \
-                                     const int16_t *filter_x, int x_step_q4, \
-                                     const int16_t *filter_y, int y_step_q4, \
-                                     int w, int h, int bd) { \
-  assert(w <= 64); \
-  assert(h <= 64); \
-  if (x_step_q4 == 16 && y_step_q4 == 16) { \
-    if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \
-        filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \
-      DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 71]); \
-      vpx_highbd_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, \
-                                       CONVERT_TO_BYTEPTR(fdata2), 64, \
-                                       filter_x, x_step_q4, \
-                                       filter_y, y_step_q4, \
-                                       w, h + 7, bd); \
-      vpx_highbd_convolve8_##avg##vert_##opt(CONVERT_TO_BYTEPTR(fdata2) + 192, \
-                                             64, dst, dst_stride, \
-                                             filter_x, x_step_q4, \
-                                             filter_y, y_step_q4, \
-                                             w, h, bd); \
-    } else { \
-      DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 65]); \
-      vpx_highbd_convolve8_horiz_##opt(src, src_stride, \
-                                       CONVERT_TO_BYTEPTR(fdata2), 64, \
-                                       filter_x, x_step_q4, \
-                                       filter_y, y_step_q4, \
-                                       w, h + 1, bd); \
-      vpx_highbd_convolve8_##avg##vert_##opt(CONVERT_TO_BYTEPTR(fdata2), 64, \
-                                             dst, dst_stride, \
-                                             filter_x, x_step_q4, \
-                                             filter_y, y_step_q4, \
-                                             w, h, bd); \
-    } \
-  } else { \
-    vpx_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride, \
-                                  filter_x, x_step_q4, filter_y, y_step_q4, w, \
-                                  h, bd); \
-  } \
-}
+#define HIGH_FUN_CONV_2D(avg, opt)                                            \
+  void vpx_highbd_convolve8_##avg##opt(                                       \
+      const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                 \
+      ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,           \
+      const int16_t *filter_y, int y_step_q4, int w, int h, int bd) {         \
+    assert(w <= 64);                                                          \
+    assert(h <= 64);                                                          \
+    if (x_step_q4 == 16 && y_step_q4 == 16) {                                 \
+      if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 ||  \
+          filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) {  \
+        DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 71]);                       \
+        vpx_highbd_convolve8_horiz_##opt(                                     \
+            src - 3 * src_stride, src_stride, CONVERT_TO_BYTEPTR(fdata2), 64, \
+            filter_x, x_step_q4, filter_y, y_step_q4, w, h + 7, bd);          \
+        vpx_highbd_convolve8_##avg##vert_##opt(                               \
+            CONVERT_TO_BYTEPTR(fdata2) + 192, 64, dst, dst_stride, filter_x,  \
+            x_step_q4, filter_y, y_step_q4, w, h, bd);                        \
+      } else {                                                                \
+        DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 65]);                       \
+        vpx_highbd_convolve8_horiz_##opt(                                     \
+            src, src_stride, CONVERT_TO_BYTEPTR(fdata2), 64, filter_x,        \
+            x_step_q4, filter_y, y_step_q4, w, h + 1, bd);                    \
+        vpx_highbd_convolve8_##avg##vert_##opt(                               \
+            CONVERT_TO_BYTEPTR(fdata2), 64, dst, dst_stride, filter_x,        \
+            x_step_q4, filter_y, y_step_q4, w, h, bd);                        \
+      }                                                                       \
+    } else {                                                                  \
+      vpx_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride,         \
+                                    filter_x, x_step_q4, filter_y, y_step_q4, \
+                                    w, h, bd);                                \
+    }                                                                         \
+  }
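/*
 * A hedged check of the "+ 192" above: with libvpx's pointer-packing pair
 * (bodies copied from memory as an assumption; verify against
 * vpx_dsp_common.h), adding n to a converted byte pointer advances n
 * uint16_t samples, so + 192 skips the same 3 * 64 context rows that
 * "fdata2 + 3 * 64" skips in the 8-bit FUN_CONV_2D.
 */
#include <assert.h>
#define BYTEPTR_SKETCH(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))
#define SHORTPTR_SKETCH(x) ((uint16_t *)(((uintptr_t)(x)) << 1))

static void check_highbd_offset_sketch(uint16_t *fdata2) {
  assert(SHORTPTR_SKETCH(BYTEPTR_SKETCH(fdata2) + 192) == fdata2 + 3 * 64);
}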
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #endif  // VPX_DSP_X86_CONVOLVE_H_
diff --git a/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h b/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h
index 4df39dff861449ab5b590ed768642d3df0262016..7200dc16fad6c91d97064b1e388e551f3839343d 100644
--- a/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h
+++ b/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h
@@ -12,15 +12,15 @@
 
 #include "vpx_dsp/txfm_common.h"
 
-#define pair256_set_epi16(a, b) \
+#define pair256_set_epi16(a, b)                                            \
   _mm256_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                    (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                    (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                    (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a))
 
-#define pair256_set_epi32(a, b) \
-  _mm256_set_epi32((int)(b), (int)(a), (int)(b), (int)(a), \
-                   (int)(b), (int)(a), (int)(b), (int)(a))
+#define pair256_set_epi32(a, b)                                                \
+  _mm256_set_epi32((int)(b), (int)(a), (int)(b), (int)(a), (int)(b), (int)(a), \
+                   (int)(b), (int)(a))
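/*
 * An illustration (an assumption about intended use, not code from this
 * patch) of how the pair256 constants are consumed: unpacking two rows
 * interleaves them as (x0, y0, x1, y1, ...), and _mm256_madd_epi16 against
 * pair256_set_epi16(c0, c1) then yields x * c0 + y * c1 in each 32-bit
 * lane, i.e. one DCT butterfly rotation, finished by the usual
 * dct_const_round_shift.
 */
static INLINE __m256i butterfly_lo_sketch(__m256i x, __m256i y,
                                          __m256i k_pair,
                                          __m256i k_rounding) {
  const __m256i t = _mm256_unpacklo_epi16(x, y);  /* (x0, y0, x1, y1, ...) */
  const __m256i m = _mm256_madd_epi16(t, k_pair); /* x*c0 + y*c1, 32-bit */
  const __m256i r = _mm256_add_epi32(m, k_rounding);
  return _mm256_srai_epi32(r, DCT_CONST_BITS);
}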
 
 #if FDCT32x32_HIGH_PRECISION
 static INLINE __m256i k_madd_epi32_avx2(__m256i a, __m256i b) {
@@ -39,8 +39,7 @@ static INLINE __m256i k_packs_epi64_avx2(__m256i a, __m256i b) {
 }
 #endif
 
-void FDCT32x32_2D_AVX2(const int16_t *input,
-                  int16_t *output_org, int stride) {
+void FDCT32x32_2D_AVX2(const int16_t *input, int16_t *output_org, int stride) {
   // Calculate pre-multiplied strides
   const int str1 = stride;
   const int str2 = 2 * stride;
@@ -52,43 +51,45 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
   //    it's a pair of them that we need to repeat eight times. This is done
   //    by constructing the 32-bit constant corresponding to that pair.
   const __m256i k__cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
-  const __m256i k__cospi_p16_m16 = pair256_set_epi16(+cospi_16_64, -cospi_16_64);
-  const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64,   cospi_24_64);
+  const __m256i k__cospi_p16_m16 =
+      pair256_set_epi16(+cospi_16_64, -cospi_16_64);
+  const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
   const __m256i k__cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
-  const __m256i k__cospi_p24_p08 = pair256_set_epi16(+cospi_24_64,  cospi_8_64);
-  const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64,  cospi_20_64);
-  const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64,  cospi_12_64);
-  const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64,   cospi_28_64);
-  const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64,  cospi_4_64);
+  const __m256i k__cospi_p24_p08 = pair256_set_epi16(+cospi_24_64, cospi_8_64);
+  const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64, cospi_20_64);
+  const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64, cospi_4_64);
   const __m256i k__cospi_m28_m04 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
-  const __m256i k__cospi_m12_m20 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
-  const __m256i k__cospi_p30_p02 = pair256_set_epi16(+cospi_30_64,  cospi_2_64);
-  const __m256i k__cospi_p14_p18 = pair256_set_epi16(+cospi_14_64,  cospi_18_64);
-  const __m256i k__cospi_p22_p10 = pair256_set_epi16(+cospi_22_64,  cospi_10_64);
-  const __m256i k__cospi_p06_p26 = pair256_set_epi16(+cospi_6_64,   cospi_26_64);
-  const __m256i k__cospi_m26_p06 = pair256_set_epi16(-cospi_26_64,  cospi_6_64);
-  const __m256i k__cospi_m10_p22 = pair256_set_epi16(-cospi_10_64,  cospi_22_64);
-  const __m256i k__cospi_m18_p14 = pair256_set_epi16(-cospi_18_64,  cospi_14_64);
-  const __m256i k__cospi_m02_p30 = pair256_set_epi16(-cospi_2_64,   cospi_30_64);
-  const __m256i k__cospi_p31_p01 = pair256_set_epi16(+cospi_31_64,  cospi_1_64);
-  const __m256i k__cospi_p15_p17 = pair256_set_epi16(+cospi_15_64,  cospi_17_64);
-  const __m256i k__cospi_p23_p09 = pair256_set_epi16(+cospi_23_64,  cospi_9_64);
-  const __m256i k__cospi_p07_p25 = pair256_set_epi16(+cospi_7_64,   cospi_25_64);
-  const __m256i k__cospi_m25_p07 = pair256_set_epi16(-cospi_25_64,  cospi_7_64);
-  const __m256i k__cospi_m09_p23 = pair256_set_epi16(-cospi_9_64,   cospi_23_64);
-  const __m256i k__cospi_m17_p15 = pair256_set_epi16(-cospi_17_64,  cospi_15_64);
-  const __m256i k__cospi_m01_p31 = pair256_set_epi16(-cospi_1_64,   cospi_31_64);
-  const __m256i k__cospi_p27_p05 = pair256_set_epi16(+cospi_27_64,  cospi_5_64);
-  const __m256i k__cospi_p11_p21 = pair256_set_epi16(+cospi_11_64,  cospi_21_64);
-  const __m256i k__cospi_p19_p13 = pair256_set_epi16(+cospi_19_64,  cospi_13_64);
-  const __m256i k__cospi_p03_p29 = pair256_set_epi16(+cospi_3_64,   cospi_29_64);
-  const __m256i k__cospi_m29_p03 = pair256_set_epi16(-cospi_29_64,  cospi_3_64);
-  const __m256i k__cospi_m13_p19 = pair256_set_epi16(-cospi_13_64,  cospi_19_64);
-  const __m256i k__cospi_m21_p11 = pair256_set_epi16(-cospi_21_64,  cospi_11_64);
-  const __m256i k__cospi_m05_p27 = pair256_set_epi16(-cospi_5_64,   cospi_27_64);
+  const __m256i k__cospi_m12_m20 =
+      pair256_set_epi16(-cospi_12_64, -cospi_20_64);
+  const __m256i k__cospi_p30_p02 = pair256_set_epi16(+cospi_30_64, cospi_2_64);
+  const __m256i k__cospi_p14_p18 = pair256_set_epi16(+cospi_14_64, cospi_18_64);
+  const __m256i k__cospi_p22_p10 = pair256_set_epi16(+cospi_22_64, cospi_10_64);
+  const __m256i k__cospi_p06_p26 = pair256_set_epi16(+cospi_6_64, cospi_26_64);
+  const __m256i k__cospi_m26_p06 = pair256_set_epi16(-cospi_26_64, cospi_6_64);
+  const __m256i k__cospi_m10_p22 = pair256_set_epi16(-cospi_10_64, cospi_22_64);
+  const __m256i k__cospi_m18_p14 = pair256_set_epi16(-cospi_18_64, cospi_14_64);
+  const __m256i k__cospi_m02_p30 = pair256_set_epi16(-cospi_2_64, cospi_30_64);
+  const __m256i k__cospi_p31_p01 = pair256_set_epi16(+cospi_31_64, cospi_1_64);
+  const __m256i k__cospi_p15_p17 = pair256_set_epi16(+cospi_15_64, cospi_17_64);
+  const __m256i k__cospi_p23_p09 = pair256_set_epi16(+cospi_23_64, cospi_9_64);
+  const __m256i k__cospi_p07_p25 = pair256_set_epi16(+cospi_7_64, cospi_25_64);
+  const __m256i k__cospi_m25_p07 = pair256_set_epi16(-cospi_25_64, cospi_7_64);
+  const __m256i k__cospi_m09_p23 = pair256_set_epi16(-cospi_9_64, cospi_23_64);
+  const __m256i k__cospi_m17_p15 = pair256_set_epi16(-cospi_17_64, cospi_15_64);
+  const __m256i k__cospi_m01_p31 = pair256_set_epi16(-cospi_1_64, cospi_31_64);
+  const __m256i k__cospi_p27_p05 = pair256_set_epi16(+cospi_27_64, cospi_5_64);
+  const __m256i k__cospi_p11_p21 = pair256_set_epi16(+cospi_11_64, cospi_21_64);
+  const __m256i k__cospi_p19_p13 = pair256_set_epi16(+cospi_19_64, cospi_13_64);
+  const __m256i k__cospi_p03_p29 = pair256_set_epi16(+cospi_3_64, cospi_29_64);
+  const __m256i k__cospi_m29_p03 = pair256_set_epi16(-cospi_29_64, cospi_3_64);
+  const __m256i k__cospi_m13_p19 = pair256_set_epi16(-cospi_13_64, cospi_19_64);
+  const __m256i k__cospi_m21_p11 = pair256_set_epi16(-cospi_21_64, cospi_11_64);
+  const __m256i k__cospi_m05_p27 = pair256_set_epi16(-cospi_5_64, cospi_27_64);
   const __m256i k__DCT_CONST_ROUNDING = _mm256_set1_epi32(DCT_CONST_ROUNDING);
   const __m256i kZero = _mm256_set1_epi16(0);
-  const __m256i kOne  = _mm256_set1_epi16(1);
+  const __m256i kOne = _mm256_set1_epi16(1);
   // Do the two transform/transpose passes
   int pass;
   for (pass = 0; pass < 2; ++pass) {
@@ -103,125 +104,149 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
       // Note: even though all the loads below are aligned, using the aligned
       //       intrinsic makes the code slightly slower.
       if (0 == pass) {
-        const int16_t *in  = &input[column_start];
+        const int16_t *in = &input[column_start];
         // step1[i] =  (in[ 0 * stride] + in[(32 -  1) * stride]) << 2;
         // Note: the next four blocks could be in a loop. That would help the
         //       instruction cache but is actually slower.
         {
-          const int16_t *ina =  in +  0 * str1;
-          const int16_t *inb =  in + 31 * str1;
-          __m256i *step1a = &step1[ 0];
+          const int16_t *ina = in + 0 * str1;
+          const int16_t *inb = in + 31 * str1;
+          __m256i *step1a = &step1[0];
           __m256i *step1b = &step1[31];
-          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
-          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
-          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
-          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
-          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
-          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
-          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
-          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
-          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
+          const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina));
+          const __m256i ina1 =
+              _mm256_loadu_si256((const __m256i *)(ina + str1));
+          const __m256i ina2 =
+              _mm256_loadu_si256((const __m256i *)(ina + str2));
+          const __m256i ina3 =
+              _mm256_loadu_si256((const __m256i *)(ina + str3));
+          const __m256i inb3 =
+              _mm256_loadu_si256((const __m256i *)(inb - str3));
+          const __m256i inb2 =
+              _mm256_loadu_si256((const __m256i *)(inb - str2));
+          const __m256i inb1 =
+              _mm256_loadu_si256((const __m256i *)(inb - str1));
+          const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb));
+          step1a[0] = _mm256_add_epi16(ina0, inb0);
+          step1a[1] = _mm256_add_epi16(ina1, inb1);
+          step1a[2] = _mm256_add_epi16(ina2, inb2);
+          step1a[3] = _mm256_add_epi16(ina3, inb3);
           step1b[-3] = _mm256_sub_epi16(ina3, inb3);
           step1b[-2] = _mm256_sub_epi16(ina2, inb2);
           step1b[-1] = _mm256_sub_epi16(ina1, inb1);
           step1b[-0] = _mm256_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm256_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm256_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm256_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm256_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
         }
         {
-          const int16_t *ina =  in +  4 * str1;
-          const int16_t *inb =  in + 27 * str1;
-          __m256i *step1a = &step1[ 4];
+          const int16_t *ina = in + 4 * str1;
+          const int16_t *inb = in + 27 * str1;
+          __m256i *step1a = &step1[4];
           __m256i *step1b = &step1[27];
-          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
-          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
-          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
-          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
-          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
-          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
-          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
-          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
-          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
+          const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina));
+          const __m256i ina1 =
+              _mm256_loadu_si256((const __m256i *)(ina + str1));
+          const __m256i ina2 =
+              _mm256_loadu_si256((const __m256i *)(ina + str2));
+          const __m256i ina3 =
+              _mm256_loadu_si256((const __m256i *)(ina + str3));
+          const __m256i inb3 =
+              _mm256_loadu_si256((const __m256i *)(inb - str3));
+          const __m256i inb2 =
+              _mm256_loadu_si256((const __m256i *)(inb - str2));
+          const __m256i inb1 =
+              _mm256_loadu_si256((const __m256i *)(inb - str1));
+          const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb));
+          step1a[0] = _mm256_add_epi16(ina0, inb0);
+          step1a[1] = _mm256_add_epi16(ina1, inb1);
+          step1a[2] = _mm256_add_epi16(ina2, inb2);
+          step1a[3] = _mm256_add_epi16(ina3, inb3);
           step1b[-3] = _mm256_sub_epi16(ina3, inb3);
           step1b[-2] = _mm256_sub_epi16(ina2, inb2);
           step1b[-1] = _mm256_sub_epi16(ina1, inb1);
           step1b[-0] = _mm256_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm256_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm256_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm256_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm256_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
         }
         {
-          const int16_t *ina =  in +  8 * str1;
-          const int16_t *inb =  in + 23 * str1;
-          __m256i *step1a = &step1[ 8];
+          const int16_t *ina = in + 8 * str1;
+          const int16_t *inb = in + 23 * str1;
+          __m256i *step1a = &step1[8];
           __m256i *step1b = &step1[23];
-          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
-          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
-          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
-          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
-          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
-          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
-          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
-          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
-          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
+          const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina));
+          const __m256i ina1 =
+              _mm256_loadu_si256((const __m256i *)(ina + str1));
+          const __m256i ina2 =
+              _mm256_loadu_si256((const __m256i *)(ina + str2));
+          const __m256i ina3 =
+              _mm256_loadu_si256((const __m256i *)(ina + str3));
+          const __m256i inb3 =
+              _mm256_loadu_si256((const __m256i *)(inb - str3));
+          const __m256i inb2 =
+              _mm256_loadu_si256((const __m256i *)(inb - str2));
+          const __m256i inb1 =
+              _mm256_loadu_si256((const __m256i *)(inb - str1));
+          const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb));
+          step1a[0] = _mm256_add_epi16(ina0, inb0);
+          step1a[1] = _mm256_add_epi16(ina1, inb1);
+          step1a[2] = _mm256_add_epi16(ina2, inb2);
+          step1a[3] = _mm256_add_epi16(ina3, inb3);
           step1b[-3] = _mm256_sub_epi16(ina3, inb3);
           step1b[-2] = _mm256_sub_epi16(ina2, inb2);
           step1b[-1] = _mm256_sub_epi16(ina1, inb1);
           step1b[-0] = _mm256_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm256_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm256_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm256_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm256_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
         }
         {
-          const int16_t *ina =  in + 12 * str1;
-          const int16_t *inb =  in + 19 * str1;
+          const int16_t *ina = in + 12 * str1;
+          const int16_t *inb = in + 19 * str1;
           __m256i *step1a = &step1[12];
           __m256i *step1b = &step1[19];
-          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
-          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
-          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
-          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
-          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
-          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
-          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
-          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
-          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
+          const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina));
+          const __m256i ina1 =
+              _mm256_loadu_si256((const __m256i *)(ina + str1));
+          const __m256i ina2 =
+              _mm256_loadu_si256((const __m256i *)(ina + str2));
+          const __m256i ina3 =
+              _mm256_loadu_si256((const __m256i *)(ina + str3));
+          const __m256i inb3 =
+              _mm256_loadu_si256((const __m256i *)(inb - str3));
+          const __m256i inb2 =
+              _mm256_loadu_si256((const __m256i *)(inb - str2));
+          const __m256i inb1 =
+              _mm256_loadu_si256((const __m256i *)(inb - str1));
+          const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb));
+          step1a[0] = _mm256_add_epi16(ina0, inb0);
+          step1a[1] = _mm256_add_epi16(ina1, inb1);
+          step1a[2] = _mm256_add_epi16(ina2, inb2);
+          step1a[3] = _mm256_add_epi16(ina3, inb3);
           step1b[-3] = _mm256_sub_epi16(ina3, inb3);
           step1b[-2] = _mm256_sub_epi16(ina2, inb2);
           step1b[-1] = _mm256_sub_epi16(ina1, inb1);
           step1b[-0] = _mm256_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm256_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm256_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm256_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm256_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
@@ -236,52 +261,52 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         // Note: the next four blocks could be in a loop. That would help the
         //       instruction cache but is actually slower.
         {
-          __m256i in00  = _mm256_loadu_si256((const __m256i *)(in +  0 * 32));
-          __m256i in01  = _mm256_loadu_si256((const __m256i *)(in +  1 * 32));
-          __m256i in02  = _mm256_loadu_si256((const __m256i *)(in +  2 * 32));
-          __m256i in03  = _mm256_loadu_si256((const __m256i *)(in +  3 * 32));
-          __m256i in28  = _mm256_loadu_si256((const __m256i *)(in + 28 * 32));
-          __m256i in29  = _mm256_loadu_si256((const __m256i *)(in + 29 * 32));
-          __m256i in30  = _mm256_loadu_si256((const __m256i *)(in + 30 * 32));
-          __m256i in31  = _mm256_loadu_si256((const __m256i *)(in + 31 * 32));
-          step1[ 0] = _mm256_add_epi16(in00, in31);
-          step1[ 1] = _mm256_add_epi16(in01, in30);
-          step1[ 2] = _mm256_add_epi16(in02, in29);
-          step1[ 3] = _mm256_add_epi16(in03, in28);
+          __m256i in00 = _mm256_loadu_si256((const __m256i *)(in + 0 * 32));
+          __m256i in01 = _mm256_loadu_si256((const __m256i *)(in + 1 * 32));
+          __m256i in02 = _mm256_loadu_si256((const __m256i *)(in + 2 * 32));
+          __m256i in03 = _mm256_loadu_si256((const __m256i *)(in + 3 * 32));
+          __m256i in28 = _mm256_loadu_si256((const __m256i *)(in + 28 * 32));
+          __m256i in29 = _mm256_loadu_si256((const __m256i *)(in + 29 * 32));
+          __m256i in30 = _mm256_loadu_si256((const __m256i *)(in + 30 * 32));
+          __m256i in31 = _mm256_loadu_si256((const __m256i *)(in + 31 * 32));
+          step1[0] = _mm256_add_epi16(in00, in31);
+          step1[1] = _mm256_add_epi16(in01, in30);
+          step1[2] = _mm256_add_epi16(in02, in29);
+          step1[3] = _mm256_add_epi16(in03, in28);
           step1[28] = _mm256_sub_epi16(in03, in28);
           step1[29] = _mm256_sub_epi16(in02, in29);
           step1[30] = _mm256_sub_epi16(in01, in30);
           step1[31] = _mm256_sub_epi16(in00, in31);
         }
         {
-          __m256i in04  = _mm256_loadu_si256((const __m256i *)(in +  4 * 32));
-          __m256i in05  = _mm256_loadu_si256((const __m256i *)(in +  5 * 32));
-          __m256i in06  = _mm256_loadu_si256((const __m256i *)(in +  6 * 32));
-          __m256i in07  = _mm256_loadu_si256((const __m256i *)(in +  7 * 32));
-          __m256i in24  = _mm256_loadu_si256((const __m256i *)(in + 24 * 32));
-          __m256i in25  = _mm256_loadu_si256((const __m256i *)(in + 25 * 32));
-          __m256i in26  = _mm256_loadu_si256((const __m256i *)(in + 26 * 32));
-          __m256i in27  = _mm256_loadu_si256((const __m256i *)(in + 27 * 32));
-          step1[ 4] = _mm256_add_epi16(in04, in27);
-          step1[ 5] = _mm256_add_epi16(in05, in26);
-          step1[ 6] = _mm256_add_epi16(in06, in25);
-          step1[ 7] = _mm256_add_epi16(in07, in24);
+          __m256i in04 = _mm256_loadu_si256((const __m256i *)(in + 4 * 32));
+          __m256i in05 = _mm256_loadu_si256((const __m256i *)(in + 5 * 32));
+          __m256i in06 = _mm256_loadu_si256((const __m256i *)(in + 6 * 32));
+          __m256i in07 = _mm256_loadu_si256((const __m256i *)(in + 7 * 32));
+          __m256i in24 = _mm256_loadu_si256((const __m256i *)(in + 24 * 32));
+          __m256i in25 = _mm256_loadu_si256((const __m256i *)(in + 25 * 32));
+          __m256i in26 = _mm256_loadu_si256((const __m256i *)(in + 26 * 32));
+          __m256i in27 = _mm256_loadu_si256((const __m256i *)(in + 27 * 32));
+          step1[4] = _mm256_add_epi16(in04, in27);
+          step1[5] = _mm256_add_epi16(in05, in26);
+          step1[6] = _mm256_add_epi16(in06, in25);
+          step1[7] = _mm256_add_epi16(in07, in24);
           step1[24] = _mm256_sub_epi16(in07, in24);
           step1[25] = _mm256_sub_epi16(in06, in25);
           step1[26] = _mm256_sub_epi16(in05, in26);
           step1[27] = _mm256_sub_epi16(in04, in27);
         }
         {
-          __m256i in08  = _mm256_loadu_si256((const __m256i *)(in +  8 * 32));
-          __m256i in09  = _mm256_loadu_si256((const __m256i *)(in +  9 * 32));
-          __m256i in10  = _mm256_loadu_si256((const __m256i *)(in + 10 * 32));
-          __m256i in11  = _mm256_loadu_si256((const __m256i *)(in + 11 * 32));
-          __m256i in20  = _mm256_loadu_si256((const __m256i *)(in + 20 * 32));
-          __m256i in21  = _mm256_loadu_si256((const __m256i *)(in + 21 * 32));
-          __m256i in22  = _mm256_loadu_si256((const __m256i *)(in + 22 * 32));
-          __m256i in23  = _mm256_loadu_si256((const __m256i *)(in + 23 * 32));
-          step1[ 8] = _mm256_add_epi16(in08, in23);
-          step1[ 9] = _mm256_add_epi16(in09, in22);
+          __m256i in08 = _mm256_loadu_si256((const __m256i *)(in + 8 * 32));
+          __m256i in09 = _mm256_loadu_si256((const __m256i *)(in + 9 * 32));
+          __m256i in10 = _mm256_loadu_si256((const __m256i *)(in + 10 * 32));
+          __m256i in11 = _mm256_loadu_si256((const __m256i *)(in + 11 * 32));
+          __m256i in20 = _mm256_loadu_si256((const __m256i *)(in + 20 * 32));
+          __m256i in21 = _mm256_loadu_si256((const __m256i *)(in + 21 * 32));
+          __m256i in22 = _mm256_loadu_si256((const __m256i *)(in + 22 * 32));
+          __m256i in23 = _mm256_loadu_si256((const __m256i *)(in + 23 * 32));
+          step1[8] = _mm256_add_epi16(in08, in23);
+          step1[9] = _mm256_add_epi16(in09, in22);
           step1[10] = _mm256_add_epi16(in10, in21);
           step1[11] = _mm256_add_epi16(in11, in20);
           step1[20] = _mm256_sub_epi16(in11, in20);
@@ -290,14 +315,14 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           step1[23] = _mm256_sub_epi16(in08, in23);
         }
         {
-          __m256i in12  = _mm256_loadu_si256((const __m256i *)(in + 12 * 32));
-          __m256i in13  = _mm256_loadu_si256((const __m256i *)(in + 13 * 32));
-          __m256i in14  = _mm256_loadu_si256((const __m256i *)(in + 14 * 32));
-          __m256i in15  = _mm256_loadu_si256((const __m256i *)(in + 15 * 32));
-          __m256i in16  = _mm256_loadu_si256((const __m256i *)(in + 16 * 32));
-          __m256i in17  = _mm256_loadu_si256((const __m256i *)(in + 17 * 32));
-          __m256i in18  = _mm256_loadu_si256((const __m256i *)(in + 18 * 32));
-          __m256i in19  = _mm256_loadu_si256((const __m256i *)(in + 19 * 32));
+          __m256i in12 = _mm256_loadu_si256((const __m256i *)(in + 12 * 32));
+          __m256i in13 = _mm256_loadu_si256((const __m256i *)(in + 13 * 32));
+          __m256i in14 = _mm256_loadu_si256((const __m256i *)(in + 14 * 32));
+          __m256i in15 = _mm256_loadu_si256((const __m256i *)(in + 15 * 32));
+          __m256i in16 = _mm256_loadu_si256((const __m256i *)(in + 16 * 32));
+          __m256i in17 = _mm256_loadu_si256((const __m256i *)(in + 17 * 32));
+          __m256i in18 = _mm256_loadu_si256((const __m256i *)(in + 18 * 32));
+          __m256i in19 = _mm256_loadu_si256((const __m256i *)(in + 19 * 32));
           step1[12] = _mm256_add_epi16(in12, in19);
           step1[13] = _mm256_add_epi16(in13, in18);
           step1[14] = _mm256_add_epi16(in14, in17);
@@ -310,16 +335,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
       }
       // Stage 2
       {
-        step2[ 0] = _mm256_add_epi16(step1[0], step1[15]);
-        step2[ 1] = _mm256_add_epi16(step1[1], step1[14]);
-        step2[ 2] = _mm256_add_epi16(step1[2], step1[13]);
-        step2[ 3] = _mm256_add_epi16(step1[3], step1[12]);
-        step2[ 4] = _mm256_add_epi16(step1[4], step1[11]);
-        step2[ 5] = _mm256_add_epi16(step1[5], step1[10]);
-        step2[ 6] = _mm256_add_epi16(step1[6], step1[ 9]);
-        step2[ 7] = _mm256_add_epi16(step1[7], step1[ 8]);
-        step2[ 8] = _mm256_sub_epi16(step1[7], step1[ 8]);
-        step2[ 9] = _mm256_sub_epi16(step1[6], step1[ 9]);
+        step2[0] = _mm256_add_epi16(step1[0], step1[15]);
+        step2[1] = _mm256_add_epi16(step1[1], step1[14]);
+        step2[2] = _mm256_add_epi16(step1[2], step1[13]);
+        step2[3] = _mm256_add_epi16(step1[3], step1[12]);
+        step2[4] = _mm256_add_epi16(step1[4], step1[11]);
+        step2[5] = _mm256_add_epi16(step1[5], step1[10]);
+        step2[6] = _mm256_add_epi16(step1[6], step1[9]);
+        step2[7] = _mm256_add_epi16(step1[7], step1[8]);
+        step2[8] = _mm256_sub_epi16(step1[7], step1[8]);
+        step2[9] = _mm256_sub_epi16(step1[6], step1[9]);
         step2[10] = _mm256_sub_epi16(step1[5], step1[10]);
         step2[11] = _mm256_sub_epi16(step1[4], step1[11]);
         step2[12] = _mm256_sub_epi16(step1[3], step1[12]);
@@ -353,22 +378,38 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         const __m256i s2_27_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_p16);
         const __m256i s2_27_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_p16);
         // dct_const_round_shift
-        const __m256i s2_20_4 = _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_20_5 = _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_21_4 = _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_21_5 = _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_22_4 = _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_22_5 = _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_23_4 = _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_23_5 = _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_24_4 = _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_24_5 = _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_25_4 = _mm256_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_25_5 = _mm256_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_26_4 = _mm256_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_26_5 = _mm256_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_27_4 = _mm256_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_27_5 = _mm256_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_20_4 =
+            _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_20_5 =
+            _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_21_4 =
+            _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_21_5 =
+            _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_22_4 =
+            _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_22_5 =
+            _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_23_4 =
+            _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_23_5 =
+            _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_24_4 =
+            _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_24_5 =
+            _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_25_4 =
+            _mm256_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_25_5 =
+            _mm256_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_26_4 =
+            _mm256_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_26_5 =
+            _mm256_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_27_4 =
+            _mm256_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_27_5 =
+            _mm256_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
         const __m256i s2_20_6 = _mm256_srai_epi32(s2_20_4, DCT_CONST_BITS);
         const __m256i s2_20_7 = _mm256_srai_epi32(s2_20_5, DCT_CONST_BITS);
         const __m256i s2_21_6 = _mm256_srai_epi32(s2_21_4, DCT_CONST_BITS);
@@ -400,49 +441,49 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
       // drop the magnitude by half, hence the intermediate values are within
       // the range of 16 bits.
       if (1 == pass) {
-        __m256i s3_00_0 = _mm256_cmpgt_epi16(kZero,step2[ 0]);
-        __m256i s3_01_0 = _mm256_cmpgt_epi16(kZero,step2[ 1]);
-        __m256i s3_02_0 = _mm256_cmpgt_epi16(kZero,step2[ 2]);
-        __m256i s3_03_0 = _mm256_cmpgt_epi16(kZero,step2[ 3]);
-        __m256i s3_04_0 = _mm256_cmpgt_epi16(kZero,step2[ 4]);
-        __m256i s3_05_0 = _mm256_cmpgt_epi16(kZero,step2[ 5]);
-        __m256i s3_06_0 = _mm256_cmpgt_epi16(kZero,step2[ 6]);
-        __m256i s3_07_0 = _mm256_cmpgt_epi16(kZero,step2[ 7]);
-        __m256i s2_08_0 = _mm256_cmpgt_epi16(kZero,step2[ 8]);
-        __m256i s2_09_0 = _mm256_cmpgt_epi16(kZero,step2[ 9]);
-        __m256i s3_10_0 = _mm256_cmpgt_epi16(kZero,step2[10]);
-        __m256i s3_11_0 = _mm256_cmpgt_epi16(kZero,step2[11]);
-        __m256i s3_12_0 = _mm256_cmpgt_epi16(kZero,step2[12]);
-        __m256i s3_13_0 = _mm256_cmpgt_epi16(kZero,step2[13]);
-        __m256i s2_14_0 = _mm256_cmpgt_epi16(kZero,step2[14]);
-        __m256i s2_15_0 = _mm256_cmpgt_epi16(kZero,step2[15]);
-        __m256i s3_16_0 = _mm256_cmpgt_epi16(kZero,step1[16]);
-        __m256i s3_17_0 = _mm256_cmpgt_epi16(kZero,step1[17]);
-        __m256i s3_18_0 = _mm256_cmpgt_epi16(kZero,step1[18]);
-        __m256i s3_19_0 = _mm256_cmpgt_epi16(kZero,step1[19]);
-        __m256i s3_20_0 = _mm256_cmpgt_epi16(kZero,step2[20]);
-        __m256i s3_21_0 = _mm256_cmpgt_epi16(kZero,step2[21]);
-        __m256i s3_22_0 = _mm256_cmpgt_epi16(kZero,step2[22]);
-        __m256i s3_23_0 = _mm256_cmpgt_epi16(kZero,step2[23]);
-        __m256i s3_24_0 = _mm256_cmpgt_epi16(kZero,step2[24]);
-        __m256i s3_25_0 = _mm256_cmpgt_epi16(kZero,step2[25]);
-        __m256i s3_26_0 = _mm256_cmpgt_epi16(kZero,step2[26]);
-        __m256i s3_27_0 = _mm256_cmpgt_epi16(kZero,step2[27]);
-        __m256i s3_28_0 = _mm256_cmpgt_epi16(kZero,step1[28]);
-        __m256i s3_29_0 = _mm256_cmpgt_epi16(kZero,step1[29]);
-        __m256i s3_30_0 = _mm256_cmpgt_epi16(kZero,step1[30]);
-        __m256i s3_31_0 = _mm256_cmpgt_epi16(kZero,step1[31]);
-
-        step2[ 0] = _mm256_sub_epi16(step2[ 0], s3_00_0);
-        step2[ 1] = _mm256_sub_epi16(step2[ 1], s3_01_0);
-        step2[ 2] = _mm256_sub_epi16(step2[ 2], s3_02_0);
-        step2[ 3] = _mm256_sub_epi16(step2[ 3], s3_03_0);
-        step2[ 4] = _mm256_sub_epi16(step2[ 4], s3_04_0);
-        step2[ 5] = _mm256_sub_epi16(step2[ 5], s3_05_0);
-        step2[ 6] = _mm256_sub_epi16(step2[ 6], s3_06_0);
-        step2[ 7] = _mm256_sub_epi16(step2[ 7], s3_07_0);
-        step2[ 8] = _mm256_sub_epi16(step2[ 8], s2_08_0);
-        step2[ 9] = _mm256_sub_epi16(step2[ 9], s2_09_0);
+        __m256i s3_00_0 = _mm256_cmpgt_epi16(kZero, step2[0]);
+        __m256i s3_01_0 = _mm256_cmpgt_epi16(kZero, step2[1]);
+        __m256i s3_02_0 = _mm256_cmpgt_epi16(kZero, step2[2]);
+        __m256i s3_03_0 = _mm256_cmpgt_epi16(kZero, step2[3]);
+        __m256i s3_04_0 = _mm256_cmpgt_epi16(kZero, step2[4]);
+        __m256i s3_05_0 = _mm256_cmpgt_epi16(kZero, step2[5]);
+        __m256i s3_06_0 = _mm256_cmpgt_epi16(kZero, step2[6]);
+        __m256i s3_07_0 = _mm256_cmpgt_epi16(kZero, step2[7]);
+        __m256i s2_08_0 = _mm256_cmpgt_epi16(kZero, step2[8]);
+        __m256i s2_09_0 = _mm256_cmpgt_epi16(kZero, step2[9]);
+        __m256i s3_10_0 = _mm256_cmpgt_epi16(kZero, step2[10]);
+        __m256i s3_11_0 = _mm256_cmpgt_epi16(kZero, step2[11]);
+        __m256i s3_12_0 = _mm256_cmpgt_epi16(kZero, step2[12]);
+        __m256i s3_13_0 = _mm256_cmpgt_epi16(kZero, step2[13]);
+        __m256i s2_14_0 = _mm256_cmpgt_epi16(kZero, step2[14]);
+        __m256i s2_15_0 = _mm256_cmpgt_epi16(kZero, step2[15]);
+        __m256i s3_16_0 = _mm256_cmpgt_epi16(kZero, step1[16]);
+        __m256i s3_17_0 = _mm256_cmpgt_epi16(kZero, step1[17]);
+        __m256i s3_18_0 = _mm256_cmpgt_epi16(kZero, step1[18]);
+        __m256i s3_19_0 = _mm256_cmpgt_epi16(kZero, step1[19]);
+        __m256i s3_20_0 = _mm256_cmpgt_epi16(kZero, step2[20]);
+        __m256i s3_21_0 = _mm256_cmpgt_epi16(kZero, step2[21]);
+        __m256i s3_22_0 = _mm256_cmpgt_epi16(kZero, step2[22]);
+        __m256i s3_23_0 = _mm256_cmpgt_epi16(kZero, step2[23]);
+        __m256i s3_24_0 = _mm256_cmpgt_epi16(kZero, step2[24]);
+        __m256i s3_25_0 = _mm256_cmpgt_epi16(kZero, step2[25]);
+        __m256i s3_26_0 = _mm256_cmpgt_epi16(kZero, step2[26]);
+        __m256i s3_27_0 = _mm256_cmpgt_epi16(kZero, step2[27]);
+        __m256i s3_28_0 = _mm256_cmpgt_epi16(kZero, step1[28]);
+        __m256i s3_29_0 = _mm256_cmpgt_epi16(kZero, step1[29]);
+        __m256i s3_30_0 = _mm256_cmpgt_epi16(kZero, step1[30]);
+        __m256i s3_31_0 = _mm256_cmpgt_epi16(kZero, step1[31]);
+
+        step2[0] = _mm256_sub_epi16(step2[0], s3_00_0);
+        step2[1] = _mm256_sub_epi16(step2[1], s3_01_0);
+        step2[2] = _mm256_sub_epi16(step2[2], s3_02_0);
+        step2[3] = _mm256_sub_epi16(step2[3], s3_03_0);
+        step2[4] = _mm256_sub_epi16(step2[4], s3_04_0);
+        step2[5] = _mm256_sub_epi16(step2[5], s3_05_0);
+        step2[6] = _mm256_sub_epi16(step2[6], s3_06_0);
+        step2[7] = _mm256_sub_epi16(step2[7], s3_07_0);
+        step2[8] = _mm256_sub_epi16(step2[8], s2_08_0);
+        step2[9] = _mm256_sub_epi16(step2[9], s2_09_0);
         step2[10] = _mm256_sub_epi16(step2[10], s3_10_0);
         step2[11] = _mm256_sub_epi16(step2[11], s3_11_0);
         step2[12] = _mm256_sub_epi16(step2[12], s3_12_0);
@@ -466,16 +507,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         step1[30] = _mm256_sub_epi16(step1[30], s3_30_0);
         step1[31] = _mm256_sub_epi16(step1[31], s3_31_0);
 
-        step2[ 0] = _mm256_add_epi16(step2[ 0], kOne);
-        step2[ 1] = _mm256_add_epi16(step2[ 1], kOne);
-        step2[ 2] = _mm256_add_epi16(step2[ 2], kOne);
-        step2[ 3] = _mm256_add_epi16(step2[ 3], kOne);
-        step2[ 4] = _mm256_add_epi16(step2[ 4], kOne);
-        step2[ 5] = _mm256_add_epi16(step2[ 5], kOne);
-        step2[ 6] = _mm256_add_epi16(step2[ 6], kOne);
-        step2[ 7] = _mm256_add_epi16(step2[ 7], kOne);
-        step2[ 8] = _mm256_add_epi16(step2[ 8], kOne);
-        step2[ 9] = _mm256_add_epi16(step2[ 9], kOne);
+        step2[0] = _mm256_add_epi16(step2[0], kOne);
+        step2[1] = _mm256_add_epi16(step2[1], kOne);
+        step2[2] = _mm256_add_epi16(step2[2], kOne);
+        step2[3] = _mm256_add_epi16(step2[3], kOne);
+        step2[4] = _mm256_add_epi16(step2[4], kOne);
+        step2[5] = _mm256_add_epi16(step2[5], kOne);
+        step2[6] = _mm256_add_epi16(step2[6], kOne);
+        step2[7] = _mm256_add_epi16(step2[7], kOne);
+        step2[8] = _mm256_add_epi16(step2[8], kOne);
+        step2[9] = _mm256_add_epi16(step2[9], kOne);
         step2[10] = _mm256_add_epi16(step2[10], kOne);
         step2[11] = _mm256_add_epi16(step2[11], kOne);
         step2[12] = _mm256_add_epi16(step2[12], kOne);
@@ -499,16 +540,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         step1[30] = _mm256_add_epi16(step1[30], kOne);
         step1[31] = _mm256_add_epi16(step1[31], kOne);
 
-        step2[ 0] = _mm256_srai_epi16(step2[ 0], 2);
-        step2[ 1] = _mm256_srai_epi16(step2[ 1], 2);
-        step2[ 2] = _mm256_srai_epi16(step2[ 2], 2);
-        step2[ 3] = _mm256_srai_epi16(step2[ 3], 2);
-        step2[ 4] = _mm256_srai_epi16(step2[ 4], 2);
-        step2[ 5] = _mm256_srai_epi16(step2[ 5], 2);
-        step2[ 6] = _mm256_srai_epi16(step2[ 6], 2);
-        step2[ 7] = _mm256_srai_epi16(step2[ 7], 2);
-        step2[ 8] = _mm256_srai_epi16(step2[ 8], 2);
-        step2[ 9] = _mm256_srai_epi16(step2[ 9], 2);
+        step2[0] = _mm256_srai_epi16(step2[0], 2);
+        step2[1] = _mm256_srai_epi16(step2[1], 2);
+        step2[2] = _mm256_srai_epi16(step2[2], 2);
+        step2[3] = _mm256_srai_epi16(step2[3], 2);
+        step2[4] = _mm256_srai_epi16(step2[4], 2);
+        step2[5] = _mm256_srai_epi16(step2[5], 2);
+        step2[6] = _mm256_srai_epi16(step2[6], 2);
+        step2[7] = _mm256_srai_epi16(step2[7], 2);
+        step2[8] = _mm256_srai_epi16(step2[8], 2);
+        step2[9] = _mm256_srai_epi16(step2[9], 2);
         step2[10] = _mm256_srai_epi16(step2[10], 2);
         step2[11] = _mm256_srai_epi16(step2[11], 2);
         step2[12] = _mm256_srai_epi16(step2[12], 2);
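The three vector passes in this hunk (cmpgt, sub, add kOne, srai) implement a sign-corrected rounding shift: _mm256_cmpgt_epi16(kZero, x) yields -1 in every negative lane, so subtracting the mask adds 1 to negative values before the +1 and >>2 applied to all lanes. A minimal scalar model of one lane, assuming the helper name round_shift_div4 (not part of the source):

#include <stdint.h>

/* Scalar model of the cmpgt/sub/add/srai sequence above: each 16-bit
 * lane computes (x + 1 + (x < 0)) >> 2, a divide-by-4 that rounds
 * symmetrically, with halves going toward zero for both signs. */
static int16_t round_shift_div4(int16_t x) {
  return (int16_t)((x + 1 + (x < 0)) >> 2);
}

For example, round_shift_div4(2) and round_shift_div4(-2) both return 0, while 3 and -3 map to 1 and -1.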
@@ -537,616 +578,796 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
 #if FDCT32x32_HIGH_PRECISION
       if (pass == 0) {
 #endif
-      // Stage 3
-      {
-        step3[0] = _mm256_add_epi16(step2[(8 - 1)], step2[0]);
-        step3[1] = _mm256_add_epi16(step2[(8 - 2)], step2[1]);
-        step3[2] = _mm256_add_epi16(step2[(8 - 3)], step2[2]);
-        step3[3] = _mm256_add_epi16(step2[(8 - 4)], step2[3]);
-        step3[4] = _mm256_sub_epi16(step2[(8 - 5)], step2[4]);
-        step3[5] = _mm256_sub_epi16(step2[(8 - 6)], step2[5]);
-        step3[6] = _mm256_sub_epi16(step2[(8 - 7)], step2[6]);
-        step3[7] = _mm256_sub_epi16(step2[(8 - 8)], step2[7]);
-      }
-      {
-        const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
-        const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]);
-        const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]);
-        const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]);
-        const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16);
-        const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16);
-        const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16);
-        const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16);
-        const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16);
-        const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16);
-        const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
-        const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_10_6 = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
-        const __m256i s3_10_7 = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
-        const __m256i s3_11_6 = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
-        const __m256i s3_11_7 = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS);
-        const __m256i s3_12_6 = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS);
-        const __m256i s3_12_7 = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS);
-        const __m256i s3_13_6 = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS);
-        const __m256i s3_13_7 = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS);
-        // Combine
-        step3[10] = _mm256_packs_epi32(s3_10_6, s3_10_7);
-        step3[11] = _mm256_packs_epi32(s3_11_6, s3_11_7);
-        step3[12] = _mm256_packs_epi32(s3_12_6, s3_12_7);
-        step3[13] = _mm256_packs_epi32(s3_13_6, s3_13_7);
-      }
-      {
-        step3[16] = _mm256_add_epi16(step2[23], step1[16]);
-        step3[17] = _mm256_add_epi16(step2[22], step1[17]);
-        step3[18] = _mm256_add_epi16(step2[21], step1[18]);
-        step3[19] = _mm256_add_epi16(step2[20], step1[19]);
-        step3[20] = _mm256_sub_epi16(step1[19], step2[20]);
-        step3[21] = _mm256_sub_epi16(step1[18], step2[21]);
-        step3[22] = _mm256_sub_epi16(step1[17], step2[22]);
-        step3[23] = _mm256_sub_epi16(step1[16], step2[23]);
-        step3[24] = _mm256_sub_epi16(step1[31], step2[24]);
-        step3[25] = _mm256_sub_epi16(step1[30], step2[25]);
-        step3[26] = _mm256_sub_epi16(step1[29], step2[26]);
-        step3[27] = _mm256_sub_epi16(step1[28], step2[27]);
-        step3[28] = _mm256_add_epi16(step2[27], step1[28]);
-        step3[29] = _mm256_add_epi16(step2[26], step1[29]);
-        step3[30] = _mm256_add_epi16(step2[25], step1[30]);
-        step3[31] = _mm256_add_epi16(step2[24], step1[31]);
-      }
+        // Stage 3
+        {
+          step3[0] = _mm256_add_epi16(step2[(8 - 1)], step2[0]);
+          step3[1] = _mm256_add_epi16(step2[(8 - 2)], step2[1]);
+          step3[2] = _mm256_add_epi16(step2[(8 - 3)], step2[2]);
+          step3[3] = _mm256_add_epi16(step2[(8 - 4)], step2[3]);
+          step3[4] = _mm256_sub_epi16(step2[(8 - 5)], step2[4]);
+          step3[5] = _mm256_sub_epi16(step2[(8 - 6)], step2[5]);
+          step3[6] = _mm256_sub_epi16(step2[(8 - 7)], step2[6]);
+          step3[7] = _mm256_sub_epi16(step2[(8 - 8)], step2[7]);
+        }
+        {
+          const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
+          const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]);
+          const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]);
+          const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]);
+          const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16);
+          const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16);
+          const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16);
+          const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16);
+          const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16);
+          const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16);
+          const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
+          const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
+          // dct_const_round_shift
+          const __m256i s3_10_4 =
+              _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_10_5 =
+              _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_11_4 =
+              _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_11_5 =
+              _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_12_4 =
+              _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_12_5 =
+              _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_13_4 =
+              _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_13_5 =
+              _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_10_6 = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
+          const __m256i s3_10_7 = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
+          const __m256i s3_11_6 = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
+          const __m256i s3_11_7 = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS);
+          const __m256i s3_12_6 = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS);
+          const __m256i s3_12_7 = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS);
+          const __m256i s3_13_6 = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS);
+          const __m256i s3_13_7 = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS);
+          // Combine
+          step3[10] = _mm256_packs_epi32(s3_10_6, s3_10_7);
+          step3[11] = _mm256_packs_epi32(s3_11_6, s3_11_7);
+          step3[12] = _mm256_packs_epi32(s3_12_6, s3_12_7);
+          step3[13] = _mm256_packs_epi32(s3_13_6, s3_13_7);
+        }
+        {
+          step3[16] = _mm256_add_epi16(step2[23], step1[16]);
+          step3[17] = _mm256_add_epi16(step2[22], step1[17]);
+          step3[18] = _mm256_add_epi16(step2[21], step1[18]);
+          step3[19] = _mm256_add_epi16(step2[20], step1[19]);
+          step3[20] = _mm256_sub_epi16(step1[19], step2[20]);
+          step3[21] = _mm256_sub_epi16(step1[18], step2[21]);
+          step3[22] = _mm256_sub_epi16(step1[17], step2[22]);
+          step3[23] = _mm256_sub_epi16(step1[16], step2[23]);
+          step3[24] = _mm256_sub_epi16(step1[31], step2[24]);
+          step3[25] = _mm256_sub_epi16(step1[30], step2[25]);
+          step3[26] = _mm256_sub_epi16(step1[29], step2[26]);
+          step3[27] = _mm256_sub_epi16(step1[28], step2[27]);
+          step3[28] = _mm256_add_epi16(step2[27], step1[28]);
+          step3[29] = _mm256_add_epi16(step2[26], step1[29]);
+          step3[30] = _mm256_add_epi16(step2[25], step1[30]);
+          step3[31] = _mm256_add_epi16(step2[24], step1[31]);
+        }
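Every rotation block in these stages follows the same five-step idiom: unpacklo/unpackhi interleave two 16-bit rows, _mm256_madd_epi16 multiplies each interleaved pair by a pair of fixed-point cosine constants and sums into 32 bits, a rounding constant is added, the sum is shifted right arithmetically by DCT_CONST_BITS, and _mm256_packs_epi32 saturates back to 16 bits. A scalar sketch of one output value, assuming DCT_CONST_BITS is 14 as defined in libvpx's txfm_common.h (the helper name is illustrative):

#include <stdint.h>

#define DCT_CONST_BITS 14

/* One lane of the unpack/madd/round/pack idiom: a * c0 + b * c1 in
 * 32-bit precision, then dct_const_round_shift back to 16 bits.
 * c0 and c1 are Q14 fixed-point cosines such as cospi_16_64. */
static int16_t butterfly_lane(int16_t a, int16_t b, int16_t c0, int16_t c1) {
  const int32_t rounding = 1 << (DCT_CONST_BITS - 1);
  const int32_t t = (int32_t)a * c0 + (int32_t)b * c1;
  return (int16_t)((t + rounding) >> DCT_CONST_BITS);
}

The k__cospi_* vectors hold the (c0, c1) pairs pre-interleaved, so a single madd produces eight such dot products per 256-bit register.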
 
-      // Stage 4
-      {
-        step1[ 0] = _mm256_add_epi16(step3[ 3], step3[ 0]);
-        step1[ 1] = _mm256_add_epi16(step3[ 2], step3[ 1]);
-        step1[ 2] = _mm256_sub_epi16(step3[ 1], step3[ 2]);
-        step1[ 3] = _mm256_sub_epi16(step3[ 0], step3[ 3]);
-        step1[ 8] = _mm256_add_epi16(step3[11], step2[ 8]);
-        step1[ 9] = _mm256_add_epi16(step3[10], step2[ 9]);
-        step1[10] = _mm256_sub_epi16(step2[ 9], step3[10]);
-        step1[11] = _mm256_sub_epi16(step2[ 8], step3[11]);
-        step1[12] = _mm256_sub_epi16(step2[15], step3[12]);
-        step1[13] = _mm256_sub_epi16(step2[14], step3[13]);
-        step1[14] = _mm256_add_epi16(step3[13], step2[14]);
-        step1[15] = _mm256_add_epi16(step3[12], step2[15]);
-      }
-      {
-        const __m256i s1_05_0 = _mm256_unpacklo_epi16(step3[6], step3[5]);
-        const __m256i s1_05_1 = _mm256_unpackhi_epi16(step3[6], step3[5]);
-        const __m256i s1_05_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_m16);
-        const __m256i s1_05_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_m16);
-        const __m256i s1_06_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_p16);
-        const __m256i s1_06_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m256i s1_05_4 = _mm256_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_05_5 = _mm256_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_06_4 = _mm256_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_06_5 = _mm256_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_05_6 = _mm256_srai_epi32(s1_05_4, DCT_CONST_BITS);
-        const __m256i s1_05_7 = _mm256_srai_epi32(s1_05_5, DCT_CONST_BITS);
-        const __m256i s1_06_6 = _mm256_srai_epi32(s1_06_4, DCT_CONST_BITS);
-        const __m256i s1_06_7 = _mm256_srai_epi32(s1_06_5, DCT_CONST_BITS);
-        // Combine
-        step1[5] = _mm256_packs_epi32(s1_05_6, s1_05_7);
-        step1[6] = _mm256_packs_epi32(s1_06_6, s1_06_7);
-      }
-      {
-        const __m256i s1_18_0 = _mm256_unpacklo_epi16(step3[18], step3[29]);
-        const __m256i s1_18_1 = _mm256_unpackhi_epi16(step3[18], step3[29]);
-        const __m256i s1_19_0 = _mm256_unpacklo_epi16(step3[19], step3[28]);
-        const __m256i s1_19_1 = _mm256_unpackhi_epi16(step3[19], step3[28]);
-        const __m256i s1_20_0 = _mm256_unpacklo_epi16(step3[20], step3[27]);
-        const __m256i s1_20_1 = _mm256_unpackhi_epi16(step3[20], step3[27]);
-        const __m256i s1_21_0 = _mm256_unpacklo_epi16(step3[21], step3[26]);
-        const __m256i s1_21_1 = _mm256_unpackhi_epi16(step3[21], step3[26]);
-        const __m256i s1_18_2 = _mm256_madd_epi16(s1_18_0, k__cospi_m08_p24);
-        const __m256i s1_18_3 = _mm256_madd_epi16(s1_18_1, k__cospi_m08_p24);
-        const __m256i s1_19_2 = _mm256_madd_epi16(s1_19_0, k__cospi_m08_p24);
-        const __m256i s1_19_3 = _mm256_madd_epi16(s1_19_1, k__cospi_m08_p24);
-        const __m256i s1_20_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m24_m08);
-        const __m256i s1_20_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m24_m08);
-        const __m256i s1_21_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m24_m08);
-        const __m256i s1_21_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m24_m08);
-        const __m256i s1_26_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m08_p24);
-        const __m256i s1_26_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m08_p24);
-        const __m256i s1_27_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m08_p24);
-        const __m256i s1_27_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m08_p24);
-        const __m256i s1_28_2 = _mm256_madd_epi16(s1_19_0, k__cospi_p24_p08);
-        const __m256i s1_28_3 = _mm256_madd_epi16(s1_19_1, k__cospi_p24_p08);
-        const __m256i s1_29_2 = _mm256_madd_epi16(s1_18_0, k__cospi_p24_p08);
-        const __m256i s1_29_3 = _mm256_madd_epi16(s1_18_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m256i s1_18_4 = _mm256_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_18_5 = _mm256_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_19_4 = _mm256_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_19_5 = _mm256_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_20_4 = _mm256_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_20_5 = _mm256_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_21_4 = _mm256_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_21_5 = _mm256_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_26_4 = _mm256_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_26_5 = _mm256_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_27_4 = _mm256_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_27_5 = _mm256_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_28_4 = _mm256_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_28_5 = _mm256_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_29_4 = _mm256_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_29_5 = _mm256_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_18_6 = _mm256_srai_epi32(s1_18_4, DCT_CONST_BITS);
-        const __m256i s1_18_7 = _mm256_srai_epi32(s1_18_5, DCT_CONST_BITS);
-        const __m256i s1_19_6 = _mm256_srai_epi32(s1_19_4, DCT_CONST_BITS);
-        const __m256i s1_19_7 = _mm256_srai_epi32(s1_19_5, DCT_CONST_BITS);
-        const __m256i s1_20_6 = _mm256_srai_epi32(s1_20_4, DCT_CONST_BITS);
-        const __m256i s1_20_7 = _mm256_srai_epi32(s1_20_5, DCT_CONST_BITS);
-        const __m256i s1_21_6 = _mm256_srai_epi32(s1_21_4, DCT_CONST_BITS);
-        const __m256i s1_21_7 = _mm256_srai_epi32(s1_21_5, DCT_CONST_BITS);
-        const __m256i s1_26_6 = _mm256_srai_epi32(s1_26_4, DCT_CONST_BITS);
-        const __m256i s1_26_7 = _mm256_srai_epi32(s1_26_5, DCT_CONST_BITS);
-        const __m256i s1_27_6 = _mm256_srai_epi32(s1_27_4, DCT_CONST_BITS);
-        const __m256i s1_27_7 = _mm256_srai_epi32(s1_27_5, DCT_CONST_BITS);
-        const __m256i s1_28_6 = _mm256_srai_epi32(s1_28_4, DCT_CONST_BITS);
-        const __m256i s1_28_7 = _mm256_srai_epi32(s1_28_5, DCT_CONST_BITS);
-        const __m256i s1_29_6 = _mm256_srai_epi32(s1_29_4, DCT_CONST_BITS);
-        const __m256i s1_29_7 = _mm256_srai_epi32(s1_29_5, DCT_CONST_BITS);
-        // Combine
-        step1[18] = _mm256_packs_epi32(s1_18_6, s1_18_7);
-        step1[19] = _mm256_packs_epi32(s1_19_6, s1_19_7);
-        step1[20] = _mm256_packs_epi32(s1_20_6, s1_20_7);
-        step1[21] = _mm256_packs_epi32(s1_21_6, s1_21_7);
-        step1[26] = _mm256_packs_epi32(s1_26_6, s1_26_7);
-        step1[27] = _mm256_packs_epi32(s1_27_6, s1_27_7);
-        step1[28] = _mm256_packs_epi32(s1_28_6, s1_28_7);
-        step1[29] = _mm256_packs_epi32(s1_29_6, s1_29_7);
-      }
-      // Stage 5
-      {
-        step2[4] = _mm256_add_epi16(step1[5], step3[4]);
-        step2[5] = _mm256_sub_epi16(step3[4], step1[5]);
-        step2[6] = _mm256_sub_epi16(step3[7], step1[6]);
-        step2[7] = _mm256_add_epi16(step1[6], step3[7]);
-      }
-      {
-        const __m256i out_00_0 = _mm256_unpacklo_epi16(step1[0], step1[1]);
-        const __m256i out_00_1 = _mm256_unpackhi_epi16(step1[0], step1[1]);
-        const __m256i out_08_0 = _mm256_unpacklo_epi16(step1[2], step1[3]);
-        const __m256i out_08_1 = _mm256_unpackhi_epi16(step1[2], step1[3]);
-        const __m256i out_00_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_p16);
-        const __m256i out_00_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_p16);
-        const __m256i out_16_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_m16);
-        const __m256i out_16_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_m16);
-        const __m256i out_08_2 = _mm256_madd_epi16(out_08_0, k__cospi_p24_p08);
-        const __m256i out_08_3 = _mm256_madd_epi16(out_08_1, k__cospi_p24_p08);
-        const __m256i out_24_2 = _mm256_madd_epi16(out_08_0, k__cospi_m08_p24);
-        const __m256i out_24_3 = _mm256_madd_epi16(out_08_1, k__cospi_m08_p24);
-        // dct_const_round_shift
-        const __m256i out_00_4 = _mm256_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_00_5 = _mm256_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_16_4 = _mm256_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_16_5 = _mm256_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_08_4 = _mm256_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_08_5 = _mm256_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_24_4 = _mm256_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_24_5 = _mm256_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_00_6 = _mm256_srai_epi32(out_00_4, DCT_CONST_BITS);
-        const __m256i out_00_7 = _mm256_srai_epi32(out_00_5, DCT_CONST_BITS);
-        const __m256i out_16_6 = _mm256_srai_epi32(out_16_4, DCT_CONST_BITS);
-        const __m256i out_16_7 = _mm256_srai_epi32(out_16_5, DCT_CONST_BITS);
-        const __m256i out_08_6 = _mm256_srai_epi32(out_08_4, DCT_CONST_BITS);
-        const __m256i out_08_7 = _mm256_srai_epi32(out_08_5, DCT_CONST_BITS);
-        const __m256i out_24_6 = _mm256_srai_epi32(out_24_4, DCT_CONST_BITS);
-        const __m256i out_24_7 = _mm256_srai_epi32(out_24_5, DCT_CONST_BITS);
-        // Combine
-        out[ 0] = _mm256_packs_epi32(out_00_6, out_00_7);
-        out[16] = _mm256_packs_epi32(out_16_6, out_16_7);
-        out[ 8] = _mm256_packs_epi32(out_08_6, out_08_7);
-        out[24] = _mm256_packs_epi32(out_24_6, out_24_7);
-      }
-      {
-        const __m256i s2_09_0 = _mm256_unpacklo_epi16(step1[ 9], step1[14]);
-        const __m256i s2_09_1 = _mm256_unpackhi_epi16(step1[ 9], step1[14]);
-        const __m256i s2_10_0 = _mm256_unpacklo_epi16(step1[10], step1[13]);
-        const __m256i s2_10_1 = _mm256_unpackhi_epi16(step1[10], step1[13]);
-        const __m256i s2_09_2 = _mm256_madd_epi16(s2_09_0, k__cospi_m08_p24);
-        const __m256i s2_09_3 = _mm256_madd_epi16(s2_09_1, k__cospi_m08_p24);
-        const __m256i s2_10_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m24_m08);
-        const __m256i s2_10_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m24_m08);
-        const __m256i s2_13_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m08_p24);
-        const __m256i s2_13_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m08_p24);
-        const __m256i s2_14_2 = _mm256_madd_epi16(s2_09_0, k__cospi_p24_p08);
-        const __m256i s2_14_3 = _mm256_madd_epi16(s2_09_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m256i s2_09_4 = _mm256_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_09_5 = _mm256_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_10_4 = _mm256_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_10_5 = _mm256_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_13_4 = _mm256_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_13_5 = _mm256_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_14_4 = _mm256_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_14_5 = _mm256_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_09_6 = _mm256_srai_epi32(s2_09_4, DCT_CONST_BITS);
-        const __m256i s2_09_7 = _mm256_srai_epi32(s2_09_5, DCT_CONST_BITS);
-        const __m256i s2_10_6 = _mm256_srai_epi32(s2_10_4, DCT_CONST_BITS);
-        const __m256i s2_10_7 = _mm256_srai_epi32(s2_10_5, DCT_CONST_BITS);
-        const __m256i s2_13_6 = _mm256_srai_epi32(s2_13_4, DCT_CONST_BITS);
-        const __m256i s2_13_7 = _mm256_srai_epi32(s2_13_5, DCT_CONST_BITS);
-        const __m256i s2_14_6 = _mm256_srai_epi32(s2_14_4, DCT_CONST_BITS);
-        const __m256i s2_14_7 = _mm256_srai_epi32(s2_14_5, DCT_CONST_BITS);
-        // Combine
-        step2[ 9] = _mm256_packs_epi32(s2_09_6, s2_09_7);
-        step2[10] = _mm256_packs_epi32(s2_10_6, s2_10_7);
-        step2[13] = _mm256_packs_epi32(s2_13_6, s2_13_7);
-        step2[14] = _mm256_packs_epi32(s2_14_6, s2_14_7);
-      }
-      {
-        step2[16] = _mm256_add_epi16(step1[19], step3[16]);
-        step2[17] = _mm256_add_epi16(step1[18], step3[17]);
-        step2[18] = _mm256_sub_epi16(step3[17], step1[18]);
-        step2[19] = _mm256_sub_epi16(step3[16], step1[19]);
-        step2[20] = _mm256_sub_epi16(step3[23], step1[20]);
-        step2[21] = _mm256_sub_epi16(step3[22], step1[21]);
-        step2[22] = _mm256_add_epi16(step1[21], step3[22]);
-        step2[23] = _mm256_add_epi16(step1[20], step3[23]);
-        step2[24] = _mm256_add_epi16(step1[27], step3[24]);
-        step2[25] = _mm256_add_epi16(step1[26], step3[25]);
-        step2[26] = _mm256_sub_epi16(step3[25], step1[26]);
-        step2[27] = _mm256_sub_epi16(step3[24], step1[27]);
-        step2[28] = _mm256_sub_epi16(step3[31], step1[28]);
-        step2[29] = _mm256_sub_epi16(step3[30], step1[29]);
-        step2[30] = _mm256_add_epi16(step1[29], step3[30]);
-        step2[31] = _mm256_add_epi16(step1[28], step3[31]);
-      }
-      // Stage 6
-      {
-        const __m256i out_04_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
-        const __m256i out_04_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
-        const __m256i out_20_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
-        const __m256i out_20_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
-        const __m256i out_12_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
-        const __m256i out_12_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
-        const __m256i out_28_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
-        const __m256i out_28_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
-        const __m256i out_04_2 = _mm256_madd_epi16(out_04_0, k__cospi_p28_p04);
-        const __m256i out_04_3 = _mm256_madd_epi16(out_04_1, k__cospi_p28_p04);
-        const __m256i out_20_2 = _mm256_madd_epi16(out_20_0, k__cospi_p12_p20);
-        const __m256i out_20_3 = _mm256_madd_epi16(out_20_1, k__cospi_p12_p20);
-        const __m256i out_12_2 = _mm256_madd_epi16(out_12_0, k__cospi_m20_p12);
-        const __m256i out_12_3 = _mm256_madd_epi16(out_12_1, k__cospi_m20_p12);
-        const __m256i out_28_2 = _mm256_madd_epi16(out_28_0, k__cospi_m04_p28);
-        const __m256i out_28_3 = _mm256_madd_epi16(out_28_1, k__cospi_m04_p28);
-        // dct_const_round_shift
-        const __m256i out_04_4 = _mm256_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_04_5 = _mm256_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_20_4 = _mm256_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_20_5 = _mm256_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_12_4 = _mm256_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_12_5 = _mm256_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_28_4 = _mm256_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_28_5 = _mm256_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_04_6 = _mm256_srai_epi32(out_04_4, DCT_CONST_BITS);
-        const __m256i out_04_7 = _mm256_srai_epi32(out_04_5, DCT_CONST_BITS);
-        const __m256i out_20_6 = _mm256_srai_epi32(out_20_4, DCT_CONST_BITS);
-        const __m256i out_20_7 = _mm256_srai_epi32(out_20_5, DCT_CONST_BITS);
-        const __m256i out_12_6 = _mm256_srai_epi32(out_12_4, DCT_CONST_BITS);
-        const __m256i out_12_7 = _mm256_srai_epi32(out_12_5, DCT_CONST_BITS);
-        const __m256i out_28_6 = _mm256_srai_epi32(out_28_4, DCT_CONST_BITS);
-        const __m256i out_28_7 = _mm256_srai_epi32(out_28_5, DCT_CONST_BITS);
-        // Combine
-        out[ 4] = _mm256_packs_epi32(out_04_6, out_04_7);
-        out[20] = _mm256_packs_epi32(out_20_6, out_20_7);
-        out[12] = _mm256_packs_epi32(out_12_6, out_12_7);
-        out[28] = _mm256_packs_epi32(out_28_6, out_28_7);
-      }
-      {
-        step3[ 8] = _mm256_add_epi16(step2[ 9], step1[ 8]);
-        step3[ 9] = _mm256_sub_epi16(step1[ 8], step2[ 9]);
-        step3[10] = _mm256_sub_epi16(step1[11], step2[10]);
-        step3[11] = _mm256_add_epi16(step2[10], step1[11]);
-        step3[12] = _mm256_add_epi16(step2[13], step1[12]);
-        step3[13] = _mm256_sub_epi16(step1[12], step2[13]);
-        step3[14] = _mm256_sub_epi16(step1[15], step2[14]);
-        step3[15] = _mm256_add_epi16(step2[14], step1[15]);
-      }
-      {
-        const __m256i s3_17_0 = _mm256_unpacklo_epi16(step2[17], step2[30]);
-        const __m256i s3_17_1 = _mm256_unpackhi_epi16(step2[17], step2[30]);
-        const __m256i s3_18_0 = _mm256_unpacklo_epi16(step2[18], step2[29]);
-        const __m256i s3_18_1 = _mm256_unpackhi_epi16(step2[18], step2[29]);
-        const __m256i s3_21_0 = _mm256_unpacklo_epi16(step2[21], step2[26]);
-        const __m256i s3_21_1 = _mm256_unpackhi_epi16(step2[21], step2[26]);
-        const __m256i s3_22_0 = _mm256_unpacklo_epi16(step2[22], step2[25]);
-        const __m256i s3_22_1 = _mm256_unpackhi_epi16(step2[22], step2[25]);
-        const __m256i s3_17_2 = _mm256_madd_epi16(s3_17_0, k__cospi_m04_p28);
-        const __m256i s3_17_3 = _mm256_madd_epi16(s3_17_1, k__cospi_m04_p28);
-        const __m256i s3_18_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m28_m04);
-        const __m256i s3_18_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m28_m04);
-        const __m256i s3_21_2 = _mm256_madd_epi16(s3_21_0, k__cospi_m20_p12);
-        const __m256i s3_21_3 = _mm256_madd_epi16(s3_21_1, k__cospi_m20_p12);
-        const __m256i s3_22_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m12_m20);
-        const __m256i s3_22_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m12_m20);
-        const __m256i s3_25_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m20_p12);
-        const __m256i s3_25_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m20_p12);
-        const __m256i s3_26_2 = _mm256_madd_epi16(s3_21_0, k__cospi_p12_p20);
-        const __m256i s3_26_3 = _mm256_madd_epi16(s3_21_1, k__cospi_p12_p20);
-        const __m256i s3_29_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m04_p28);
-        const __m256i s3_29_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m04_p28);
-        const __m256i s3_30_2 = _mm256_madd_epi16(s3_17_0, k__cospi_p28_p04);
-        const __m256i s3_30_3 = _mm256_madd_epi16(s3_17_1, k__cospi_p28_p04);
-        // dct_const_round_shift
-        const __m256i s3_17_4 = _mm256_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_17_5 = _mm256_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_18_4 = _mm256_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_18_5 = _mm256_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_21_4 = _mm256_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_21_5 = _mm256_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_22_4 = _mm256_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_22_5 = _mm256_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_17_6 = _mm256_srai_epi32(s3_17_4, DCT_CONST_BITS);
-        const __m256i s3_17_7 = _mm256_srai_epi32(s3_17_5, DCT_CONST_BITS);
-        const __m256i s3_18_6 = _mm256_srai_epi32(s3_18_4, DCT_CONST_BITS);
-        const __m256i s3_18_7 = _mm256_srai_epi32(s3_18_5, DCT_CONST_BITS);
-        const __m256i s3_21_6 = _mm256_srai_epi32(s3_21_4, DCT_CONST_BITS);
-        const __m256i s3_21_7 = _mm256_srai_epi32(s3_21_5, DCT_CONST_BITS);
-        const __m256i s3_22_6 = _mm256_srai_epi32(s3_22_4, DCT_CONST_BITS);
-        const __m256i s3_22_7 = _mm256_srai_epi32(s3_22_5, DCT_CONST_BITS);
-        const __m256i s3_25_4 = _mm256_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_25_5 = _mm256_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_26_4 = _mm256_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_26_5 = _mm256_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_29_4 = _mm256_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_29_5 = _mm256_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_30_4 = _mm256_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_30_5 = _mm256_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_25_6 = _mm256_srai_epi32(s3_25_4, DCT_CONST_BITS);
-        const __m256i s3_25_7 = _mm256_srai_epi32(s3_25_5, DCT_CONST_BITS);
-        const __m256i s3_26_6 = _mm256_srai_epi32(s3_26_4, DCT_CONST_BITS);
-        const __m256i s3_26_7 = _mm256_srai_epi32(s3_26_5, DCT_CONST_BITS);
-        const __m256i s3_29_6 = _mm256_srai_epi32(s3_29_4, DCT_CONST_BITS);
-        const __m256i s3_29_7 = _mm256_srai_epi32(s3_29_5, DCT_CONST_BITS);
-        const __m256i s3_30_6 = _mm256_srai_epi32(s3_30_4, DCT_CONST_BITS);
-        const __m256i s3_30_7 = _mm256_srai_epi32(s3_30_5, DCT_CONST_BITS);
-        // Combine
-        step3[17] = _mm256_packs_epi32(s3_17_6, s3_17_7);
-        step3[18] = _mm256_packs_epi32(s3_18_6, s3_18_7);
-        step3[21] = _mm256_packs_epi32(s3_21_6, s3_21_7);
-        step3[22] = _mm256_packs_epi32(s3_22_6, s3_22_7);
-        // Combine
-        step3[25] = _mm256_packs_epi32(s3_25_6, s3_25_7);
-        step3[26] = _mm256_packs_epi32(s3_26_6, s3_26_7);
-        step3[29] = _mm256_packs_epi32(s3_29_6, s3_29_7);
-        step3[30] = _mm256_packs_epi32(s3_30_6, s3_30_7);
-      }
-      // Stage 7
-      {
-        const __m256i out_02_0 = _mm256_unpacklo_epi16(step3[ 8], step3[15]);
-        const __m256i out_02_1 = _mm256_unpackhi_epi16(step3[ 8], step3[15]);
-        const __m256i out_18_0 = _mm256_unpacklo_epi16(step3[ 9], step3[14]);
-        const __m256i out_18_1 = _mm256_unpackhi_epi16(step3[ 9], step3[14]);
-        const __m256i out_10_0 = _mm256_unpacklo_epi16(step3[10], step3[13]);
-        const __m256i out_10_1 = _mm256_unpackhi_epi16(step3[10], step3[13]);
-        const __m256i out_26_0 = _mm256_unpacklo_epi16(step3[11], step3[12]);
-        const __m256i out_26_1 = _mm256_unpackhi_epi16(step3[11], step3[12]);
-        const __m256i out_02_2 = _mm256_madd_epi16(out_02_0, k__cospi_p30_p02);
-        const __m256i out_02_3 = _mm256_madd_epi16(out_02_1, k__cospi_p30_p02);
-        const __m256i out_18_2 = _mm256_madd_epi16(out_18_0, k__cospi_p14_p18);
-        const __m256i out_18_3 = _mm256_madd_epi16(out_18_1, k__cospi_p14_p18);
-        const __m256i out_10_2 = _mm256_madd_epi16(out_10_0, k__cospi_p22_p10);
-        const __m256i out_10_3 = _mm256_madd_epi16(out_10_1, k__cospi_p22_p10);
-        const __m256i out_26_2 = _mm256_madd_epi16(out_26_0, k__cospi_p06_p26);
-        const __m256i out_26_3 = _mm256_madd_epi16(out_26_1, k__cospi_p06_p26);
-        const __m256i out_06_2 = _mm256_madd_epi16(out_26_0, k__cospi_m26_p06);
-        const __m256i out_06_3 = _mm256_madd_epi16(out_26_1, k__cospi_m26_p06);
-        const __m256i out_22_2 = _mm256_madd_epi16(out_10_0, k__cospi_m10_p22);
-        const __m256i out_22_3 = _mm256_madd_epi16(out_10_1, k__cospi_m10_p22);
-        const __m256i out_14_2 = _mm256_madd_epi16(out_18_0, k__cospi_m18_p14);
-        const __m256i out_14_3 = _mm256_madd_epi16(out_18_1, k__cospi_m18_p14);
-        const __m256i out_30_2 = _mm256_madd_epi16(out_02_0, k__cospi_m02_p30);
-        const __m256i out_30_3 = _mm256_madd_epi16(out_02_1, k__cospi_m02_p30);
-        // dct_const_round_shift
-        const __m256i out_02_4 = _mm256_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_02_5 = _mm256_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_18_4 = _mm256_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_18_5 = _mm256_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_10_4 = _mm256_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_10_5 = _mm256_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_26_4 = _mm256_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_26_5 = _mm256_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_06_4 = _mm256_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_06_5 = _mm256_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_22_4 = _mm256_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_22_5 = _mm256_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_14_4 = _mm256_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_14_5 = _mm256_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_30_4 = _mm256_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_30_5 = _mm256_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_02_6 = _mm256_srai_epi32(out_02_4, DCT_CONST_BITS);
-        const __m256i out_02_7 = _mm256_srai_epi32(out_02_5, DCT_CONST_BITS);
-        const __m256i out_18_6 = _mm256_srai_epi32(out_18_4, DCT_CONST_BITS);
-        const __m256i out_18_7 = _mm256_srai_epi32(out_18_5, DCT_CONST_BITS);
-        const __m256i out_10_6 = _mm256_srai_epi32(out_10_4, DCT_CONST_BITS);
-        const __m256i out_10_7 = _mm256_srai_epi32(out_10_5, DCT_CONST_BITS);
-        const __m256i out_26_6 = _mm256_srai_epi32(out_26_4, DCT_CONST_BITS);
-        const __m256i out_26_7 = _mm256_srai_epi32(out_26_5, DCT_CONST_BITS);
-        const __m256i out_06_6 = _mm256_srai_epi32(out_06_4, DCT_CONST_BITS);
-        const __m256i out_06_7 = _mm256_srai_epi32(out_06_5, DCT_CONST_BITS);
-        const __m256i out_22_6 = _mm256_srai_epi32(out_22_4, DCT_CONST_BITS);
-        const __m256i out_22_7 = _mm256_srai_epi32(out_22_5, DCT_CONST_BITS);
-        const __m256i out_14_6 = _mm256_srai_epi32(out_14_4, DCT_CONST_BITS);
-        const __m256i out_14_7 = _mm256_srai_epi32(out_14_5, DCT_CONST_BITS);
-        const __m256i out_30_6 = _mm256_srai_epi32(out_30_4, DCT_CONST_BITS);
-        const __m256i out_30_7 = _mm256_srai_epi32(out_30_5, DCT_CONST_BITS);
-        // Combine
-        out[ 2] = _mm256_packs_epi32(out_02_6, out_02_7);
-        out[18] = _mm256_packs_epi32(out_18_6, out_18_7);
-        out[10] = _mm256_packs_epi32(out_10_6, out_10_7);
-        out[26] = _mm256_packs_epi32(out_26_6, out_26_7);
-        out[ 6] = _mm256_packs_epi32(out_06_6, out_06_7);
-        out[22] = _mm256_packs_epi32(out_22_6, out_22_7);
-        out[14] = _mm256_packs_epi32(out_14_6, out_14_7);
-        out[30] = _mm256_packs_epi32(out_30_6, out_30_7);
-      }
-      {
-        step1[16] = _mm256_add_epi16(step3[17], step2[16]);
-        step1[17] = _mm256_sub_epi16(step2[16], step3[17]);
-        step1[18] = _mm256_sub_epi16(step2[19], step3[18]);
-        step1[19] = _mm256_add_epi16(step3[18], step2[19]);
-        step1[20] = _mm256_add_epi16(step3[21], step2[20]);
-        step1[21] = _mm256_sub_epi16(step2[20], step3[21]);
-        step1[22] = _mm256_sub_epi16(step2[23], step3[22]);
-        step1[23] = _mm256_add_epi16(step3[22], step2[23]);
-        step1[24] = _mm256_add_epi16(step3[25], step2[24]);
-        step1[25] = _mm256_sub_epi16(step2[24], step3[25]);
-        step1[26] = _mm256_sub_epi16(step2[27], step3[26]);
-        step1[27] = _mm256_add_epi16(step3[26], step2[27]);
-        step1[28] = _mm256_add_epi16(step3[29], step2[28]);
-        step1[29] = _mm256_sub_epi16(step2[28], step3[29]);
-        step1[30] = _mm256_sub_epi16(step2[31], step3[30]);
-        step1[31] = _mm256_add_epi16(step3[30], step2[31]);
-      }
-      // Final stage --- outputs indices are bit-reversed.
-      {
-        const __m256i out_01_0 = _mm256_unpacklo_epi16(step1[16], step1[31]);
-        const __m256i out_01_1 = _mm256_unpackhi_epi16(step1[16], step1[31]);
-        const __m256i out_17_0 = _mm256_unpacklo_epi16(step1[17], step1[30]);
-        const __m256i out_17_1 = _mm256_unpackhi_epi16(step1[17], step1[30]);
-        const __m256i out_09_0 = _mm256_unpacklo_epi16(step1[18], step1[29]);
-        const __m256i out_09_1 = _mm256_unpackhi_epi16(step1[18], step1[29]);
-        const __m256i out_25_0 = _mm256_unpacklo_epi16(step1[19], step1[28]);
-        const __m256i out_25_1 = _mm256_unpackhi_epi16(step1[19], step1[28]);
-        const __m256i out_01_2 = _mm256_madd_epi16(out_01_0, k__cospi_p31_p01);
-        const __m256i out_01_3 = _mm256_madd_epi16(out_01_1, k__cospi_p31_p01);
-        const __m256i out_17_2 = _mm256_madd_epi16(out_17_0, k__cospi_p15_p17);
-        const __m256i out_17_3 = _mm256_madd_epi16(out_17_1, k__cospi_p15_p17);
-        const __m256i out_09_2 = _mm256_madd_epi16(out_09_0, k__cospi_p23_p09);
-        const __m256i out_09_3 = _mm256_madd_epi16(out_09_1, k__cospi_p23_p09);
-        const __m256i out_25_2 = _mm256_madd_epi16(out_25_0, k__cospi_p07_p25);
-        const __m256i out_25_3 = _mm256_madd_epi16(out_25_1, k__cospi_p07_p25);
-        const __m256i out_07_2 = _mm256_madd_epi16(out_25_0, k__cospi_m25_p07);
-        const __m256i out_07_3 = _mm256_madd_epi16(out_25_1, k__cospi_m25_p07);
-        const __m256i out_23_2 = _mm256_madd_epi16(out_09_0, k__cospi_m09_p23);
-        const __m256i out_23_3 = _mm256_madd_epi16(out_09_1, k__cospi_m09_p23);
-        const __m256i out_15_2 = _mm256_madd_epi16(out_17_0, k__cospi_m17_p15);
-        const __m256i out_15_3 = _mm256_madd_epi16(out_17_1, k__cospi_m17_p15);
-        const __m256i out_31_2 = _mm256_madd_epi16(out_01_0, k__cospi_m01_p31);
-        const __m256i out_31_3 = _mm256_madd_epi16(out_01_1, k__cospi_m01_p31);
-        // dct_const_round_shift
-        const __m256i out_01_4 = _mm256_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_01_5 = _mm256_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_17_4 = _mm256_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_17_5 = _mm256_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_09_4 = _mm256_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_09_5 = _mm256_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_25_4 = _mm256_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_25_5 = _mm256_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_07_4 = _mm256_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_07_5 = _mm256_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_23_4 = _mm256_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_23_5 = _mm256_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_15_4 = _mm256_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_15_5 = _mm256_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_31_4 = _mm256_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_31_5 = _mm256_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_01_6 = _mm256_srai_epi32(out_01_4, DCT_CONST_BITS);
-        const __m256i out_01_7 = _mm256_srai_epi32(out_01_5, DCT_CONST_BITS);
-        const __m256i out_17_6 = _mm256_srai_epi32(out_17_4, DCT_CONST_BITS);
-        const __m256i out_17_7 = _mm256_srai_epi32(out_17_5, DCT_CONST_BITS);
-        const __m256i out_09_6 = _mm256_srai_epi32(out_09_4, DCT_CONST_BITS);
-        const __m256i out_09_7 = _mm256_srai_epi32(out_09_5, DCT_CONST_BITS);
-        const __m256i out_25_6 = _mm256_srai_epi32(out_25_4, DCT_CONST_BITS);
-        const __m256i out_25_7 = _mm256_srai_epi32(out_25_5, DCT_CONST_BITS);
-        const __m256i out_07_6 = _mm256_srai_epi32(out_07_4, DCT_CONST_BITS);
-        const __m256i out_07_7 = _mm256_srai_epi32(out_07_5, DCT_CONST_BITS);
-        const __m256i out_23_6 = _mm256_srai_epi32(out_23_4, DCT_CONST_BITS);
-        const __m256i out_23_7 = _mm256_srai_epi32(out_23_5, DCT_CONST_BITS);
-        const __m256i out_15_6 = _mm256_srai_epi32(out_15_4, DCT_CONST_BITS);
-        const __m256i out_15_7 = _mm256_srai_epi32(out_15_5, DCT_CONST_BITS);
-        const __m256i out_31_6 = _mm256_srai_epi32(out_31_4, DCT_CONST_BITS);
-        const __m256i out_31_7 = _mm256_srai_epi32(out_31_5, DCT_CONST_BITS);
-        // Combine
-        out[ 1] = _mm256_packs_epi32(out_01_6, out_01_7);
-        out[17] = _mm256_packs_epi32(out_17_6, out_17_7);
-        out[ 9] = _mm256_packs_epi32(out_09_6, out_09_7);
-        out[25] = _mm256_packs_epi32(out_25_6, out_25_7);
-        out[ 7] = _mm256_packs_epi32(out_07_6, out_07_7);
-        out[23] = _mm256_packs_epi32(out_23_6, out_23_7);
-        out[15] = _mm256_packs_epi32(out_15_6, out_15_7);
-        out[31] = _mm256_packs_epi32(out_31_6, out_31_7);
-      }
-      {
-        const __m256i out_05_0 = _mm256_unpacklo_epi16(step1[20], step1[27]);
-        const __m256i out_05_1 = _mm256_unpackhi_epi16(step1[20], step1[27]);
-        const __m256i out_21_0 = _mm256_unpacklo_epi16(step1[21], step1[26]);
-        const __m256i out_21_1 = _mm256_unpackhi_epi16(step1[21], step1[26]);
-        const __m256i out_13_0 = _mm256_unpacklo_epi16(step1[22], step1[25]);
-        const __m256i out_13_1 = _mm256_unpackhi_epi16(step1[22], step1[25]);
-        const __m256i out_29_0 = _mm256_unpacklo_epi16(step1[23], step1[24]);
-        const __m256i out_29_1 = _mm256_unpackhi_epi16(step1[23], step1[24]);
-        const __m256i out_05_2 = _mm256_madd_epi16(out_05_0, k__cospi_p27_p05);
-        const __m256i out_05_3 = _mm256_madd_epi16(out_05_1, k__cospi_p27_p05);
-        const __m256i out_21_2 = _mm256_madd_epi16(out_21_0, k__cospi_p11_p21);
-        const __m256i out_21_3 = _mm256_madd_epi16(out_21_1, k__cospi_p11_p21);
-        const __m256i out_13_2 = _mm256_madd_epi16(out_13_0, k__cospi_p19_p13);
-        const __m256i out_13_3 = _mm256_madd_epi16(out_13_1, k__cospi_p19_p13);
-        const __m256i out_29_2 = _mm256_madd_epi16(out_29_0, k__cospi_p03_p29);
-        const __m256i out_29_3 = _mm256_madd_epi16(out_29_1, k__cospi_p03_p29);
-        const __m256i out_03_2 = _mm256_madd_epi16(out_29_0, k__cospi_m29_p03);
-        const __m256i out_03_3 = _mm256_madd_epi16(out_29_1, k__cospi_m29_p03);
-        const __m256i out_19_2 = _mm256_madd_epi16(out_13_0, k__cospi_m13_p19);
-        const __m256i out_19_3 = _mm256_madd_epi16(out_13_1, k__cospi_m13_p19);
-        const __m256i out_11_2 = _mm256_madd_epi16(out_21_0, k__cospi_m21_p11);
-        const __m256i out_11_3 = _mm256_madd_epi16(out_21_1, k__cospi_m21_p11);
-        const __m256i out_27_2 = _mm256_madd_epi16(out_05_0, k__cospi_m05_p27);
-        const __m256i out_27_3 = _mm256_madd_epi16(out_05_1, k__cospi_m05_p27);
-        // dct_const_round_shift
-        const __m256i out_05_4 = _mm256_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_05_5 = _mm256_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_21_4 = _mm256_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_21_5 = _mm256_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_13_4 = _mm256_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_13_5 = _mm256_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_29_4 = _mm256_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_29_5 = _mm256_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_03_4 = _mm256_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_03_5 = _mm256_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_19_4 = _mm256_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_19_5 = _mm256_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_11_4 = _mm256_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_11_5 = _mm256_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_27_4 = _mm256_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_27_5 = _mm256_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_05_6 = _mm256_srai_epi32(out_05_4, DCT_CONST_BITS);
-        const __m256i out_05_7 = _mm256_srai_epi32(out_05_5, DCT_CONST_BITS);
-        const __m256i out_21_6 = _mm256_srai_epi32(out_21_4, DCT_CONST_BITS);
-        const __m256i out_21_7 = _mm256_srai_epi32(out_21_5, DCT_CONST_BITS);
-        const __m256i out_13_6 = _mm256_srai_epi32(out_13_4, DCT_CONST_BITS);
-        const __m256i out_13_7 = _mm256_srai_epi32(out_13_5, DCT_CONST_BITS);
-        const __m256i out_29_6 = _mm256_srai_epi32(out_29_4, DCT_CONST_BITS);
-        const __m256i out_29_7 = _mm256_srai_epi32(out_29_5, DCT_CONST_BITS);
-        const __m256i out_03_6 = _mm256_srai_epi32(out_03_4, DCT_CONST_BITS);
-        const __m256i out_03_7 = _mm256_srai_epi32(out_03_5, DCT_CONST_BITS);
-        const __m256i out_19_6 = _mm256_srai_epi32(out_19_4, DCT_CONST_BITS);
-        const __m256i out_19_7 = _mm256_srai_epi32(out_19_5, DCT_CONST_BITS);
-        const __m256i out_11_6 = _mm256_srai_epi32(out_11_4, DCT_CONST_BITS);
-        const __m256i out_11_7 = _mm256_srai_epi32(out_11_5, DCT_CONST_BITS);
-        const __m256i out_27_6 = _mm256_srai_epi32(out_27_4, DCT_CONST_BITS);
-        const __m256i out_27_7 = _mm256_srai_epi32(out_27_5, DCT_CONST_BITS);
-        // Combine
-        out[ 5] = _mm256_packs_epi32(out_05_6, out_05_7);
-        out[21] = _mm256_packs_epi32(out_21_6, out_21_7);
-        out[13] = _mm256_packs_epi32(out_13_6, out_13_7);
-        out[29] = _mm256_packs_epi32(out_29_6, out_29_7);
-        out[ 3] = _mm256_packs_epi32(out_03_6, out_03_7);
-        out[19] = _mm256_packs_epi32(out_19_6, out_19_7);
-        out[11] = _mm256_packs_epi32(out_11_6, out_11_7);
-        out[27] = _mm256_packs_epi32(out_27_6, out_27_7);
-      }
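The comment on the removed final stage notes that its output indices are bit-reversed, and indeed the out[] subscripts across the last four blocks (0, 16, 8, 24; 4, 20, 12, 28; 2, 18, 10, 26, 6, 22, 14, 30; and the odd outputs), taken together, are the 5-bit bit-reversals of 0..31. A standalone sketch that reproduces the ordering (helper name hypothetical):

#include <stdio.h>

/* 5-bit bit-reversal: prints 0 -> 0, 1 -> 16, 2 -> 8, 3 -> 24, 4 -> 4,
 * ..., matching the out[] index order used by the final butterfly
 * stages of the 32-point transform. */
static unsigned bit_reverse5(unsigned i) {
  unsigned r = 0;
  for (int b = 0; b < 5; ++b) r |= ((i >> b) & 1u) << (4 - b);
  return r;
}

int main(void) {
  for (unsigned i = 0; i < 32; ++i) printf("%u -> %u\n", i, bit_reverse5(i));
  return 0;
}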
+        // Stage 4
+        {
+          step1[0] = _mm256_add_epi16(step3[3], step3[0]);
+          step1[1] = _mm256_add_epi16(step3[2], step3[1]);
+          step1[2] = _mm256_sub_epi16(step3[1], step3[2]);
+          step1[3] = _mm256_sub_epi16(step3[0], step3[3]);
+          step1[8] = _mm256_add_epi16(step3[11], step2[8]);
+          step1[9] = _mm256_add_epi16(step3[10], step2[9]);
+          step1[10] = _mm256_sub_epi16(step2[9], step3[10]);
+          step1[11] = _mm256_sub_epi16(step2[8], step3[11]);
+          step1[12] = _mm256_sub_epi16(step2[15], step3[12]);
+          step1[13] = _mm256_sub_epi16(step2[14], step3[13]);
+          step1[14] = _mm256_add_epi16(step3[13], step2[14]);
+          step1[15] = _mm256_add_epi16(step3[12], step2[15]);
+        }
+        {
+          const __m256i s1_05_0 = _mm256_unpacklo_epi16(step3[6], step3[5]);
+          const __m256i s1_05_1 = _mm256_unpackhi_epi16(step3[6], step3[5]);
+          const __m256i s1_05_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_m16);
+          const __m256i s1_05_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_m16);
+          const __m256i s1_06_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_p16);
+          const __m256i s1_06_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_p16);
+          // dct_const_round_shift
+          const __m256i s1_05_4 =
+              _mm256_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_05_5 =
+              _mm256_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_06_4 =
+              _mm256_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_06_5 =
+              _mm256_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_05_6 = _mm256_srai_epi32(s1_05_4, DCT_CONST_BITS);
+          const __m256i s1_05_7 = _mm256_srai_epi32(s1_05_5, DCT_CONST_BITS);
+          const __m256i s1_06_6 = _mm256_srai_epi32(s1_06_4, DCT_CONST_BITS);
+          const __m256i s1_06_7 = _mm256_srai_epi32(s1_06_5, DCT_CONST_BITS);
+          // Combine
+          step1[5] = _mm256_packs_epi32(s1_05_6, s1_05_7);
+          step1[6] = _mm256_packs_epi32(s1_06_6, s1_06_7);
+        }
+        {
+          const __m256i s1_18_0 = _mm256_unpacklo_epi16(step3[18], step3[29]);
+          const __m256i s1_18_1 = _mm256_unpackhi_epi16(step3[18], step3[29]);
+          const __m256i s1_19_0 = _mm256_unpacklo_epi16(step3[19], step3[28]);
+          const __m256i s1_19_1 = _mm256_unpackhi_epi16(step3[19], step3[28]);
+          const __m256i s1_20_0 = _mm256_unpacklo_epi16(step3[20], step3[27]);
+          const __m256i s1_20_1 = _mm256_unpackhi_epi16(step3[20], step3[27]);
+          const __m256i s1_21_0 = _mm256_unpacklo_epi16(step3[21], step3[26]);
+          const __m256i s1_21_1 = _mm256_unpackhi_epi16(step3[21], step3[26]);
+          const __m256i s1_18_2 = _mm256_madd_epi16(s1_18_0, k__cospi_m08_p24);
+          const __m256i s1_18_3 = _mm256_madd_epi16(s1_18_1, k__cospi_m08_p24);
+          const __m256i s1_19_2 = _mm256_madd_epi16(s1_19_0, k__cospi_m08_p24);
+          const __m256i s1_19_3 = _mm256_madd_epi16(s1_19_1, k__cospi_m08_p24);
+          const __m256i s1_20_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m24_m08);
+          const __m256i s1_20_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m24_m08);
+          const __m256i s1_21_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m24_m08);
+          const __m256i s1_21_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m24_m08);
+          const __m256i s1_26_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m08_p24);
+          const __m256i s1_26_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m08_p24);
+          const __m256i s1_27_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m08_p24);
+          const __m256i s1_27_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m08_p24);
+          const __m256i s1_28_2 = _mm256_madd_epi16(s1_19_0, k__cospi_p24_p08);
+          const __m256i s1_28_3 = _mm256_madd_epi16(s1_19_1, k__cospi_p24_p08);
+          const __m256i s1_29_2 = _mm256_madd_epi16(s1_18_0, k__cospi_p24_p08);
+          const __m256i s1_29_3 = _mm256_madd_epi16(s1_18_1, k__cospi_p24_p08);
+          // dct_const_round_shift
+          const __m256i s1_18_4 =
+              _mm256_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_18_5 =
+              _mm256_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_19_4 =
+              _mm256_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_19_5 =
+              _mm256_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_20_4 =
+              _mm256_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_20_5 =
+              _mm256_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_21_4 =
+              _mm256_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_21_5 =
+              _mm256_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_26_4 =
+              _mm256_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_26_5 =
+              _mm256_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_27_4 =
+              _mm256_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_27_5 =
+              _mm256_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_28_4 =
+              _mm256_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_28_5 =
+              _mm256_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_29_4 =
+              _mm256_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
+          const __m256i s1_29_5 =
+              _mm256_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
+          const __m256i s1_18_6 = _mm256_srai_epi32(s1_18_4, DCT_CONST_BITS);
+          const __m256i s1_18_7 = _mm256_srai_epi32(s1_18_5, DCT_CONST_BITS);
+          const __m256i s1_19_6 = _mm256_srai_epi32(s1_19_4, DCT_CONST_BITS);
+          const __m256i s1_19_7 = _mm256_srai_epi32(s1_19_5, DCT_CONST_BITS);
+          const __m256i s1_20_6 = _mm256_srai_epi32(s1_20_4, DCT_CONST_BITS);
+          const __m256i s1_20_7 = _mm256_srai_epi32(s1_20_5, DCT_CONST_BITS);
+          const __m256i s1_21_6 = _mm256_srai_epi32(s1_21_4, DCT_CONST_BITS);
+          const __m256i s1_21_7 = _mm256_srai_epi32(s1_21_5, DCT_CONST_BITS);
+          const __m256i s1_26_6 = _mm256_srai_epi32(s1_26_4, DCT_CONST_BITS);
+          const __m256i s1_26_7 = _mm256_srai_epi32(s1_26_5, DCT_CONST_BITS);
+          const __m256i s1_27_6 = _mm256_srai_epi32(s1_27_4, DCT_CONST_BITS);
+          const __m256i s1_27_7 = _mm256_srai_epi32(s1_27_5, DCT_CONST_BITS);
+          const __m256i s1_28_6 = _mm256_srai_epi32(s1_28_4, DCT_CONST_BITS);
+          const __m256i s1_28_7 = _mm256_srai_epi32(s1_28_5, DCT_CONST_BITS);
+          const __m256i s1_29_6 = _mm256_srai_epi32(s1_29_4, DCT_CONST_BITS);
+          const __m256i s1_29_7 = _mm256_srai_epi32(s1_29_5, DCT_CONST_BITS);
+          // Combine
+          step1[18] = _mm256_packs_epi32(s1_18_6, s1_18_7);
+          step1[19] = _mm256_packs_epi32(s1_19_6, s1_19_7);
+          step1[20] = _mm256_packs_epi32(s1_20_6, s1_20_7);
+          step1[21] = _mm256_packs_epi32(s1_21_6, s1_21_7);
+          step1[26] = _mm256_packs_epi32(s1_26_6, s1_26_7);
+          step1[27] = _mm256_packs_epi32(s1_27_6, s1_27_7);
+          step1[28] = _mm256_packs_epi32(s1_28_6, s1_28_7);
+          step1[29] = _mm256_packs_epi32(s1_29_6, s1_29_7);
+        }
+        // Stage 5
+        {
+          step2[4] = _mm256_add_epi16(step1[5], step3[4]);
+          step2[5] = _mm256_sub_epi16(step3[4], step1[5]);
+          step2[6] = _mm256_sub_epi16(step3[7], step1[6]);
+          step2[7] = _mm256_add_epi16(step1[6], step3[7]);
+        }
+        {
+          const __m256i out_00_0 = _mm256_unpacklo_epi16(step1[0], step1[1]);
+          const __m256i out_00_1 = _mm256_unpackhi_epi16(step1[0], step1[1]);
+          const __m256i out_08_0 = _mm256_unpacklo_epi16(step1[2], step1[3]);
+          const __m256i out_08_1 = _mm256_unpackhi_epi16(step1[2], step1[3]);
+          const __m256i out_00_2 =
+              _mm256_madd_epi16(out_00_0, k__cospi_p16_p16);
+          const __m256i out_00_3 =
+              _mm256_madd_epi16(out_00_1, k__cospi_p16_p16);
+          const __m256i out_16_2 =
+              _mm256_madd_epi16(out_00_0, k__cospi_p16_m16);
+          const __m256i out_16_3 =
+              _mm256_madd_epi16(out_00_1, k__cospi_p16_m16);
+          const __m256i out_08_2 =
+              _mm256_madd_epi16(out_08_0, k__cospi_p24_p08);
+          const __m256i out_08_3 =
+              _mm256_madd_epi16(out_08_1, k__cospi_p24_p08);
+          const __m256i out_24_2 =
+              _mm256_madd_epi16(out_08_0, k__cospi_m08_p24);
+          const __m256i out_24_3 =
+              _mm256_madd_epi16(out_08_1, k__cospi_m08_p24);
+          // dct_const_round_shift
+          const __m256i out_00_4 =
+              _mm256_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_00_5 =
+              _mm256_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_16_4 =
+              _mm256_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_16_5 =
+              _mm256_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_08_4 =
+              _mm256_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_08_5 =
+              _mm256_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_24_4 =
+              _mm256_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_24_5 =
+              _mm256_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_00_6 = _mm256_srai_epi32(out_00_4, DCT_CONST_BITS);
+          const __m256i out_00_7 = _mm256_srai_epi32(out_00_5, DCT_CONST_BITS);
+          const __m256i out_16_6 = _mm256_srai_epi32(out_16_4, DCT_CONST_BITS);
+          const __m256i out_16_7 = _mm256_srai_epi32(out_16_5, DCT_CONST_BITS);
+          const __m256i out_08_6 = _mm256_srai_epi32(out_08_4, DCT_CONST_BITS);
+          const __m256i out_08_7 = _mm256_srai_epi32(out_08_5, DCT_CONST_BITS);
+          const __m256i out_24_6 = _mm256_srai_epi32(out_24_4, DCT_CONST_BITS);
+          const __m256i out_24_7 = _mm256_srai_epi32(out_24_5, DCT_CONST_BITS);
+          // Combine
+          out[0] = _mm256_packs_epi32(out_00_6, out_00_7);
+          out[16] = _mm256_packs_epi32(out_16_6, out_16_7);
+          out[8] = _mm256_packs_epi32(out_08_6, out_08_7);
+          out[24] = _mm256_packs_epi32(out_24_6, out_24_7);
+        }
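+        // Stage 5 already produces the final coefficients for output rows
+        // 0, 8, 16 and 24; the later stages fill in the remaining rows of
+        // the bit-reversed output order.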
+        {
+          const __m256i s2_09_0 = _mm256_unpacklo_epi16(step1[9], step1[14]);
+          const __m256i s2_09_1 = _mm256_unpackhi_epi16(step1[9], step1[14]);
+          const __m256i s2_10_0 = _mm256_unpacklo_epi16(step1[10], step1[13]);
+          const __m256i s2_10_1 = _mm256_unpackhi_epi16(step1[10], step1[13]);
+          const __m256i s2_09_2 = _mm256_madd_epi16(s2_09_0, k__cospi_m08_p24);
+          const __m256i s2_09_3 = _mm256_madd_epi16(s2_09_1, k__cospi_m08_p24);
+          const __m256i s2_10_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m24_m08);
+          const __m256i s2_10_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m24_m08);
+          const __m256i s2_13_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m08_p24);
+          const __m256i s2_13_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m08_p24);
+          const __m256i s2_14_2 = _mm256_madd_epi16(s2_09_0, k__cospi_p24_p08);
+          const __m256i s2_14_3 = _mm256_madd_epi16(s2_09_1, k__cospi_p24_p08);
+          // dct_const_round_shift
+          const __m256i s2_09_4 =
+              _mm256_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
+          const __m256i s2_09_5 =
+              _mm256_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
+          const __m256i s2_10_4 =
+              _mm256_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
+          const __m256i s2_10_5 =
+              _mm256_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
+          const __m256i s2_13_4 =
+              _mm256_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
+          const __m256i s2_13_5 =
+              _mm256_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
+          const __m256i s2_14_4 =
+              _mm256_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
+          const __m256i s2_14_5 =
+              _mm256_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
+          const __m256i s2_09_6 = _mm256_srai_epi32(s2_09_4, DCT_CONST_BITS);
+          const __m256i s2_09_7 = _mm256_srai_epi32(s2_09_5, DCT_CONST_BITS);
+          const __m256i s2_10_6 = _mm256_srai_epi32(s2_10_4, DCT_CONST_BITS);
+          const __m256i s2_10_7 = _mm256_srai_epi32(s2_10_5, DCT_CONST_BITS);
+          const __m256i s2_13_6 = _mm256_srai_epi32(s2_13_4, DCT_CONST_BITS);
+          const __m256i s2_13_7 = _mm256_srai_epi32(s2_13_5, DCT_CONST_BITS);
+          const __m256i s2_14_6 = _mm256_srai_epi32(s2_14_4, DCT_CONST_BITS);
+          const __m256i s2_14_7 = _mm256_srai_epi32(s2_14_5, DCT_CONST_BITS);
+          // Combine
+          step2[9] = _mm256_packs_epi32(s2_09_6, s2_09_7);
+          step2[10] = _mm256_packs_epi32(s2_10_6, s2_10_7);
+          step2[13] = _mm256_packs_epi32(s2_13_6, s2_13_7);
+          step2[14] = _mm256_packs_epi32(s2_14_6, s2_14_7);
+        }
+        {
+          step2[16] = _mm256_add_epi16(step1[19], step3[16]);
+          step2[17] = _mm256_add_epi16(step1[18], step3[17]);
+          step2[18] = _mm256_sub_epi16(step3[17], step1[18]);
+          step2[19] = _mm256_sub_epi16(step3[16], step1[19]);
+          step2[20] = _mm256_sub_epi16(step3[23], step1[20]);
+          step2[21] = _mm256_sub_epi16(step3[22], step1[21]);
+          step2[22] = _mm256_add_epi16(step1[21], step3[22]);
+          step2[23] = _mm256_add_epi16(step1[20], step3[23]);
+          step2[24] = _mm256_add_epi16(step1[27], step3[24]);
+          step2[25] = _mm256_add_epi16(step1[26], step3[25]);
+          step2[26] = _mm256_sub_epi16(step3[25], step1[26]);
+          step2[27] = _mm256_sub_epi16(step3[24], step1[27]);
+          step2[28] = _mm256_sub_epi16(step3[31], step1[28]);
+          step2[29] = _mm256_sub_epi16(step3[30], step1[29]);
+          step2[30] = _mm256_add_epi16(step1[29], step3[30]);
+          step2[31] = _mm256_add_epi16(step1[28], step3[31]);
+        }
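+        // step2[16..31] are plain 16-bit add/sub butterflies. This is the
+        // low-precision path; when FDCT32x32_HIGH_PRECISION is enabled, the
+        // else-branch below redoes these stages in 32-bit lanes (the lstep
+        // arrays) to keep extra intermediate precision.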
+        // Stage 6
+        {
+          const __m256i out_04_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
+          const __m256i out_04_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
+          const __m256i out_20_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
+          const __m256i out_20_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
+          const __m256i out_12_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
+          const __m256i out_12_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
+          const __m256i out_28_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
+          const __m256i out_28_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
+          const __m256i out_04_2 =
+              _mm256_madd_epi16(out_04_0, k__cospi_p28_p04);
+          const __m256i out_04_3 =
+              _mm256_madd_epi16(out_04_1, k__cospi_p28_p04);
+          const __m256i out_20_2 =
+              _mm256_madd_epi16(out_20_0, k__cospi_p12_p20);
+          const __m256i out_20_3 =
+              _mm256_madd_epi16(out_20_1, k__cospi_p12_p20);
+          const __m256i out_12_2 =
+              _mm256_madd_epi16(out_12_0, k__cospi_m20_p12);
+          const __m256i out_12_3 =
+              _mm256_madd_epi16(out_12_1, k__cospi_m20_p12);
+          const __m256i out_28_2 =
+              _mm256_madd_epi16(out_28_0, k__cospi_m04_p28);
+          const __m256i out_28_3 =
+              _mm256_madd_epi16(out_28_1, k__cospi_m04_p28);
+          // dct_const_round_shift
+          const __m256i out_04_4 =
+              _mm256_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_04_5 =
+              _mm256_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_20_4 =
+              _mm256_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_20_5 =
+              _mm256_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_12_4 =
+              _mm256_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_12_5 =
+              _mm256_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_28_4 =
+              _mm256_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_28_5 =
+              _mm256_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_04_6 = _mm256_srai_epi32(out_04_4, DCT_CONST_BITS);
+          const __m256i out_04_7 = _mm256_srai_epi32(out_04_5, DCT_CONST_BITS);
+          const __m256i out_20_6 = _mm256_srai_epi32(out_20_4, DCT_CONST_BITS);
+          const __m256i out_20_7 = _mm256_srai_epi32(out_20_5, DCT_CONST_BITS);
+          const __m256i out_12_6 = _mm256_srai_epi32(out_12_4, DCT_CONST_BITS);
+          const __m256i out_12_7 = _mm256_srai_epi32(out_12_5, DCT_CONST_BITS);
+          const __m256i out_28_6 = _mm256_srai_epi32(out_28_4, DCT_CONST_BITS);
+          const __m256i out_28_7 = _mm256_srai_epi32(out_28_5, DCT_CONST_BITS);
+          // Combine
+          out[4] = _mm256_packs_epi32(out_04_6, out_04_7);
+          out[20] = _mm256_packs_epi32(out_20_6, out_20_7);
+          out[12] = _mm256_packs_epi32(out_12_6, out_12_7);
+          out[28] = _mm256_packs_epi32(out_28_6, out_28_7);
+        }
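+        // The stage 6 rotations produce output rows 4, 20, 12 and 28 from
+        // the step2[4..7] chain using the cospi_{4,12,20,28} constant pairs.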
+        {
+          step3[8] = _mm256_add_epi16(step2[9], step1[8]);
+          step3[9] = _mm256_sub_epi16(step1[8], step2[9]);
+          step3[10] = _mm256_sub_epi16(step1[11], step2[10]);
+          step3[11] = _mm256_add_epi16(step2[10], step1[11]);
+          step3[12] = _mm256_add_epi16(step2[13], step1[12]);
+          step3[13] = _mm256_sub_epi16(step1[12], step2[13]);
+          step3[14] = _mm256_sub_epi16(step1[15], step2[14]);
+          step3[15] = _mm256_add_epi16(step2[14], step1[15]);
+        }
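+        // step3[8..15]: add/sub butterflies only; this chain needs no
+        // further rotation until stage 7.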
+        {
+          const __m256i s3_17_0 = _mm256_unpacklo_epi16(step2[17], step2[30]);
+          const __m256i s3_17_1 = _mm256_unpackhi_epi16(step2[17], step2[30]);
+          const __m256i s3_18_0 = _mm256_unpacklo_epi16(step2[18], step2[29]);
+          const __m256i s3_18_1 = _mm256_unpackhi_epi16(step2[18], step2[29]);
+          const __m256i s3_21_0 = _mm256_unpacklo_epi16(step2[21], step2[26]);
+          const __m256i s3_21_1 = _mm256_unpackhi_epi16(step2[21], step2[26]);
+          const __m256i s3_22_0 = _mm256_unpacklo_epi16(step2[22], step2[25]);
+          const __m256i s3_22_1 = _mm256_unpackhi_epi16(step2[22], step2[25]);
+          const __m256i s3_17_2 = _mm256_madd_epi16(s3_17_0, k__cospi_m04_p28);
+          const __m256i s3_17_3 = _mm256_madd_epi16(s3_17_1, k__cospi_m04_p28);
+          const __m256i s3_18_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m28_m04);
+          const __m256i s3_18_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m28_m04);
+          const __m256i s3_21_2 = _mm256_madd_epi16(s3_21_0, k__cospi_m20_p12);
+          const __m256i s3_21_3 = _mm256_madd_epi16(s3_21_1, k__cospi_m20_p12);
+          const __m256i s3_22_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m12_m20);
+          const __m256i s3_22_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m12_m20);
+          const __m256i s3_25_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m20_p12);
+          const __m256i s3_25_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m20_p12);
+          const __m256i s3_26_2 = _mm256_madd_epi16(s3_21_0, k__cospi_p12_p20);
+          const __m256i s3_26_3 = _mm256_madd_epi16(s3_21_1, k__cospi_p12_p20);
+          const __m256i s3_29_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m04_p28);
+          const __m256i s3_29_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m04_p28);
+          const __m256i s3_30_2 = _mm256_madd_epi16(s3_17_0, k__cospi_p28_p04);
+          const __m256i s3_30_3 = _mm256_madd_epi16(s3_17_1, k__cospi_p28_p04);
+          // dct_const_round_shift
+          const __m256i s3_17_4 =
+              _mm256_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_17_5 =
+              _mm256_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_18_4 =
+              _mm256_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_18_5 =
+              _mm256_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_21_4 =
+              _mm256_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_21_5 =
+              _mm256_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_22_4 =
+              _mm256_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_22_5 =
+              _mm256_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_17_6 = _mm256_srai_epi32(s3_17_4, DCT_CONST_BITS);
+          const __m256i s3_17_7 = _mm256_srai_epi32(s3_17_5, DCT_CONST_BITS);
+          const __m256i s3_18_6 = _mm256_srai_epi32(s3_18_4, DCT_CONST_BITS);
+          const __m256i s3_18_7 = _mm256_srai_epi32(s3_18_5, DCT_CONST_BITS);
+          const __m256i s3_21_6 = _mm256_srai_epi32(s3_21_4, DCT_CONST_BITS);
+          const __m256i s3_21_7 = _mm256_srai_epi32(s3_21_5, DCT_CONST_BITS);
+          const __m256i s3_22_6 = _mm256_srai_epi32(s3_22_4, DCT_CONST_BITS);
+          const __m256i s3_22_7 = _mm256_srai_epi32(s3_22_5, DCT_CONST_BITS);
+          const __m256i s3_25_4 =
+              _mm256_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_25_5 =
+              _mm256_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_26_4 =
+              _mm256_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_26_5 =
+              _mm256_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_29_4 =
+              _mm256_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_29_5 =
+              _mm256_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_30_4 =
+              _mm256_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_30_5 =
+              _mm256_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_25_6 = _mm256_srai_epi32(s3_25_4, DCT_CONST_BITS);
+          const __m256i s3_25_7 = _mm256_srai_epi32(s3_25_5, DCT_CONST_BITS);
+          const __m256i s3_26_6 = _mm256_srai_epi32(s3_26_4, DCT_CONST_BITS);
+          const __m256i s3_26_7 = _mm256_srai_epi32(s3_26_5, DCT_CONST_BITS);
+          const __m256i s3_29_6 = _mm256_srai_epi32(s3_29_4, DCT_CONST_BITS);
+          const __m256i s3_29_7 = _mm256_srai_epi32(s3_29_5, DCT_CONST_BITS);
+          const __m256i s3_30_6 = _mm256_srai_epi32(s3_30_4, DCT_CONST_BITS);
+          const __m256i s3_30_7 = _mm256_srai_epi32(s3_30_5, DCT_CONST_BITS);
+          // Combine
+          step3[17] = _mm256_packs_epi32(s3_17_6, s3_17_7);
+          step3[18] = _mm256_packs_epi32(s3_18_6, s3_18_7);
+          step3[21] = _mm256_packs_epi32(s3_21_6, s3_21_7);
+          step3[22] = _mm256_packs_epi32(s3_22_6, s3_22_7);
+          // Combine
+          step3[25] = _mm256_packs_epi32(s3_25_6, s3_25_7);
+          step3[26] = _mm256_packs_epi32(s3_26_6, s3_26_7);
+          step3[29] = _mm256_packs_epi32(s3_29_6, s3_29_7);
+          step3[30] = _mm256_packs_epi32(s3_30_6, s3_30_7);
+        }
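+        // Note how s3_25/26/29/30 reuse the s3_22/21/18/17 interleaves with
+        // complementary cospi pairs, so each unpack feeds two rotations.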
+        // Stage 7
+        {
+          const __m256i out_02_0 = _mm256_unpacklo_epi16(step3[8], step3[15]);
+          const __m256i out_02_1 = _mm256_unpackhi_epi16(step3[8], step3[15]);
+          const __m256i out_18_0 = _mm256_unpacklo_epi16(step3[9], step3[14]);
+          const __m256i out_18_1 = _mm256_unpackhi_epi16(step3[9], step3[14]);
+          const __m256i out_10_0 = _mm256_unpacklo_epi16(step3[10], step3[13]);
+          const __m256i out_10_1 = _mm256_unpackhi_epi16(step3[10], step3[13]);
+          const __m256i out_26_0 = _mm256_unpacklo_epi16(step3[11], step3[12]);
+          const __m256i out_26_1 = _mm256_unpackhi_epi16(step3[11], step3[12]);
+          const __m256i out_02_2 =
+              _mm256_madd_epi16(out_02_0, k__cospi_p30_p02);
+          const __m256i out_02_3 =
+              _mm256_madd_epi16(out_02_1, k__cospi_p30_p02);
+          const __m256i out_18_2 =
+              _mm256_madd_epi16(out_18_0, k__cospi_p14_p18);
+          const __m256i out_18_3 =
+              _mm256_madd_epi16(out_18_1, k__cospi_p14_p18);
+          const __m256i out_10_2 =
+              _mm256_madd_epi16(out_10_0, k__cospi_p22_p10);
+          const __m256i out_10_3 =
+              _mm256_madd_epi16(out_10_1, k__cospi_p22_p10);
+          const __m256i out_26_2 =
+              _mm256_madd_epi16(out_26_0, k__cospi_p06_p26);
+          const __m256i out_26_3 =
+              _mm256_madd_epi16(out_26_1, k__cospi_p06_p26);
+          const __m256i out_06_2 =
+              _mm256_madd_epi16(out_26_0, k__cospi_m26_p06);
+          const __m256i out_06_3 =
+              _mm256_madd_epi16(out_26_1, k__cospi_m26_p06);
+          const __m256i out_22_2 =
+              _mm256_madd_epi16(out_10_0, k__cospi_m10_p22);
+          const __m256i out_22_3 =
+              _mm256_madd_epi16(out_10_1, k__cospi_m10_p22);
+          const __m256i out_14_2 =
+              _mm256_madd_epi16(out_18_0, k__cospi_m18_p14);
+          const __m256i out_14_3 =
+              _mm256_madd_epi16(out_18_1, k__cospi_m18_p14);
+          const __m256i out_30_2 =
+              _mm256_madd_epi16(out_02_0, k__cospi_m02_p30);
+          const __m256i out_30_3 =
+              _mm256_madd_epi16(out_02_1, k__cospi_m02_p30);
+          // dct_const_round_shift
+          const __m256i out_02_4 =
+              _mm256_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_02_5 =
+              _mm256_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_18_4 =
+              _mm256_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_18_5 =
+              _mm256_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_10_4 =
+              _mm256_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_10_5 =
+              _mm256_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_26_4 =
+              _mm256_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_26_5 =
+              _mm256_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_06_4 =
+              _mm256_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_06_5 =
+              _mm256_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_22_4 =
+              _mm256_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_22_5 =
+              _mm256_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_14_4 =
+              _mm256_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_14_5 =
+              _mm256_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_30_4 =
+              _mm256_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_30_5 =
+              _mm256_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_02_6 = _mm256_srai_epi32(out_02_4, DCT_CONST_BITS);
+          const __m256i out_02_7 = _mm256_srai_epi32(out_02_5, DCT_CONST_BITS);
+          const __m256i out_18_6 = _mm256_srai_epi32(out_18_4, DCT_CONST_BITS);
+          const __m256i out_18_7 = _mm256_srai_epi32(out_18_5, DCT_CONST_BITS);
+          const __m256i out_10_6 = _mm256_srai_epi32(out_10_4, DCT_CONST_BITS);
+          const __m256i out_10_7 = _mm256_srai_epi32(out_10_5, DCT_CONST_BITS);
+          const __m256i out_26_6 = _mm256_srai_epi32(out_26_4, DCT_CONST_BITS);
+          const __m256i out_26_7 = _mm256_srai_epi32(out_26_5, DCT_CONST_BITS);
+          const __m256i out_06_6 = _mm256_srai_epi32(out_06_4, DCT_CONST_BITS);
+          const __m256i out_06_7 = _mm256_srai_epi32(out_06_5, DCT_CONST_BITS);
+          const __m256i out_22_6 = _mm256_srai_epi32(out_22_4, DCT_CONST_BITS);
+          const __m256i out_22_7 = _mm256_srai_epi32(out_22_5, DCT_CONST_BITS);
+          const __m256i out_14_6 = _mm256_srai_epi32(out_14_4, DCT_CONST_BITS);
+          const __m256i out_14_7 = _mm256_srai_epi32(out_14_5, DCT_CONST_BITS);
+          const __m256i out_30_6 = _mm256_srai_epi32(out_30_4, DCT_CONST_BITS);
+          const __m256i out_30_7 = _mm256_srai_epi32(out_30_5, DCT_CONST_BITS);
+          // Combine
+          out[2] = _mm256_packs_epi32(out_02_6, out_02_7);
+          out[18] = _mm256_packs_epi32(out_18_6, out_18_7);
+          out[10] = _mm256_packs_epi32(out_10_6, out_10_7);
+          out[26] = _mm256_packs_epi32(out_26_6, out_26_7);
+          out[6] = _mm256_packs_epi32(out_06_6, out_06_7);
+          out[22] = _mm256_packs_epi32(out_22_6, out_22_7);
+          out[14] = _mm256_packs_epi32(out_14_6, out_14_7);
+          out[30] = _mm256_packs_epi32(out_30_6, out_30_7);
+        }
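+        // Stage 7 finishes the even-indexed rows 2, 18, 10, 26, 6, 22, 14
+        // and 30, each a single rotation of a step3[8..15] pair.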
+        {
+          step1[16] = _mm256_add_epi16(step3[17], step2[16]);
+          step1[17] = _mm256_sub_epi16(step2[16], step3[17]);
+          step1[18] = _mm256_sub_epi16(step2[19], step3[18]);
+          step1[19] = _mm256_add_epi16(step3[18], step2[19]);
+          step1[20] = _mm256_add_epi16(step3[21], step2[20]);
+          step1[21] = _mm256_sub_epi16(step2[20], step3[21]);
+          step1[22] = _mm256_sub_epi16(step2[23], step3[22]);
+          step1[23] = _mm256_add_epi16(step3[22], step2[23]);
+          step1[24] = _mm256_add_epi16(step3[25], step2[24]);
+          step1[25] = _mm256_sub_epi16(step2[24], step3[25]);
+          step1[26] = _mm256_sub_epi16(step2[27], step3[26]);
+          step1[27] = _mm256_add_epi16(step3[26], step2[27]);
+          step1[28] = _mm256_add_epi16(step3[29], step2[28]);
+          step1[29] = _mm256_sub_epi16(step2[28], step3[29]);
+          step1[30] = _mm256_sub_epi16(step2[31], step3[30]);
+          step1[31] = _mm256_add_epi16(step3[30], step2[31]);
+        }
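+        // The step1[16..31] butterflies above feed the two final-stage
+        // rotation blocks that follow.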
+        // Final stage: output indices are bit-reversed.
+        {
+          const __m256i out_01_0 = _mm256_unpacklo_epi16(step1[16], step1[31]);
+          const __m256i out_01_1 = _mm256_unpackhi_epi16(step1[16], step1[31]);
+          const __m256i out_17_0 = _mm256_unpacklo_epi16(step1[17], step1[30]);
+          const __m256i out_17_1 = _mm256_unpackhi_epi16(step1[17], step1[30]);
+          const __m256i out_09_0 = _mm256_unpacklo_epi16(step1[18], step1[29]);
+          const __m256i out_09_1 = _mm256_unpackhi_epi16(step1[18], step1[29]);
+          const __m256i out_25_0 = _mm256_unpacklo_epi16(step1[19], step1[28]);
+          const __m256i out_25_1 = _mm256_unpackhi_epi16(step1[19], step1[28]);
+          const __m256i out_01_2 =
+              _mm256_madd_epi16(out_01_0, k__cospi_p31_p01);
+          const __m256i out_01_3 =
+              _mm256_madd_epi16(out_01_1, k__cospi_p31_p01);
+          const __m256i out_17_2 =
+              _mm256_madd_epi16(out_17_0, k__cospi_p15_p17);
+          const __m256i out_17_3 =
+              _mm256_madd_epi16(out_17_1, k__cospi_p15_p17);
+          const __m256i out_09_2 =
+              _mm256_madd_epi16(out_09_0, k__cospi_p23_p09);
+          const __m256i out_09_3 =
+              _mm256_madd_epi16(out_09_1, k__cospi_p23_p09);
+          const __m256i out_25_2 =
+              _mm256_madd_epi16(out_25_0, k__cospi_p07_p25);
+          const __m256i out_25_3 =
+              _mm256_madd_epi16(out_25_1, k__cospi_p07_p25);
+          const __m256i out_07_2 =
+              _mm256_madd_epi16(out_25_0, k__cospi_m25_p07);
+          const __m256i out_07_3 =
+              _mm256_madd_epi16(out_25_1, k__cospi_m25_p07);
+          const __m256i out_23_2 =
+              _mm256_madd_epi16(out_09_0, k__cospi_m09_p23);
+          const __m256i out_23_3 =
+              _mm256_madd_epi16(out_09_1, k__cospi_m09_p23);
+          const __m256i out_15_2 =
+              _mm256_madd_epi16(out_17_0, k__cospi_m17_p15);
+          const __m256i out_15_3 =
+              _mm256_madd_epi16(out_17_1, k__cospi_m17_p15);
+          const __m256i out_31_2 =
+              _mm256_madd_epi16(out_01_0, k__cospi_m01_p31);
+          const __m256i out_31_3 =
+              _mm256_madd_epi16(out_01_1, k__cospi_m01_p31);
+          // dct_const_round_shift
+          const __m256i out_01_4 =
+              _mm256_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_01_5 =
+              _mm256_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_17_4 =
+              _mm256_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_17_5 =
+              _mm256_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_09_4 =
+              _mm256_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_09_5 =
+              _mm256_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_25_4 =
+              _mm256_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_25_5 =
+              _mm256_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_07_4 =
+              _mm256_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_07_5 =
+              _mm256_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_23_4 =
+              _mm256_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_23_5 =
+              _mm256_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_15_4 =
+              _mm256_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_15_5 =
+              _mm256_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_31_4 =
+              _mm256_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_31_5 =
+              _mm256_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_01_6 = _mm256_srai_epi32(out_01_4, DCT_CONST_BITS);
+          const __m256i out_01_7 = _mm256_srai_epi32(out_01_5, DCT_CONST_BITS);
+          const __m256i out_17_6 = _mm256_srai_epi32(out_17_4, DCT_CONST_BITS);
+          const __m256i out_17_7 = _mm256_srai_epi32(out_17_5, DCT_CONST_BITS);
+          const __m256i out_09_6 = _mm256_srai_epi32(out_09_4, DCT_CONST_BITS);
+          const __m256i out_09_7 = _mm256_srai_epi32(out_09_5, DCT_CONST_BITS);
+          const __m256i out_25_6 = _mm256_srai_epi32(out_25_4, DCT_CONST_BITS);
+          const __m256i out_25_7 = _mm256_srai_epi32(out_25_5, DCT_CONST_BITS);
+          const __m256i out_07_6 = _mm256_srai_epi32(out_07_4, DCT_CONST_BITS);
+          const __m256i out_07_7 = _mm256_srai_epi32(out_07_5, DCT_CONST_BITS);
+          const __m256i out_23_6 = _mm256_srai_epi32(out_23_4, DCT_CONST_BITS);
+          const __m256i out_23_7 = _mm256_srai_epi32(out_23_5, DCT_CONST_BITS);
+          const __m256i out_15_6 = _mm256_srai_epi32(out_15_4, DCT_CONST_BITS);
+          const __m256i out_15_7 = _mm256_srai_epi32(out_15_5, DCT_CONST_BITS);
+          const __m256i out_31_6 = _mm256_srai_epi32(out_31_4, DCT_CONST_BITS);
+          const __m256i out_31_7 = _mm256_srai_epi32(out_31_5, DCT_CONST_BITS);
+          // Combine
+          out[1] = _mm256_packs_epi32(out_01_6, out_01_7);
+          out[17] = _mm256_packs_epi32(out_17_6, out_17_7);
+          out[9] = _mm256_packs_epi32(out_09_6, out_09_7);
+          out[25] = _mm256_packs_epi32(out_25_6, out_25_7);
+          out[7] = _mm256_packs_epi32(out_07_6, out_07_7);
+          out[23] = _mm256_packs_epi32(out_23_6, out_23_7);
+          out[15] = _mm256_packs_epi32(out_15_6, out_15_7);
+          out[31] = _mm256_packs_epi32(out_31_6, out_31_7);
+        }
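+        // First half of the odd output rows: 1, 17, 9, 25, 7, 23, 15, 31.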
+        {
+          const __m256i out_05_0 = _mm256_unpacklo_epi16(step1[20], step1[27]);
+          const __m256i out_05_1 = _mm256_unpackhi_epi16(step1[20], step1[27]);
+          const __m256i out_21_0 = _mm256_unpacklo_epi16(step1[21], step1[26]);
+          const __m256i out_21_1 = _mm256_unpackhi_epi16(step1[21], step1[26]);
+          const __m256i out_13_0 = _mm256_unpacklo_epi16(step1[22], step1[25]);
+          const __m256i out_13_1 = _mm256_unpackhi_epi16(step1[22], step1[25]);
+          const __m256i out_29_0 = _mm256_unpacklo_epi16(step1[23], step1[24]);
+          const __m256i out_29_1 = _mm256_unpackhi_epi16(step1[23], step1[24]);
+          const __m256i out_05_2 =
+              _mm256_madd_epi16(out_05_0, k__cospi_p27_p05);
+          const __m256i out_05_3 =
+              _mm256_madd_epi16(out_05_1, k__cospi_p27_p05);
+          const __m256i out_21_2 =
+              _mm256_madd_epi16(out_21_0, k__cospi_p11_p21);
+          const __m256i out_21_3 =
+              _mm256_madd_epi16(out_21_1, k__cospi_p11_p21);
+          const __m256i out_13_2 =
+              _mm256_madd_epi16(out_13_0, k__cospi_p19_p13);
+          const __m256i out_13_3 =
+              _mm256_madd_epi16(out_13_1, k__cospi_p19_p13);
+          const __m256i out_29_2 =
+              _mm256_madd_epi16(out_29_0, k__cospi_p03_p29);
+          const __m256i out_29_3 =
+              _mm256_madd_epi16(out_29_1, k__cospi_p03_p29);
+          const __m256i out_03_2 =
+              _mm256_madd_epi16(out_29_0, k__cospi_m29_p03);
+          const __m256i out_03_3 =
+              _mm256_madd_epi16(out_29_1, k__cospi_m29_p03);
+          const __m256i out_19_2 =
+              _mm256_madd_epi16(out_13_0, k__cospi_m13_p19);
+          const __m256i out_19_3 =
+              _mm256_madd_epi16(out_13_1, k__cospi_m13_p19);
+          const __m256i out_11_2 =
+              _mm256_madd_epi16(out_21_0, k__cospi_m21_p11);
+          const __m256i out_11_3 =
+              _mm256_madd_epi16(out_21_1, k__cospi_m21_p11);
+          const __m256i out_27_2 =
+              _mm256_madd_epi16(out_05_0, k__cospi_m05_p27);
+          const __m256i out_27_3 =
+              _mm256_madd_epi16(out_05_1, k__cospi_m05_p27);
+          // dct_const_round_shift
+          const __m256i out_05_4 =
+              _mm256_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_05_5 =
+              _mm256_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_21_4 =
+              _mm256_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_21_5 =
+              _mm256_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_13_4 =
+              _mm256_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_13_5 =
+              _mm256_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_29_4 =
+              _mm256_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_29_5 =
+              _mm256_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_03_4 =
+              _mm256_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_03_5 =
+              _mm256_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_19_4 =
+              _mm256_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_19_5 =
+              _mm256_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_11_4 =
+              _mm256_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_11_5 =
+              _mm256_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_27_4 =
+              _mm256_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
+          const __m256i out_27_5 =
+              _mm256_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
+          const __m256i out_05_6 = _mm256_srai_epi32(out_05_4, DCT_CONST_BITS);
+          const __m256i out_05_7 = _mm256_srai_epi32(out_05_5, DCT_CONST_BITS);
+          const __m256i out_21_6 = _mm256_srai_epi32(out_21_4, DCT_CONST_BITS);
+          const __m256i out_21_7 = _mm256_srai_epi32(out_21_5, DCT_CONST_BITS);
+          const __m256i out_13_6 = _mm256_srai_epi32(out_13_4, DCT_CONST_BITS);
+          const __m256i out_13_7 = _mm256_srai_epi32(out_13_5, DCT_CONST_BITS);
+          const __m256i out_29_6 = _mm256_srai_epi32(out_29_4, DCT_CONST_BITS);
+          const __m256i out_29_7 = _mm256_srai_epi32(out_29_5, DCT_CONST_BITS);
+          const __m256i out_03_6 = _mm256_srai_epi32(out_03_4, DCT_CONST_BITS);
+          const __m256i out_03_7 = _mm256_srai_epi32(out_03_5, DCT_CONST_BITS);
+          const __m256i out_19_6 = _mm256_srai_epi32(out_19_4, DCT_CONST_BITS);
+          const __m256i out_19_7 = _mm256_srai_epi32(out_19_5, DCT_CONST_BITS);
+          const __m256i out_11_6 = _mm256_srai_epi32(out_11_4, DCT_CONST_BITS);
+          const __m256i out_11_7 = _mm256_srai_epi32(out_11_5, DCT_CONST_BITS);
+          const __m256i out_27_6 = _mm256_srai_epi32(out_27_4, DCT_CONST_BITS);
+          const __m256i out_27_7 = _mm256_srai_epi32(out_27_5, DCT_CONST_BITS);
+          // Combine
+          out[5] = _mm256_packs_epi32(out_05_6, out_05_7);
+          out[21] = _mm256_packs_epi32(out_21_6, out_21_7);
+          out[13] = _mm256_packs_epi32(out_13_6, out_13_7);
+          out[29] = _mm256_packs_epi32(out_29_6, out_29_7);
+          out[3] = _mm256_packs_epi32(out_03_6, out_03_7);
+          out[19] = _mm256_packs_epi32(out_19_6, out_19_7);
+          out[11] = _mm256_packs_epi32(out_11_6, out_11_7);
+          out[27] = _mm256_packs_epi32(out_27_6, out_27_7);
+        }
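+        // Remaining odd rows 5, 21, 13, 29, 3, 19, 11 and 27; at this point
+        // all 32 (bit-reversed) rows of out[] are complete for the 16-bit
+        // path.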
 #if FDCT32x32_HIGH_PRECISION
       } else {
         __m256i lstep1[64], lstep2[64], lstep3[64];
@@ -1156,32 +1377,32 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         // stage 3
         {
          // expanding to 32-bit length prior to addition operations
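          // Note: unpacking against kZero zero-extends each 16-bit value;
          // the _mm256_madd_epi16(..., kOne) calls that follow multiply the
          // low 16-bit lane by one using signed arithmetic, so the pair of
          // operations amounts to a signed 16- to 32-bit widening.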
-          lstep2[ 0] = _mm256_unpacklo_epi16(step2[ 0], kZero);
-          lstep2[ 1] = _mm256_unpackhi_epi16(step2[ 0], kZero);
-          lstep2[ 2] = _mm256_unpacklo_epi16(step2[ 1], kZero);
-          lstep2[ 3] = _mm256_unpackhi_epi16(step2[ 1], kZero);
-          lstep2[ 4] = _mm256_unpacklo_epi16(step2[ 2], kZero);
-          lstep2[ 5] = _mm256_unpackhi_epi16(step2[ 2], kZero);
-          lstep2[ 6] = _mm256_unpacklo_epi16(step2[ 3], kZero);
-          lstep2[ 7] = _mm256_unpackhi_epi16(step2[ 3], kZero);
-          lstep2[ 8] = _mm256_unpacklo_epi16(step2[ 4], kZero);
-          lstep2[ 9] = _mm256_unpackhi_epi16(step2[ 4], kZero);
-          lstep2[10] = _mm256_unpacklo_epi16(step2[ 5], kZero);
-          lstep2[11] = _mm256_unpackhi_epi16(step2[ 5], kZero);
-          lstep2[12] = _mm256_unpacklo_epi16(step2[ 6], kZero);
-          lstep2[13] = _mm256_unpackhi_epi16(step2[ 6], kZero);
-          lstep2[14] = _mm256_unpacklo_epi16(step2[ 7], kZero);
-          lstep2[15] = _mm256_unpackhi_epi16(step2[ 7], kZero);
-          lstep2[ 0] = _mm256_madd_epi16(lstep2[ 0], kOne);
-          lstep2[ 1] = _mm256_madd_epi16(lstep2[ 1], kOne);
-          lstep2[ 2] = _mm256_madd_epi16(lstep2[ 2], kOne);
-          lstep2[ 3] = _mm256_madd_epi16(lstep2[ 3], kOne);
-          lstep2[ 4] = _mm256_madd_epi16(lstep2[ 4], kOne);
-          lstep2[ 5] = _mm256_madd_epi16(lstep2[ 5], kOne);
-          lstep2[ 6] = _mm256_madd_epi16(lstep2[ 6], kOne);
-          lstep2[ 7] = _mm256_madd_epi16(lstep2[ 7], kOne);
-          lstep2[ 8] = _mm256_madd_epi16(lstep2[ 8], kOne);
-          lstep2[ 9] = _mm256_madd_epi16(lstep2[ 9], kOne);
+          lstep2[0] = _mm256_unpacklo_epi16(step2[0], kZero);
+          lstep2[1] = _mm256_unpackhi_epi16(step2[0], kZero);
+          lstep2[2] = _mm256_unpacklo_epi16(step2[1], kZero);
+          lstep2[3] = _mm256_unpackhi_epi16(step2[1], kZero);
+          lstep2[4] = _mm256_unpacklo_epi16(step2[2], kZero);
+          lstep2[5] = _mm256_unpackhi_epi16(step2[2], kZero);
+          lstep2[6] = _mm256_unpacklo_epi16(step2[3], kZero);
+          lstep2[7] = _mm256_unpackhi_epi16(step2[3], kZero);
+          lstep2[8] = _mm256_unpacklo_epi16(step2[4], kZero);
+          lstep2[9] = _mm256_unpackhi_epi16(step2[4], kZero);
+          lstep2[10] = _mm256_unpacklo_epi16(step2[5], kZero);
+          lstep2[11] = _mm256_unpackhi_epi16(step2[5], kZero);
+          lstep2[12] = _mm256_unpacklo_epi16(step2[6], kZero);
+          lstep2[13] = _mm256_unpackhi_epi16(step2[6], kZero);
+          lstep2[14] = _mm256_unpacklo_epi16(step2[7], kZero);
+          lstep2[15] = _mm256_unpackhi_epi16(step2[7], kZero);
+          lstep2[0] = _mm256_madd_epi16(lstep2[0], kOne);
+          lstep2[1] = _mm256_madd_epi16(lstep2[1], kOne);
+          lstep2[2] = _mm256_madd_epi16(lstep2[2], kOne);
+          lstep2[3] = _mm256_madd_epi16(lstep2[3], kOne);
+          lstep2[4] = _mm256_madd_epi16(lstep2[4], kOne);
+          lstep2[5] = _mm256_madd_epi16(lstep2[5], kOne);
+          lstep2[6] = _mm256_madd_epi16(lstep2[6], kOne);
+          lstep2[7] = _mm256_madd_epi16(lstep2[7], kOne);
+          lstep2[8] = _mm256_madd_epi16(lstep2[8], kOne);
+          lstep2[9] = _mm256_madd_epi16(lstep2[9], kOne);
           lstep2[10] = _mm256_madd_epi16(lstep2[10], kOne);
           lstep2[11] = _mm256_madd_epi16(lstep2[11], kOne);
           lstep2[12] = _mm256_madd_epi16(lstep2[12], kOne);
@@ -1189,22 +1410,22 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           lstep2[14] = _mm256_madd_epi16(lstep2[14], kOne);
           lstep2[15] = _mm256_madd_epi16(lstep2[15], kOne);
 
-          lstep3[ 0] = _mm256_add_epi32(lstep2[14], lstep2[ 0]);
-          lstep3[ 1] = _mm256_add_epi32(lstep2[15], lstep2[ 1]);
-          lstep3[ 2] = _mm256_add_epi32(lstep2[12], lstep2[ 2]);
-          lstep3[ 3] = _mm256_add_epi32(lstep2[13], lstep2[ 3]);
-          lstep3[ 4] = _mm256_add_epi32(lstep2[10], lstep2[ 4]);
-          lstep3[ 5] = _mm256_add_epi32(lstep2[11], lstep2[ 5]);
-          lstep3[ 6] = _mm256_add_epi32(lstep2[ 8], lstep2[ 6]);
-          lstep3[ 7] = _mm256_add_epi32(lstep2[ 9], lstep2[ 7]);
-          lstep3[ 8] = _mm256_sub_epi32(lstep2[ 6], lstep2[ 8]);
-          lstep3[ 9] = _mm256_sub_epi32(lstep2[ 7], lstep2[ 9]);
-          lstep3[10] = _mm256_sub_epi32(lstep2[ 4], lstep2[10]);
-          lstep3[11] = _mm256_sub_epi32(lstep2[ 5], lstep2[11]);
-          lstep3[12] = _mm256_sub_epi32(lstep2[ 2], lstep2[12]);
-          lstep3[13] = _mm256_sub_epi32(lstep2[ 3], lstep2[13]);
-          lstep3[14] = _mm256_sub_epi32(lstep2[ 0], lstep2[14]);
-          lstep3[15] = _mm256_sub_epi32(lstep2[ 1], lstep2[15]);
+          lstep3[0] = _mm256_add_epi32(lstep2[14], lstep2[0]);
+          lstep3[1] = _mm256_add_epi32(lstep2[15], lstep2[1]);
+          lstep3[2] = _mm256_add_epi32(lstep2[12], lstep2[2]);
+          lstep3[3] = _mm256_add_epi32(lstep2[13], lstep2[3]);
+          lstep3[4] = _mm256_add_epi32(lstep2[10], lstep2[4]);
+          lstep3[5] = _mm256_add_epi32(lstep2[11], lstep2[5]);
+          lstep3[6] = _mm256_add_epi32(lstep2[8], lstep2[6]);
+          lstep3[7] = _mm256_add_epi32(lstep2[9], lstep2[7]);
+          lstep3[8] = _mm256_sub_epi32(lstep2[6], lstep2[8]);
+          lstep3[9] = _mm256_sub_epi32(lstep2[7], lstep2[9]);
+          lstep3[10] = _mm256_sub_epi32(lstep2[4], lstep2[10]);
+          lstep3[11] = _mm256_sub_epi32(lstep2[5], lstep2[11]);
+          lstep3[12] = _mm256_sub_epi32(lstep2[2], lstep2[12]);
+          lstep3[13] = _mm256_sub_epi32(lstep2[3], lstep2[13]);
+          lstep3[14] = _mm256_sub_epi32(lstep2[0], lstep2[14]);
+          lstep3[15] = _mm256_sub_epi32(lstep2[1], lstep2[15]);
         }
         {
           const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
@@ -1220,14 +1441,22 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
           const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
           // dct_const_round_shift
-          const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
-          const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
-          const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
-          const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
-          const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
-          const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
-          const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
-          const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_10_4 =
+              _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_10_5 =
+              _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_11_4 =
+              _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_11_5 =
+              _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_12_4 =
+              _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_12_5 =
+              _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_13_4 =
+              _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_13_5 =
+              _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
           lstep3[20] = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
           lstep3[21] = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
           lstep3[22] = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
@@ -1342,10 +1571,10 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         // stage 4
         {
          // expanding to 32-bit length prior to addition operations
-          lstep2[16] = _mm256_unpacklo_epi16(step2[ 8], kZero);
-          lstep2[17] = _mm256_unpackhi_epi16(step2[ 8], kZero);
-          lstep2[18] = _mm256_unpacklo_epi16(step2[ 9], kZero);
-          lstep2[19] = _mm256_unpackhi_epi16(step2[ 9], kZero);
+          lstep2[16] = _mm256_unpacklo_epi16(step2[8], kZero);
+          lstep2[17] = _mm256_unpackhi_epi16(step2[8], kZero);
+          lstep2[18] = _mm256_unpacklo_epi16(step2[9], kZero);
+          lstep2[19] = _mm256_unpackhi_epi16(step2[9], kZero);
           lstep2[28] = _mm256_unpacklo_epi16(step2[14], kZero);
           lstep2[29] = _mm256_unpackhi_epi16(step2[14], kZero);
           lstep2[30] = _mm256_unpacklo_epi16(step2[15], kZero);
@@ -1359,14 +1588,14 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           lstep2[30] = _mm256_madd_epi16(lstep2[30], kOne);
           lstep2[31] = _mm256_madd_epi16(lstep2[31], kOne);
 
-          lstep1[ 0] = _mm256_add_epi32(lstep3[ 6], lstep3[ 0]);
-          lstep1[ 1] = _mm256_add_epi32(lstep3[ 7], lstep3[ 1]);
-          lstep1[ 2] = _mm256_add_epi32(lstep3[ 4], lstep3[ 2]);
-          lstep1[ 3] = _mm256_add_epi32(lstep3[ 5], lstep3[ 3]);
-          lstep1[ 4] = _mm256_sub_epi32(lstep3[ 2], lstep3[ 4]);
-          lstep1[ 5] = _mm256_sub_epi32(lstep3[ 3], lstep3[ 5]);
-          lstep1[ 6] = _mm256_sub_epi32(lstep3[ 0], lstep3[ 6]);
-          lstep1[ 7] = _mm256_sub_epi32(lstep3[ 1], lstep3[ 7]);
+          lstep1[0] = _mm256_add_epi32(lstep3[6], lstep3[0]);
+          lstep1[1] = _mm256_add_epi32(lstep3[7], lstep3[1]);
+          lstep1[2] = _mm256_add_epi32(lstep3[4], lstep3[2]);
+          lstep1[3] = _mm256_add_epi32(lstep3[5], lstep3[3]);
+          lstep1[4] = _mm256_sub_epi32(lstep3[2], lstep3[4]);
+          lstep1[5] = _mm256_sub_epi32(lstep3[3], lstep3[5]);
+          lstep1[6] = _mm256_sub_epi32(lstep3[0], lstep3[6]);
+          lstep1[7] = _mm256_sub_epi32(lstep3[1], lstep3[7]);
           lstep1[16] = _mm256_add_epi32(lstep3[22], lstep2[16]);
           lstep1[17] = _mm256_add_epi32(lstep3[23], lstep2[17]);
           lstep1[18] = _mm256_add_epi32(lstep3[20], lstep2[18]);
@@ -1385,57 +1614,62 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           lstep1[31] = _mm256_add_epi32(lstep3[25], lstep2[31]);
         }
         {
-        // to be continued...
-        //
-        const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64);
-        const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64);
-
-        u[0] = _mm256_unpacklo_epi32(lstep3[12], lstep3[10]);
-        u[1] = _mm256_unpackhi_epi32(lstep3[12], lstep3[10]);
-        u[2] = _mm256_unpacklo_epi32(lstep3[13], lstep3[11]);
-        u[3] = _mm256_unpackhi_epi32(lstep3[13], lstep3[11]);
-
-        // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide
-        // instruction latency.
-        v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_m16);
-        v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_m16);
-        v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_m16);
-        v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_m16);
-        v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_p16);
-        v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_p16);
-        v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_p16);
-        v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_p16);
-
-        u[0] = k_packs_epi64_avx2(v[0], v[1]);
-        u[1] = k_packs_epi64_avx2(v[2], v[3]);
-        u[2] = k_packs_epi64_avx2(v[4], v[5]);
-        u[3] = k_packs_epi64_avx2(v[6], v[7]);
-
-        v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-        v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-        v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-        v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-
-        lstep1[10] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
-        lstep1[11] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
-        lstep1[12] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
-        lstep1[13] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+          // to be continued...
+          //
+          const __m256i k32_p16_p16 =
+              pair256_set_epi32(cospi_16_64, cospi_16_64);
+          const __m256i k32_p16_m16 =
+              pair256_set_epi32(cospi_16_64, -cospi_16_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep3[12], lstep3[10]);
+          u[1] = _mm256_unpackhi_epi32(lstep3[12], lstep3[10]);
+          u[2] = _mm256_unpacklo_epi32(lstep3[13], lstep3[11]);
+          u[3] = _mm256_unpackhi_epi32(lstep3[13], lstep3[11]);
+
+          // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide
+          // instruction latency.
+          v[0] = k_madd_epi32_avx2(u[0], k32_p16_m16);
+          v[1] = k_madd_epi32_avx2(u[1], k32_p16_m16);
+          v[2] = k_madd_epi32_avx2(u[2], k32_p16_m16);
+          v[3] = k_madd_epi32_avx2(u[3], k32_p16_m16);
+          v[4] = k_madd_epi32_avx2(u[0], k32_p16_p16);
+          v[5] = k_madd_epi32_avx2(u[1], k32_p16_p16);
+          v[6] = k_madd_epi32_avx2(u[2], k32_p16_p16);
+          v[7] = k_madd_epi32_avx2(u[3], k32_p16_p16);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+
+          lstep1[10] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep1[11] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep1[12] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep1[13] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
         }
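        // The k_madd_epi32_avx2()/k_packs_epi64_avx2() helpers used here and
        // below are, in effect, the 32-bit analogue of the madd_epi16 idiom:
        // they form 64-bit products of paired 32-bit lanes and then narrow
        // the sums back to 32 bits ahead of the usual round/shift (see their
        // definitions earlier in this file); the 64-bit intermediates are
        // what buys the extra precision on this path.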
         {
-          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
-          const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64);
-          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep3[36], lstep3[58]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep3[36], lstep3[58]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep3[37], lstep3[59]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep3[37], lstep3[59]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep3[38], lstep3[56]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep3[38], lstep3[56]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep3[39], lstep3[57]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep3[39], lstep3[57]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep3[40], lstep3[54]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep3[40], lstep3[54]);
+          const __m256i k32_m08_p24 =
+              pair256_set_epi32(-cospi_8_64, cospi_24_64);
+          const __m256i k32_m24_m08 =
+              pair256_set_epi32(-cospi_24_64, -cospi_8_64);
+          const __m256i k32_p24_p08 =
+              pair256_set_epi32(cospi_24_64, cospi_8_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep3[36], lstep3[58]);
+          u[1] = _mm256_unpackhi_epi32(lstep3[36], lstep3[58]);
+          u[2] = _mm256_unpacklo_epi32(lstep3[37], lstep3[59]);
+          u[3] = _mm256_unpackhi_epi32(lstep3[37], lstep3[59]);
+          u[4] = _mm256_unpacklo_epi32(lstep3[38], lstep3[56]);
+          u[5] = _mm256_unpackhi_epi32(lstep3[38], lstep3[56]);
+          u[6] = _mm256_unpacklo_epi32(lstep3[39], lstep3[57]);
+          u[7] = _mm256_unpackhi_epi32(lstep3[39], lstep3[57]);
+          u[8] = _mm256_unpacklo_epi32(lstep3[40], lstep3[54]);
+          u[9] = _mm256_unpackhi_epi32(lstep3[40], lstep3[54]);
           u[10] = _mm256_unpacklo_epi32(lstep3[41], lstep3[55]);
           u[11] = _mm256_unpackhi_epi32(lstep3[41], lstep3[55]);
           u[12] = _mm256_unpacklo_epi32(lstep3[42], lstep3[52]);
@@ -1443,16 +1677,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_unpacklo_epi32(lstep3[43], lstep3[53]);
           u[15] = _mm256_unpackhi_epi32(lstep3[43], lstep3[53]);
 
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m08_p24);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m08_p24);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m08_p24);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m08_p24);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m08_p24);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m08_p24);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m08_p24);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m08_p24);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m24_m08);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m24_m08);
+          v[0] = k_madd_epi32_avx2(u[0], k32_m08_p24);
+          v[1] = k_madd_epi32_avx2(u[1], k32_m08_p24);
+          v[2] = k_madd_epi32_avx2(u[2], k32_m08_p24);
+          v[3] = k_madd_epi32_avx2(u[3], k32_m08_p24);
+          v[4] = k_madd_epi32_avx2(u[4], k32_m08_p24);
+          v[5] = k_madd_epi32_avx2(u[5], k32_m08_p24);
+          v[6] = k_madd_epi32_avx2(u[6], k32_m08_p24);
+          v[7] = k_madd_epi32_avx2(u[7], k32_m08_p24);
+          v[8] = k_madd_epi32_avx2(u[8], k32_m24_m08);
+          v[9] = k_madd_epi32_avx2(u[9], k32_m24_m08);
           v[10] = k_madd_epi32_avx2(u[10], k32_m24_m08);
           v[11] = k_madd_epi32_avx2(u[11], k32_m24_m08);
           v[12] = k_madd_epi32_avx2(u[12], k32_m24_m08);
@@ -1463,29 +1697,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[17] = k_madd_epi32_avx2(u[13], k32_m08_p24);
           v[18] = k_madd_epi32_avx2(u[14], k32_m08_p24);
           v[19] = k_madd_epi32_avx2(u[15], k32_m08_p24);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_m08_p24);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_m08_p24);
+          v[20] = k_madd_epi32_avx2(u[8], k32_m08_p24);
+          v[21] = k_madd_epi32_avx2(u[9], k32_m08_p24);
           v[22] = k_madd_epi32_avx2(u[10], k32_m08_p24);
           v[23] = k_madd_epi32_avx2(u[11], k32_m08_p24);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_p24_p08);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_p24_p08);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_p24_p08);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_p24_p08);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_p24_p08);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_p24_p08);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_p24_p08);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_p24_p08);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          v[24] = k_madd_epi32_avx2(u[4], k32_p24_p08);
+          v[25] = k_madd_epi32_avx2(u[5], k32_p24_p08);
+          v[26] = k_madd_epi32_avx2(u[6], k32_p24_p08);
+          v[27] = k_madd_epi32_avx2(u[7], k32_p24_p08);
+          v[28] = k_madd_epi32_avx2(u[0], k32_p24_p08);
+          v[29] = k_madd_epi32_avx2(u[1], k32_p24_p08);
+          v[30] = k_madd_epi32_avx2(u[2], k32_p24_p08);
+          v[31] = k_madd_epi32_avx2(u[3], k32_p24_p08);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+          u[4] = k_packs_epi64_avx2(v[8], v[9]);
+          u[5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[9] = k_packs_epi64_avx2(v[18], v[19]);
           u[10] = k_packs_epi64_avx2(v[20], v[21]);
           u[11] = k_packs_epi64_avx2(v[22], v[23]);
           u[12] = k_packs_epi64_avx2(v[24], v[25]);
@@ -1493,16 +1727,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = k_packs_epi64_avx2(v[28], v[29]);
           u[15] = k_packs_epi64_avx2(v[30], v[31]);
 
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -1510,16 +1744,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          lstep1[36] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep1[37] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep1[38] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep1[39] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep1[40] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep1[41] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep1[42] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep1[43] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep1[52] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep1[53] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep1[36] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep1[37] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep1[38] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep1[39] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+          lstep1[40] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
+          lstep1[41] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
+          lstep1[42] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
+          lstep1[43] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
+          lstep1[52] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
+          lstep1[53] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
           lstep1[54] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
           lstep1[55] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
           lstep1[56] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
@@ -1529,20 +1763,24 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         }
         // stage 5
         {
-          lstep2[ 8] = _mm256_add_epi32(lstep1[10], lstep3[ 8]);
-          lstep2[ 9] = _mm256_add_epi32(lstep1[11], lstep3[ 9]);
-          lstep2[10] = _mm256_sub_epi32(lstep3[ 8], lstep1[10]);
-          lstep2[11] = _mm256_sub_epi32(lstep3[ 9], lstep1[11]);
+          lstep2[8] = _mm256_add_epi32(lstep1[10], lstep3[8]);
+          lstep2[9] = _mm256_add_epi32(lstep1[11], lstep3[9]);
+          lstep2[10] = _mm256_sub_epi32(lstep3[8], lstep1[10]);
+          lstep2[11] = _mm256_sub_epi32(lstep3[9], lstep1[11]);
           lstep2[12] = _mm256_sub_epi32(lstep3[14], lstep1[12]);
           lstep2[13] = _mm256_sub_epi32(lstep3[15], lstep1[13]);
           lstep2[14] = _mm256_add_epi32(lstep1[12], lstep3[14]);
           lstep2[15] = _mm256_add_epi32(lstep1[13], lstep3[15]);
         }
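          // A stage block like the one above is the multiply-free half of the
          // butterfly network: each output pair is (p + q, p - q) per 32-bit
          // lane (e.g. lstep2[8]/lstep2[10] from p = lstep3[8] and
          // q = lstep1[10]), with the twiddle multiplies deferred to the
          // blocks below. Per coefficient:
          //
          //   sum = p + q;  diff = p - q;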
         {
-          const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64);
-          const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64);
-          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
-          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
+          const __m256i k32_p16_p16 =
+              pair256_set_epi32(cospi_16_64, cospi_16_64);
+          const __m256i k32_p16_m16 =
+              pair256_set_epi32(cospi_16_64, -cospi_16_64);
+          const __m256i k32_p24_p08 =
+              pair256_set_epi32(cospi_24_64, cospi_8_64);
+          const __m256i k32_m08_p24 =
+              pair256_set_epi32(-cospi_8_64, cospi_24_64);
 
           u[0] = _mm256_unpacklo_epi32(lstep1[0], lstep1[2]);
           u[1] = _mm256_unpackhi_epi32(lstep1[0], lstep1[2]);
@@ -1555,16 +1793,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
 
          // TODO(jingning): manually inline k_madd_epi32_avx2 to further hide
           // instruction latency.
-          v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_p16);
-          v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_p16);
-          v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_p16);
-          v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_p16);
-          v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_m16);
-          v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_m16);
-          v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_m16);
-          v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_m16);
-          v[ 8] = k_madd_epi32_avx2(u[4], k32_p24_p08);
-          v[ 9] = k_madd_epi32_avx2(u[5], k32_p24_p08);
+          v[0] = k_madd_epi32_avx2(u[0], k32_p16_p16);
+          v[1] = k_madd_epi32_avx2(u[1], k32_p16_p16);
+          v[2] = k_madd_epi32_avx2(u[2], k32_p16_p16);
+          v[3] = k_madd_epi32_avx2(u[3], k32_p16_p16);
+          v[4] = k_madd_epi32_avx2(u[0], k32_p16_m16);
+          v[5] = k_madd_epi32_avx2(u[1], k32_p16_m16);
+          v[6] = k_madd_epi32_avx2(u[2], k32_p16_m16);
+          v[7] = k_madd_epi32_avx2(u[3], k32_p16_m16);
+          v[8] = k_madd_epi32_avx2(u[4], k32_p24_p08);
+          v[9] = k_madd_epi32_avx2(u[5], k32_p24_p08);
           v[10] = k_madd_epi32_avx2(u[6], k32_p24_p08);
           v[11] = k_madd_epi32_avx2(u[7], k32_p24_p08);
           v[12] = k_madd_epi32_avx2(u[4], k32_m08_p24);
@@ -1599,14 +1837,14 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
           u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
 
-          sign[0] = _mm256_cmpgt_epi32(kZero,u[0]);
-          sign[1] = _mm256_cmpgt_epi32(kZero,u[1]);
-          sign[2] = _mm256_cmpgt_epi32(kZero,u[2]);
-          sign[3] = _mm256_cmpgt_epi32(kZero,u[3]);
-          sign[4] = _mm256_cmpgt_epi32(kZero,u[4]);
-          sign[5] = _mm256_cmpgt_epi32(kZero,u[5]);
-          sign[6] = _mm256_cmpgt_epi32(kZero,u[6]);
-          sign[7] = _mm256_cmpgt_epi32(kZero,u[7]);
+          sign[0] = _mm256_cmpgt_epi32(kZero, u[0]);
+          sign[1] = _mm256_cmpgt_epi32(kZero, u[1]);
+          sign[2] = _mm256_cmpgt_epi32(kZero, u[2]);
+          sign[3] = _mm256_cmpgt_epi32(kZero, u[3]);
+          sign[4] = _mm256_cmpgt_epi32(kZero, u[4]);
+          sign[5] = _mm256_cmpgt_epi32(kZero, u[5]);
+          sign[6] = _mm256_cmpgt_epi32(kZero, u[6]);
+          sign[7] = _mm256_cmpgt_epi32(kZero, u[7]);
 
           u[0] = _mm256_sub_epi32(u[0], sign[0]);
           u[1] = _mm256_sub_epi32(u[1], sign[1]);
@@ -1636,15 +1874,18 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[7] = _mm256_srai_epi32(u[7], 2);
 
           // Combine
-          out[ 0] = _mm256_packs_epi32(u[0], u[1]);
+          out[0] = _mm256_packs_epi32(u[0], u[1]);
           out[16] = _mm256_packs_epi32(u[2], u[3]);
-          out[ 8] = _mm256_packs_epi32(u[4], u[5]);
+          out[8] = _mm256_packs_epi32(u[4], u[5]);
           out[24] = _mm256_packs_epi32(u[6], u[7]);
         }
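          // The cmpgt/sub/add/srai sequence above divides by 4 with symmetric
          // rounding: the compare writes -1 (all ones) into negative lanes,
          // so subtracting it adds 1 back to negative inputs before the +1
          // bias and the shift. Scalar sketch (the C reference's
          // half_round_shift, assuming its usual form):
          //
          //   static INLINE int32_t half_round_shift(int32_t input) {
          //     return (input + 1 + (input < 0)) >> 2;
          //   }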
         {
-          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
-          const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64);
-          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
+          const __m256i k32_m08_p24 =
+              pair256_set_epi32(-cospi_8_64, cospi_24_64);
+          const __m256i k32_m24_m08 =
+              pair256_set_epi32(-cospi_24_64, -cospi_8_64);
+          const __m256i k32_p24_p08 =
+              pair256_set_epi32(cospi_24_64, cospi_8_64);
 
           u[0] = _mm256_unpacklo_epi32(lstep1[18], lstep1[28]);
           u[1] = _mm256_unpackhi_epi32(lstep1[18], lstep1[28]);
@@ -1663,8 +1904,8 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[5] = k_madd_epi32_avx2(u[5], k32_m24_m08);
           v[6] = k_madd_epi32_avx2(u[6], k32_m24_m08);
           v[7] = k_madd_epi32_avx2(u[7], k32_m24_m08);
-          v[ 8] = k_madd_epi32_avx2(u[4], k32_m08_p24);
-          v[ 9] = k_madd_epi32_avx2(u[5], k32_m08_p24);
+          v[8] = k_madd_epi32_avx2(u[4], k32_m08_p24);
+          v[9] = k_madd_epi32_avx2(u[5], k32_m08_p24);
           v[10] = k_madd_epi32_avx2(u[6], k32_m08_p24);
           v[11] = k_madd_epi32_avx2(u[7], k32_m08_p24);
           v[12] = k_madd_epi32_avx2(u[0], k32_p24_p08);
@@ -1735,15 +1976,19 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         }
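          // Every multiply block in this function follows one pattern:
          // unpacklo/unpackhi interleave two coefficient vectors a and b,
          // k_madd_epi32_avx2 forms a[i] * c0 + b[i] * c1 in 64-bit lanes
          // against a pair256_set_epi32(c0, c1) twiddle, and
          // k_packs_epi64_avx2 narrows the sums back to 32 bits ahead of the
          // rounding shift. Conceptually, per output coefficient:
          //
          //   int64_t acc = (int64_t)a * c0 + (int64_t)b * c1;
          //   int32_t res = dct_const_round_shift(acc);  /* sketch above */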
         // stage 6
         {
-          const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64);
-          const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64);
-          const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64);
-          const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64);
-
-          u[0] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[1] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[2] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[3] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+          const __m256i k32_p28_p04 =
+              pair256_set_epi32(cospi_28_64, cospi_4_64);
+          const __m256i k32_p12_p20 =
+              pair256_set_epi32(cospi_12_64, cospi_20_64);
+          const __m256i k32_m20_p12 =
+              pair256_set_epi32(-cospi_20_64, cospi_12_64);
+          const __m256i k32_m04_p28 =
+              pair256_set_epi32(-cospi_4_64, cospi_28_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep2[8], lstep2[14]);
+          u[1] = _mm256_unpackhi_epi32(lstep2[8], lstep2[14]);
+          u[2] = _mm256_unpacklo_epi32(lstep2[9], lstep2[15]);
+          u[3] = _mm256_unpackhi_epi32(lstep2[9], lstep2[15]);
           u[4] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]);
           u[5] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]);
           u[6] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]);
@@ -1752,10 +1997,10 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[9] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]);
           u[10] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]);
           u[11] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]);
-          u[12] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[13] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[14] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[15] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+          u[12] = _mm256_unpacklo_epi32(lstep2[8], lstep2[14]);
+          u[13] = _mm256_unpackhi_epi32(lstep2[8], lstep2[14]);
+          u[14] = _mm256_unpacklo_epi32(lstep2[9], lstep2[15]);
+          u[15] = _mm256_unpackhi_epi32(lstep2[9], lstep2[15]);
 
           v[0] = k_madd_epi32_avx2(u[0], k32_p28_p04);
           v[1] = k_madd_epi32_avx2(u[1], k32_p28_p04);
@@ -1765,8 +2010,8 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[5] = k_madd_epi32_avx2(u[5], k32_p12_p20);
           v[6] = k_madd_epi32_avx2(u[6], k32_p12_p20);
           v[7] = k_madd_epi32_avx2(u[7], k32_p12_p20);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12);
+          v[8] = k_madd_epi32_avx2(u[8], k32_m20_p12);
+          v[9] = k_madd_epi32_avx2(u[9], k32_m20_p12);
           v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12);
           v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12);
           v[12] = k_madd_epi32_avx2(u[12], k32_m04_p28);
@@ -1801,14 +2046,14 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
           u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
 
-          sign[0] = _mm256_cmpgt_epi32(kZero,u[0]);
-          sign[1] = _mm256_cmpgt_epi32(kZero,u[1]);
-          sign[2] = _mm256_cmpgt_epi32(kZero,u[2]);
-          sign[3] = _mm256_cmpgt_epi32(kZero,u[3]);
-          sign[4] = _mm256_cmpgt_epi32(kZero,u[4]);
-          sign[5] = _mm256_cmpgt_epi32(kZero,u[5]);
-          sign[6] = _mm256_cmpgt_epi32(kZero,u[6]);
-          sign[7] = _mm256_cmpgt_epi32(kZero,u[7]);
+          sign[0] = _mm256_cmpgt_epi32(kZero, u[0]);
+          sign[1] = _mm256_cmpgt_epi32(kZero, u[1]);
+          sign[2] = _mm256_cmpgt_epi32(kZero, u[2]);
+          sign[3] = _mm256_cmpgt_epi32(kZero, u[3]);
+          sign[4] = _mm256_cmpgt_epi32(kZero, u[4]);
+          sign[5] = _mm256_cmpgt_epi32(kZero, u[5]);
+          sign[6] = _mm256_cmpgt_epi32(kZero, u[6]);
+          sign[7] = _mm256_cmpgt_epi32(kZero, u[7]);
 
           u[0] = _mm256_sub_epi32(u[0], sign[0]);
           u[1] = _mm256_sub_epi32(u[1], sign[1]);
@@ -1837,7 +2082,7 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[6] = _mm256_srai_epi32(u[6], 2);
           u[7] = _mm256_srai_epi32(u[7], 2);
 
-          out[ 4] = _mm256_packs_epi32(u[0], u[1]);
+          out[4] = _mm256_packs_epi32(u[0], u[1]);
           out[20] = _mm256_packs_epi32(u[2], u[3]);
           out[12] = _mm256_packs_epi32(u[4], u[5]);
           out[28] = _mm256_packs_epi32(u[6], u[7]);
@@ -1861,24 +2106,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           lstep3[31] = _mm256_add_epi32(lstep2[29], lstep1[31]);
         }
         {
-          const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64);
-          const __m256i k32_m28_m04 = pair256_set_epi32(-cospi_28_64, -cospi_4_64);
-          const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64);
-          const __m256i k32_m12_m20 = pair256_set_epi32(-cospi_12_64,
-                                                     -cospi_20_64);
-          const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64);
-          const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep2[34], lstep2[60]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep2[34], lstep2[60]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep2[35], lstep2[61]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep2[35], lstep2[61]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep2[36], lstep2[58]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep2[36], lstep2[58]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep2[37], lstep2[59]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep2[37], lstep2[59]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep2[42], lstep2[52]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep2[42], lstep2[52]);
+          const __m256i k32_m04_p28 =
+              pair256_set_epi32(-cospi_4_64, cospi_28_64);
+          const __m256i k32_m28_m04 =
+              pair256_set_epi32(-cospi_28_64, -cospi_4_64);
+          const __m256i k32_m20_p12 =
+              pair256_set_epi32(-cospi_20_64, cospi_12_64);
+          const __m256i k32_m12_m20 =
+              pair256_set_epi32(-cospi_12_64, -cospi_20_64);
+          const __m256i k32_p12_p20 =
+              pair256_set_epi32(cospi_12_64, cospi_20_64);
+          const __m256i k32_p28_p04 =
+              pair256_set_epi32(cospi_28_64, cospi_4_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep2[34], lstep2[60]);
+          u[1] = _mm256_unpackhi_epi32(lstep2[34], lstep2[60]);
+          u[2] = _mm256_unpacklo_epi32(lstep2[35], lstep2[61]);
+          u[3] = _mm256_unpackhi_epi32(lstep2[35], lstep2[61]);
+          u[4] = _mm256_unpacklo_epi32(lstep2[36], lstep2[58]);
+          u[5] = _mm256_unpackhi_epi32(lstep2[36], lstep2[58]);
+          u[6] = _mm256_unpacklo_epi32(lstep2[37], lstep2[59]);
+          u[7] = _mm256_unpackhi_epi32(lstep2[37], lstep2[59]);
+          u[8] = _mm256_unpacklo_epi32(lstep2[42], lstep2[52]);
+          u[9] = _mm256_unpackhi_epi32(lstep2[42], lstep2[52]);
           u[10] = _mm256_unpacklo_epi32(lstep2[43], lstep2[53]);
           u[11] = _mm256_unpackhi_epi32(lstep2[43], lstep2[53]);
           u[12] = _mm256_unpacklo_epi32(lstep2[44], lstep2[50]);
@@ -1886,16 +2136,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_unpacklo_epi32(lstep2[45], lstep2[51]);
           u[15] = _mm256_unpackhi_epi32(lstep2[45], lstep2[51]);
 
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m04_p28);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m04_p28);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m04_p28);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m04_p28);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m28_m04);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m28_m04);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m28_m04);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m28_m04);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12);
+          v[0] = k_madd_epi32_avx2(u[0], k32_m04_p28);
+          v[1] = k_madd_epi32_avx2(u[1], k32_m04_p28);
+          v[2] = k_madd_epi32_avx2(u[2], k32_m04_p28);
+          v[3] = k_madd_epi32_avx2(u[3], k32_m04_p28);
+          v[4] = k_madd_epi32_avx2(u[4], k32_m28_m04);
+          v[5] = k_madd_epi32_avx2(u[5], k32_m28_m04);
+          v[6] = k_madd_epi32_avx2(u[6], k32_m28_m04);
+          v[7] = k_madd_epi32_avx2(u[7], k32_m28_m04);
+          v[8] = k_madd_epi32_avx2(u[8], k32_m20_p12);
+          v[9] = k_madd_epi32_avx2(u[9], k32_m20_p12);
           v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12);
           v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12);
           v[12] = k_madd_epi32_avx2(u[12], k32_m12_m20);
@@ -1906,29 +2156,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[17] = k_madd_epi32_avx2(u[13], k32_m20_p12);
           v[18] = k_madd_epi32_avx2(u[14], k32_m20_p12);
           v[19] = k_madd_epi32_avx2(u[15], k32_m20_p12);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_p12_p20);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_p12_p20);
+          v[20] = k_madd_epi32_avx2(u[8], k32_p12_p20);
+          v[21] = k_madd_epi32_avx2(u[9], k32_p12_p20);
           v[22] = k_madd_epi32_avx2(u[10], k32_p12_p20);
           v[23] = k_madd_epi32_avx2(u[11], k32_p12_p20);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_m04_p28);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_m04_p28);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_m04_p28);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_m04_p28);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_p28_p04);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_p28_p04);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_p28_p04);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_p28_p04);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          v[24] = k_madd_epi32_avx2(u[4], k32_m04_p28);
+          v[25] = k_madd_epi32_avx2(u[5], k32_m04_p28);
+          v[26] = k_madd_epi32_avx2(u[6], k32_m04_p28);
+          v[27] = k_madd_epi32_avx2(u[7], k32_m04_p28);
+          v[28] = k_madd_epi32_avx2(u[0], k32_p28_p04);
+          v[29] = k_madd_epi32_avx2(u[1], k32_p28_p04);
+          v[30] = k_madd_epi32_avx2(u[2], k32_p28_p04);
+          v[31] = k_madd_epi32_avx2(u[3], k32_p28_p04);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+          u[4] = k_packs_epi64_avx2(v[8], v[9]);
+          u[5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[9] = k_packs_epi64_avx2(v[18], v[19]);
           u[10] = k_packs_epi64_avx2(v[20], v[21]);
           u[11] = k_packs_epi64_avx2(v[22], v[23]);
           u[12] = k_packs_epi64_avx2(v[24], v[25]);
@@ -1936,16 +2186,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = k_packs_epi64_avx2(v[28], v[29]);
           u[15] = k_packs_epi64_avx2(v[30], v[31]);
 
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -1953,16 +2203,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          lstep3[34] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep3[35] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep3[36] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep3[37] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep3[42] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep3[43] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep3[44] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep3[45] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep3[50] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep3[51] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep3[34] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep3[35] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep3[36] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep3[37] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+          lstep3[42] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
+          lstep3[43] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
+          lstep3[44] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
+          lstep3[45] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
+          lstep3[50] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
+          lstep3[51] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
           lstep3[52] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
           lstep3[53] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
           lstep3[58] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
@@ -1972,25 +2222,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         }
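          // The k32_* twiddles below pack cospi_N_64 constants: cos(N*pi/64)
          // in Q14 fixed point (DCT_CONST_BITS == 14). Assuming the usual
          // scaling in the C reference, each constant is approximately:
          //
          //   cospi_N_64 == (int32_t)(cos(N * M_PI / 64) * (1 << 14) + 0.5)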
         // stage 7
         {
-          const __m256i k32_p30_p02 = pair256_set_epi32(cospi_30_64, cospi_2_64);
-          const __m256i k32_p14_p18 = pair256_set_epi32(cospi_14_64, cospi_18_64);
-          const __m256i k32_p22_p10 = pair256_set_epi32(cospi_22_64, cospi_10_64);
-          const __m256i k32_p06_p26 = pair256_set_epi32(cospi_6_64,  cospi_26_64);
-          const __m256i k32_m26_p06 = pair256_set_epi32(-cospi_26_64, cospi_6_64);
-          const __m256i k32_m10_p22 = pair256_set_epi32(-cospi_10_64, cospi_22_64);
-          const __m256i k32_m18_p14 = pair256_set_epi32(-cospi_18_64, cospi_14_64);
-          const __m256i k32_m02_p30 = pair256_set_epi32(-cospi_2_64, cospi_30_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep3[16], lstep3[30]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep3[16], lstep3[30]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep3[17], lstep3[31]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep3[17], lstep3[31]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep3[18], lstep3[28]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep3[18], lstep3[28]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep3[19], lstep3[29]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep3[19], lstep3[29]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep3[20], lstep3[26]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep3[20], lstep3[26]);
+          const __m256i k32_p30_p02 =
+              pair256_set_epi32(cospi_30_64, cospi_2_64);
+          const __m256i k32_p14_p18 =
+              pair256_set_epi32(cospi_14_64, cospi_18_64);
+          const __m256i k32_p22_p10 =
+              pair256_set_epi32(cospi_22_64, cospi_10_64);
+          const __m256i k32_p06_p26 =
+              pair256_set_epi32(cospi_6_64, cospi_26_64);
+          const __m256i k32_m26_p06 =
+              pair256_set_epi32(-cospi_26_64, cospi_6_64);
+          const __m256i k32_m10_p22 =
+              pair256_set_epi32(-cospi_10_64, cospi_22_64);
+          const __m256i k32_m18_p14 =
+              pair256_set_epi32(-cospi_18_64, cospi_14_64);
+          const __m256i k32_m02_p30 =
+              pair256_set_epi32(-cospi_2_64, cospi_30_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep3[16], lstep3[30]);
+          u[1] = _mm256_unpackhi_epi32(lstep3[16], lstep3[30]);
+          u[2] = _mm256_unpacklo_epi32(lstep3[17], lstep3[31]);
+          u[3] = _mm256_unpackhi_epi32(lstep3[17], lstep3[31]);
+          u[4] = _mm256_unpacklo_epi32(lstep3[18], lstep3[28]);
+          u[5] = _mm256_unpackhi_epi32(lstep3[18], lstep3[28]);
+          u[6] = _mm256_unpacklo_epi32(lstep3[19], lstep3[29]);
+          u[7] = _mm256_unpackhi_epi32(lstep3[19], lstep3[29]);
+          u[8] = _mm256_unpacklo_epi32(lstep3[20], lstep3[26]);
+          u[9] = _mm256_unpackhi_epi32(lstep3[20], lstep3[26]);
           u[10] = _mm256_unpacklo_epi32(lstep3[21], lstep3[27]);
           u[11] = _mm256_unpackhi_epi32(lstep3[21], lstep3[27]);
           u[12] = _mm256_unpacklo_epi32(lstep3[22], lstep3[24]);
@@ -1998,16 +2256,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_unpacklo_epi32(lstep3[23], lstep3[25]);
           u[15] = _mm256_unpackhi_epi32(lstep3[23], lstep3[25]);
 
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p30_p02);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p30_p02);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p30_p02);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p30_p02);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p14_p18);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p14_p18);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p14_p18);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p14_p18);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p22_p10);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p22_p10);
+          v[0] = k_madd_epi32_avx2(u[0], k32_p30_p02);
+          v[1] = k_madd_epi32_avx2(u[1], k32_p30_p02);
+          v[2] = k_madd_epi32_avx2(u[2], k32_p30_p02);
+          v[3] = k_madd_epi32_avx2(u[3], k32_p30_p02);
+          v[4] = k_madd_epi32_avx2(u[4], k32_p14_p18);
+          v[5] = k_madd_epi32_avx2(u[5], k32_p14_p18);
+          v[6] = k_madd_epi32_avx2(u[6], k32_p14_p18);
+          v[7] = k_madd_epi32_avx2(u[7], k32_p14_p18);
+          v[8] = k_madd_epi32_avx2(u[8], k32_p22_p10);
+          v[9] = k_madd_epi32_avx2(u[9], k32_p22_p10);
           v[10] = k_madd_epi32_avx2(u[10], k32_p22_p10);
           v[11] = k_madd_epi32_avx2(u[11], k32_p22_p10);
           v[12] = k_madd_epi32_avx2(u[12], k32_p06_p26);
@@ -2018,29 +2276,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[17] = k_madd_epi32_avx2(u[13], k32_m26_p06);
           v[18] = k_madd_epi32_avx2(u[14], k32_m26_p06);
           v[19] = k_madd_epi32_avx2(u[15], k32_m26_p06);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_m10_p22);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_m10_p22);
+          v[20] = k_madd_epi32_avx2(u[8], k32_m10_p22);
+          v[21] = k_madd_epi32_avx2(u[9], k32_m10_p22);
           v[22] = k_madd_epi32_avx2(u[10], k32_m10_p22);
           v[23] = k_madd_epi32_avx2(u[11], k32_m10_p22);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_m18_p14);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_m18_p14);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_m18_p14);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_m18_p14);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_m02_p30);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_m02_p30);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_m02_p30);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_m02_p30);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          v[24] = k_madd_epi32_avx2(u[4], k32_m18_p14);
+          v[25] = k_madd_epi32_avx2(u[5], k32_m18_p14);
+          v[26] = k_madd_epi32_avx2(u[6], k32_m18_p14);
+          v[27] = k_madd_epi32_avx2(u[7], k32_m18_p14);
+          v[28] = k_madd_epi32_avx2(u[0], k32_m02_p30);
+          v[29] = k_madd_epi32_avx2(u[1], k32_m02_p30);
+          v[30] = k_madd_epi32_avx2(u[2], k32_m02_p30);
+          v[31] = k_madd_epi32_avx2(u[3], k32_m02_p30);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+          u[4] = k_packs_epi64_avx2(v[8], v[9]);
+          u[5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[9] = k_packs_epi64_avx2(v[18], v[19]);
           u[10] = k_packs_epi64_avx2(v[20], v[21]);
           u[11] = k_packs_epi64_avx2(v[22], v[23]);
           u[12] = k_packs_epi64_avx2(v[24], v[25]);
@@ -2048,16 +2306,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = k_packs_epi64_avx2(v[28], v[29]);
           u[15] = k_packs_epi64_avx2(v[30], v[31]);
 
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2065,16 +2323,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2082,33 +2340,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
 
-          v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]);
-          v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]);
-          v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]);
-          v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]);
-          v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]);
-          v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]);
-          v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]);
-          v[ 7] = _mm256_cmpgt_epi32(kZero,u[ 7]);
-          v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]);
-          v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]);
-          v[10] = _mm256_cmpgt_epi32(kZero,u[10]);
-          v[11] = _mm256_cmpgt_epi32(kZero,u[11]);
-          v[12] = _mm256_cmpgt_epi32(kZero,u[12]);
-          v[13] = _mm256_cmpgt_epi32(kZero,u[13]);
-          v[14] = _mm256_cmpgt_epi32(kZero,u[14]);
-          v[15] = _mm256_cmpgt_epi32(kZero,u[15]);
-
-          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
+          v[0] = _mm256_cmpgt_epi32(kZero, u[0]);
+          v[1] = _mm256_cmpgt_epi32(kZero, u[1]);
+          v[2] = _mm256_cmpgt_epi32(kZero, u[2]);
+          v[3] = _mm256_cmpgt_epi32(kZero, u[3]);
+          v[4] = _mm256_cmpgt_epi32(kZero, u[4]);
+          v[5] = _mm256_cmpgt_epi32(kZero, u[5]);
+          v[6] = _mm256_cmpgt_epi32(kZero, u[6]);
+          v[7] = _mm256_cmpgt_epi32(kZero, u[7]);
+          v[8] = _mm256_cmpgt_epi32(kZero, u[8]);
+          v[9] = _mm256_cmpgt_epi32(kZero, u[9]);
+          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
+          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
+          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
+          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
+          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
+          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
+
+          u[0] = _mm256_sub_epi32(u[0], v[0]);
+          u[1] = _mm256_sub_epi32(u[1], v[1]);
+          u[2] = _mm256_sub_epi32(u[2], v[2]);
+          u[3] = _mm256_sub_epi32(u[3], v[3]);
+          u[4] = _mm256_sub_epi32(u[4], v[4]);
+          u[5] = _mm256_sub_epi32(u[5], v[5]);
+          u[6] = _mm256_sub_epi32(u[6], v[6]);
+          u[7] = _mm256_sub_epi32(u[7], v[7]);
+          u[8] = _mm256_sub_epi32(u[8], v[8]);
+          u[9] = _mm256_sub_epi32(u[9], v[9]);
           u[10] = _mm256_sub_epi32(u[10], v[10]);
           u[11] = _mm256_sub_epi32(u[11], v[11]);
           u[12] = _mm256_sub_epi32(u[12], v[12]);
@@ -2116,16 +2374,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_sub_epi32(u[14], v[14]);
           u[15] = _mm256_sub_epi32(u[15], v[15]);
 
-          v[ 0] = _mm256_add_epi32(u[ 0], K32One);
-          v[ 1] = _mm256_add_epi32(u[ 1], K32One);
-          v[ 2] = _mm256_add_epi32(u[ 2], K32One);
-          v[ 3] = _mm256_add_epi32(u[ 3], K32One);
-          v[ 4] = _mm256_add_epi32(u[ 4], K32One);
-          v[ 5] = _mm256_add_epi32(u[ 5], K32One);
-          v[ 6] = _mm256_add_epi32(u[ 6], K32One);
-          v[ 7] = _mm256_add_epi32(u[ 7], K32One);
-          v[ 8] = _mm256_add_epi32(u[ 8], K32One);
-          v[ 9] = _mm256_add_epi32(u[ 9], K32One);
+          v[0] = _mm256_add_epi32(u[0], K32One);
+          v[1] = _mm256_add_epi32(u[1], K32One);
+          v[2] = _mm256_add_epi32(u[2], K32One);
+          v[3] = _mm256_add_epi32(u[3], K32One);
+          v[4] = _mm256_add_epi32(u[4], K32One);
+          v[5] = _mm256_add_epi32(u[5], K32One);
+          v[6] = _mm256_add_epi32(u[6], K32One);
+          v[7] = _mm256_add_epi32(u[7], K32One);
+          v[8] = _mm256_add_epi32(u[8], K32One);
+          v[9] = _mm256_add_epi32(u[9], K32One);
           v[10] = _mm256_add_epi32(u[10], K32One);
           v[11] = _mm256_add_epi32(u[11], K32One);
           v[12] = _mm256_add_epi32(u[12], K32One);
@@ -2133,16 +2391,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[14] = _mm256_add_epi32(u[14], K32One);
           v[15] = _mm256_add_epi32(u[15], K32One);
 
-          u[ 0] = _mm256_srai_epi32(v[ 0], 2);
-          u[ 1] = _mm256_srai_epi32(v[ 1], 2);
-          u[ 2] = _mm256_srai_epi32(v[ 2], 2);
-          u[ 3] = _mm256_srai_epi32(v[ 3], 2);
-          u[ 4] = _mm256_srai_epi32(v[ 4], 2);
-          u[ 5] = _mm256_srai_epi32(v[ 5], 2);
-          u[ 6] = _mm256_srai_epi32(v[ 6], 2);
-          u[ 7] = _mm256_srai_epi32(v[ 7], 2);
-          u[ 8] = _mm256_srai_epi32(v[ 8], 2);
-          u[ 9] = _mm256_srai_epi32(v[ 9], 2);
+          u[0] = _mm256_srai_epi32(v[0], 2);
+          u[1] = _mm256_srai_epi32(v[1], 2);
+          u[2] = _mm256_srai_epi32(v[2], 2);
+          u[3] = _mm256_srai_epi32(v[3], 2);
+          u[4] = _mm256_srai_epi32(v[4], 2);
+          u[5] = _mm256_srai_epi32(v[5], 2);
+          u[6] = _mm256_srai_epi32(v[6], 2);
+          u[7] = _mm256_srai_epi32(v[7], 2);
+          u[8] = _mm256_srai_epi32(v[8], 2);
+          u[9] = _mm256_srai_epi32(v[9], 2);
           u[10] = _mm256_srai_epi32(v[10], 2);
           u[11] = _mm256_srai_epi32(v[11], 2);
           u[12] = _mm256_srai_epi32(v[12], 2);
@@ -2150,11 +2408,11 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_srai_epi32(v[14], 2);
           u[15] = _mm256_srai_epi32(v[15], 2);
 
-          out[ 2] = _mm256_packs_epi32(u[0], u[1]);
+          out[2] = _mm256_packs_epi32(u[0], u[1]);
           out[18] = _mm256_packs_epi32(u[2], u[3]);
           out[10] = _mm256_packs_epi32(u[4], u[5]);
           out[26] = _mm256_packs_epi32(u[6], u[7]);
-          out[ 6] = _mm256_packs_epi32(u[8], u[9]);
+          out[6] = _mm256_packs_epi32(u[8], u[9]);
           out[22] = _mm256_packs_epi32(u[10], u[11]);
           out[14] = _mm256_packs_epi32(u[12], u[13]);
           out[30] = _mm256_packs_epi32(u[14], u[15]);
@@ -2195,25 +2453,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
         }
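          // The _mm256_packs_epi32 calls that fill out[] narrow each 32-bit
          // result to int16_t with signed saturation. Scalar sketch of one
          // lane (illustrative only):
          //
          //   static INLINE int16_t saturate_i16(int32_t v) {
          //     return (int16_t)(v > INT16_MAX
          //                          ? INT16_MAX
          //                          : v < INT16_MIN ? INT16_MIN : v);
          //   }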
         // stage 8
         {
-          const __m256i k32_p31_p01 = pair256_set_epi32(cospi_31_64, cospi_1_64);
-          const __m256i k32_p15_p17 = pair256_set_epi32(cospi_15_64, cospi_17_64);
-          const __m256i k32_p23_p09 = pair256_set_epi32(cospi_23_64, cospi_9_64);
-          const __m256i k32_p07_p25 = pair256_set_epi32(cospi_7_64, cospi_25_64);
-          const __m256i k32_m25_p07 = pair256_set_epi32(-cospi_25_64, cospi_7_64);
-          const __m256i k32_m09_p23 = pair256_set_epi32(-cospi_9_64, cospi_23_64);
-          const __m256i k32_m17_p15 = pair256_set_epi32(-cospi_17_64, cospi_15_64);
-          const __m256i k32_m01_p31 = pair256_set_epi32(-cospi_1_64, cospi_31_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep1[32], lstep1[62]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep1[32], lstep1[62]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep1[33], lstep1[63]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep1[33], lstep1[63]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep1[34], lstep1[60]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep1[34], lstep1[60]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep1[35], lstep1[61]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep1[35], lstep1[61]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep1[36], lstep1[58]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep1[36], lstep1[58]);
+          const __m256i k32_p31_p01 =
+              pair256_set_epi32(cospi_31_64, cospi_1_64);
+          const __m256i k32_p15_p17 =
+              pair256_set_epi32(cospi_15_64, cospi_17_64);
+          const __m256i k32_p23_p09 =
+              pair256_set_epi32(cospi_23_64, cospi_9_64);
+          const __m256i k32_p07_p25 =
+              pair256_set_epi32(cospi_7_64, cospi_25_64);
+          const __m256i k32_m25_p07 =
+              pair256_set_epi32(-cospi_25_64, cospi_7_64);
+          const __m256i k32_m09_p23 =
+              pair256_set_epi32(-cospi_9_64, cospi_23_64);
+          const __m256i k32_m17_p15 =
+              pair256_set_epi32(-cospi_17_64, cospi_15_64);
+          const __m256i k32_m01_p31 =
+              pair256_set_epi32(-cospi_1_64, cospi_31_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep1[32], lstep1[62]);
+          u[1] = _mm256_unpackhi_epi32(lstep1[32], lstep1[62]);
+          u[2] = _mm256_unpacklo_epi32(lstep1[33], lstep1[63]);
+          u[3] = _mm256_unpackhi_epi32(lstep1[33], lstep1[63]);
+          u[4] = _mm256_unpacklo_epi32(lstep1[34], lstep1[60]);
+          u[5] = _mm256_unpackhi_epi32(lstep1[34], lstep1[60]);
+          u[6] = _mm256_unpacklo_epi32(lstep1[35], lstep1[61]);
+          u[7] = _mm256_unpackhi_epi32(lstep1[35], lstep1[61]);
+          u[8] = _mm256_unpacklo_epi32(lstep1[36], lstep1[58]);
+          u[9] = _mm256_unpackhi_epi32(lstep1[36], lstep1[58]);
           u[10] = _mm256_unpacklo_epi32(lstep1[37], lstep1[59]);
           u[11] = _mm256_unpackhi_epi32(lstep1[37], lstep1[59]);
           u[12] = _mm256_unpacklo_epi32(lstep1[38], lstep1[56]);
@@ -2221,16 +2487,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_unpacklo_epi32(lstep1[39], lstep1[57]);
           u[15] = _mm256_unpackhi_epi32(lstep1[39], lstep1[57]);
 
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p31_p01);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p31_p01);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p31_p01);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p31_p01);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p15_p17);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p15_p17);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p15_p17);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p15_p17);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p23_p09);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p23_p09);
+          v[0] = k_madd_epi32_avx2(u[0], k32_p31_p01);
+          v[1] = k_madd_epi32_avx2(u[1], k32_p31_p01);
+          v[2] = k_madd_epi32_avx2(u[2], k32_p31_p01);
+          v[3] = k_madd_epi32_avx2(u[3], k32_p31_p01);
+          v[4] = k_madd_epi32_avx2(u[4], k32_p15_p17);
+          v[5] = k_madd_epi32_avx2(u[5], k32_p15_p17);
+          v[6] = k_madd_epi32_avx2(u[6], k32_p15_p17);
+          v[7] = k_madd_epi32_avx2(u[7], k32_p15_p17);
+          v[8] = k_madd_epi32_avx2(u[8], k32_p23_p09);
+          v[9] = k_madd_epi32_avx2(u[9], k32_p23_p09);
           v[10] = k_madd_epi32_avx2(u[10], k32_p23_p09);
           v[11] = k_madd_epi32_avx2(u[11], k32_p23_p09);
           v[12] = k_madd_epi32_avx2(u[12], k32_p07_p25);
@@ -2241,29 +2507,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[17] = k_madd_epi32_avx2(u[13], k32_m25_p07);
           v[18] = k_madd_epi32_avx2(u[14], k32_m25_p07);
           v[19] = k_madd_epi32_avx2(u[15], k32_m25_p07);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_m09_p23);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_m09_p23);
+          v[20] = k_madd_epi32_avx2(u[8], k32_m09_p23);
+          v[21] = k_madd_epi32_avx2(u[9], k32_m09_p23);
           v[22] = k_madd_epi32_avx2(u[10], k32_m09_p23);
           v[23] = k_madd_epi32_avx2(u[11], k32_m09_p23);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_m17_p15);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_m17_p15);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_m17_p15);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_m17_p15);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_m01_p31);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_m01_p31);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_m01_p31);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_m01_p31);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          v[24] = k_madd_epi32_avx2(u[4], k32_m17_p15);
+          v[25] = k_madd_epi32_avx2(u[5], k32_m17_p15);
+          v[26] = k_madd_epi32_avx2(u[6], k32_m17_p15);
+          v[27] = k_madd_epi32_avx2(u[7], k32_m17_p15);
+          v[28] = k_madd_epi32_avx2(u[0], k32_m01_p31);
+          v[29] = k_madd_epi32_avx2(u[1], k32_m01_p31);
+          v[30] = k_madd_epi32_avx2(u[2], k32_m01_p31);
+          v[31] = k_madd_epi32_avx2(u[3], k32_m01_p31);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+          u[4] = k_packs_epi64_avx2(v[8], v[9]);
+          u[5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[9] = k_packs_epi64_avx2(v[18], v[19]);
           u[10] = k_packs_epi64_avx2(v[20], v[21]);
           u[11] = k_packs_epi64_avx2(v[22], v[23]);
           u[12] = k_packs_epi64_avx2(v[24], v[25]);
@@ -2271,16 +2537,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = k_packs_epi64_avx2(v[28], v[29]);
           u[15] = k_packs_epi64_avx2(v[30], v[31]);
 
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2288,16 +2554,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2305,33 +2571,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
 
-          v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]);
-          v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]);
-          v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]);
-          v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]);
-          v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]);
-          v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]);
-          v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]);
-          v[ 7] = _mm256_cmpgt_epi32(kZero,u[ 7]);
-          v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]);
-          v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]);
-          v[10] = _mm256_cmpgt_epi32(kZero,u[10]);
-          v[11] = _mm256_cmpgt_epi32(kZero,u[11]);
-          v[12] = _mm256_cmpgt_epi32(kZero,u[12]);
-          v[13] = _mm256_cmpgt_epi32(kZero,u[13]);
-          v[14] = _mm256_cmpgt_epi32(kZero,u[14]);
-          v[15] = _mm256_cmpgt_epi32(kZero,u[15]);
-
-          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
+          v[0] = _mm256_cmpgt_epi32(kZero, u[0]);
+          v[1] = _mm256_cmpgt_epi32(kZero, u[1]);
+          v[2] = _mm256_cmpgt_epi32(kZero, u[2]);
+          v[3] = _mm256_cmpgt_epi32(kZero, u[3]);
+          v[4] = _mm256_cmpgt_epi32(kZero, u[4]);
+          v[5] = _mm256_cmpgt_epi32(kZero, u[5]);
+          v[6] = _mm256_cmpgt_epi32(kZero, u[6]);
+          v[7] = _mm256_cmpgt_epi32(kZero, u[7]);
+          v[8] = _mm256_cmpgt_epi32(kZero, u[8]);
+          v[9] = _mm256_cmpgt_epi32(kZero, u[9]);
+          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
+          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
+          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
+          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
+          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
+          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
+
+          u[0] = _mm256_sub_epi32(u[0], v[0]);
+          u[1] = _mm256_sub_epi32(u[1], v[1]);
+          u[2] = _mm256_sub_epi32(u[2], v[2]);
+          u[3] = _mm256_sub_epi32(u[3], v[3]);
+          u[4] = _mm256_sub_epi32(u[4], v[4]);
+          u[5] = _mm256_sub_epi32(u[5], v[5]);
+          u[6] = _mm256_sub_epi32(u[6], v[6]);
+          u[7] = _mm256_sub_epi32(u[7], v[7]);
+          u[8] = _mm256_sub_epi32(u[8], v[8]);
+          u[9] = _mm256_sub_epi32(u[9], v[9]);
           u[10] = _mm256_sub_epi32(u[10], v[10]);
           u[11] = _mm256_sub_epi32(u[11], v[11]);
           u[12] = _mm256_sub_epi32(u[12], v[12]);
@@ -2373,35 +2639,43 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_srai_epi32(v[14], 2);
           u[15] = _mm256_srai_epi32(v[15], 2);
 
-          out[ 1] = _mm256_packs_epi32(u[0], u[1]);
+          out[1] = _mm256_packs_epi32(u[0], u[1]);
           out[17] = _mm256_packs_epi32(u[2], u[3]);
-          out[ 9] = _mm256_packs_epi32(u[4], u[5]);
+          out[9] = _mm256_packs_epi32(u[4], u[5]);
           out[25] = _mm256_packs_epi32(u[6], u[7]);
-          out[ 7] = _mm256_packs_epi32(u[8], u[9]);
+          out[7] = _mm256_packs_epi32(u[8], u[9]);
           out[23] = _mm256_packs_epi32(u[10], u[11]);
           out[15] = _mm256_packs_epi32(u[12], u[13]);
           out[31] = _mm256_packs_epi32(u[14], u[15]);
         }
         {
-          const __m256i k32_p27_p05 = pair256_set_epi32(cospi_27_64, cospi_5_64);
-          const __m256i k32_p11_p21 = pair256_set_epi32(cospi_11_64, cospi_21_64);
-          const __m256i k32_p19_p13 = pair256_set_epi32(cospi_19_64, cospi_13_64);
-          const __m256i k32_p03_p29 = pair256_set_epi32(cospi_3_64, cospi_29_64);
-          const __m256i k32_m29_p03 = pair256_set_epi32(-cospi_29_64, cospi_3_64);
-          const __m256i k32_m13_p19 = pair256_set_epi32(-cospi_13_64, cospi_19_64);
-          const __m256i k32_m21_p11 = pair256_set_epi32(-cospi_21_64, cospi_11_64);
-          const __m256i k32_m05_p27 = pair256_set_epi32(-cospi_5_64, cospi_27_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep1[40], lstep1[54]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep1[40], lstep1[54]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep1[41], lstep1[55]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep1[41], lstep1[55]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep1[42], lstep1[52]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep1[42], lstep1[52]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep1[43], lstep1[53]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep1[43], lstep1[53]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep1[44], lstep1[50]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep1[44], lstep1[50]);
+          const __m256i k32_p27_p05 =
+              pair256_set_epi32(cospi_27_64, cospi_5_64);
+          const __m256i k32_p11_p21 =
+              pair256_set_epi32(cospi_11_64, cospi_21_64);
+          const __m256i k32_p19_p13 =
+              pair256_set_epi32(cospi_19_64, cospi_13_64);
+          const __m256i k32_p03_p29 =
+              pair256_set_epi32(cospi_3_64, cospi_29_64);
+          const __m256i k32_m29_p03 =
+              pair256_set_epi32(-cospi_29_64, cospi_3_64);
+          const __m256i k32_m13_p19 =
+              pair256_set_epi32(-cospi_13_64, cospi_19_64);
+          const __m256i k32_m21_p11 =
+              pair256_set_epi32(-cospi_21_64, cospi_11_64);
+          const __m256i k32_m05_p27 =
+              pair256_set_epi32(-cospi_5_64, cospi_27_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep1[40], lstep1[54]);
+          u[1] = _mm256_unpackhi_epi32(lstep1[40], lstep1[54]);
+          u[2] = _mm256_unpacklo_epi32(lstep1[41], lstep1[55]);
+          u[3] = _mm256_unpackhi_epi32(lstep1[41], lstep1[55]);
+          u[4] = _mm256_unpacklo_epi32(lstep1[42], lstep1[52]);
+          u[5] = _mm256_unpackhi_epi32(lstep1[42], lstep1[52]);
+          u[6] = _mm256_unpacklo_epi32(lstep1[43], lstep1[53]);
+          u[7] = _mm256_unpackhi_epi32(lstep1[43], lstep1[53]);
+          u[8] = _mm256_unpacklo_epi32(lstep1[44], lstep1[50]);
+          u[9] = _mm256_unpackhi_epi32(lstep1[44], lstep1[50]);
           u[10] = _mm256_unpacklo_epi32(lstep1[45], lstep1[51]);
           u[11] = _mm256_unpackhi_epi32(lstep1[45], lstep1[51]);
           u[12] = _mm256_unpacklo_epi32(lstep1[46], lstep1[48]);
@@ -2409,16 +2683,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_unpacklo_epi32(lstep1[47], lstep1[49]);
           u[15] = _mm256_unpackhi_epi32(lstep1[47], lstep1[49]);
 
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p27_p05);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p27_p05);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p27_p05);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p27_p05);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p11_p21);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p11_p21);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p11_p21);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p11_p21);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p19_p13);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p19_p13);
+          v[0] = k_madd_epi32_avx2(u[0], k32_p27_p05);
+          v[1] = k_madd_epi32_avx2(u[1], k32_p27_p05);
+          v[2] = k_madd_epi32_avx2(u[2], k32_p27_p05);
+          v[3] = k_madd_epi32_avx2(u[3], k32_p27_p05);
+          v[4] = k_madd_epi32_avx2(u[4], k32_p11_p21);
+          v[5] = k_madd_epi32_avx2(u[5], k32_p11_p21);
+          v[6] = k_madd_epi32_avx2(u[6], k32_p11_p21);
+          v[7] = k_madd_epi32_avx2(u[7], k32_p11_p21);
+          v[8] = k_madd_epi32_avx2(u[8], k32_p19_p13);
+          v[9] = k_madd_epi32_avx2(u[9], k32_p19_p13);
           v[10] = k_madd_epi32_avx2(u[10], k32_p19_p13);
           v[11] = k_madd_epi32_avx2(u[11], k32_p19_p13);
           v[12] = k_madd_epi32_avx2(u[12], k32_p03_p29);
@@ -2429,29 +2703,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[17] = k_madd_epi32_avx2(u[13], k32_m29_p03);
           v[18] = k_madd_epi32_avx2(u[14], k32_m29_p03);
           v[19] = k_madd_epi32_avx2(u[15], k32_m29_p03);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_m13_p19);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_m13_p19);
+          v[20] = k_madd_epi32_avx2(u[8], k32_m13_p19);
+          v[21] = k_madd_epi32_avx2(u[9], k32_m13_p19);
           v[22] = k_madd_epi32_avx2(u[10], k32_m13_p19);
           v[23] = k_madd_epi32_avx2(u[11], k32_m13_p19);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_m21_p11);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_m21_p11);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_m21_p11);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_m21_p11);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_m05_p27);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_m05_p27);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_m05_p27);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_m05_p27);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          v[24] = k_madd_epi32_avx2(u[4], k32_m21_p11);
+          v[25] = k_madd_epi32_avx2(u[5], k32_m21_p11);
+          v[26] = k_madd_epi32_avx2(u[6], k32_m21_p11);
+          v[27] = k_madd_epi32_avx2(u[7], k32_m21_p11);
+          v[28] = k_madd_epi32_avx2(u[0], k32_m05_p27);
+          v[29] = k_madd_epi32_avx2(u[1], k32_m05_p27);
+          v[30] = k_madd_epi32_avx2(u[2], k32_m05_p27);
+          v[31] = k_madd_epi32_avx2(u[3], k32_m05_p27);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+          u[4] = k_packs_epi64_avx2(v[8], v[9]);
+          u[5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[9] = k_packs_epi64_avx2(v[18], v[19]);
           u[10] = k_packs_epi64_avx2(v[20], v[21]);
           u[11] = k_packs_epi64_avx2(v[22], v[23]);
           u[12] = k_packs_epi64_avx2(v[24], v[25]);
@@ -2459,16 +2733,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = k_packs_epi64_avx2(v[28], v[29]);
           u[15] = k_packs_epi64_avx2(v[30], v[31]);
 
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2476,16 +2750,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2493,33 +2767,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
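Annotation: the add-then-arithmetic-shift pair above is the vector form of libvpx's dct_const_round_shift: k__DCT_CONST_ROUNDING holds 1 << (DCT_CONST_BITS - 1), so each lane is rounded to nearest before the extra precision is dropped. A scalar sketch, assuming DCT_CONST_BITS is 14 as in libvpx's txfm_common.h:

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

    /* Scalar equivalent of _mm256_add_epi32 + _mm256_srai_epi32 above. */
    static int32_t dct_const_round_shift_scalar(int64_t x) {
      return (int32_t)((x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
    }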
 
-          v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]);
-          v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]);
-          v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]);
-          v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]);
-          v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]);
-          v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]);
-          v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]);
-          v[ 7] = _mm256_cmpgt_epi32(kZero,u[ 7]);
-          v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]);
-          v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]);
-          v[10] = _mm256_cmpgt_epi32(kZero,u[10]);
-          v[11] = _mm256_cmpgt_epi32(kZero,u[11]);
-          v[12] = _mm256_cmpgt_epi32(kZero,u[12]);
-          v[13] = _mm256_cmpgt_epi32(kZero,u[13]);
-          v[14] = _mm256_cmpgt_epi32(kZero,u[14]);
-          v[15] = _mm256_cmpgt_epi32(kZero,u[15]);
-
-          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
+          v[0] = _mm256_cmpgt_epi32(kZero, u[0]);
+          v[1] = _mm256_cmpgt_epi32(kZero, u[1]);
+          v[2] = _mm256_cmpgt_epi32(kZero, u[2]);
+          v[3] = _mm256_cmpgt_epi32(kZero, u[3]);
+          v[4] = _mm256_cmpgt_epi32(kZero, u[4]);
+          v[5] = _mm256_cmpgt_epi32(kZero, u[5]);
+          v[6] = _mm256_cmpgt_epi32(kZero, u[6]);
+          v[7] = _mm256_cmpgt_epi32(kZero, u[7]);
+          v[8] = _mm256_cmpgt_epi32(kZero, u[8]);
+          v[9] = _mm256_cmpgt_epi32(kZero, u[9]);
+          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
+          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
+          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
+          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
+          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
+          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
+
+          u[0] = _mm256_sub_epi32(u[0], v[0]);
+          u[1] = _mm256_sub_epi32(u[1], v[1]);
+          u[2] = _mm256_sub_epi32(u[2], v[2]);
+          u[3] = _mm256_sub_epi32(u[3], v[3]);
+          u[4] = _mm256_sub_epi32(u[4], v[4]);
+          u[5] = _mm256_sub_epi32(u[5], v[5]);
+          u[6] = _mm256_sub_epi32(u[6], v[6]);
+          u[7] = _mm256_sub_epi32(u[7], v[7]);
+          u[8] = _mm256_sub_epi32(u[8], v[8]);
+          u[9] = _mm256_sub_epi32(u[9], v[9]);
           u[10] = _mm256_sub_epi32(u[10], v[10]);
           u[11] = _mm256_sub_epi32(u[11], v[11]);
           u[12] = _mm256_sub_epi32(u[12], v[12]);
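Annotation: _mm256_cmpgt_epi32(kZero, u) yields all-ones (numerically -1) in every lane where u is negative, so the subtraction that follows adds exactly 1 to the negative lanes. Together with the +1 bias added in the lines this diff elides and the >> 2 visible at the top of the next hunk, this reproduces the symmetric (x + 1 + (x < 0)) >> 2 rounding used by the C rows function later in this patch. A scalar model of the mask trick:

    #include <stdint.h>

    /* The compare yields -1 (all ones) for negative lanes and 0 otherwise,
     * so subtracting the mask adds one only where x < 0. */
    static int32_t add_one_if_negative(int32_t x) {
      const int32_t mask = -(x < 0);  /* like _mm256_cmpgt_epi32(0, x) */
      return x - mask;                /* like _mm256_sub_epi32(u, v)   */
    }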
@@ -2561,11 +2835,11 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           u[14] = _mm256_srai_epi32(v[14], 2);
           u[15] = _mm256_srai_epi32(v[15], 2);
 
-          out[ 5] = _mm256_packs_epi32(u[0], u[1]);
+          out[5] = _mm256_packs_epi32(u[0], u[1]);
           out[21] = _mm256_packs_epi32(u[2], u[3]);
           out[13] = _mm256_packs_epi32(u[4], u[5]);
           out[29] = _mm256_packs_epi32(u[6], u[7]);
-          out[ 3] = _mm256_packs_epi32(u[8], u[9]);
+          out[3] = _mm256_packs_epi32(u[8], u[9]);
           out[19] = _mm256_packs_epi32(u[10], u[11]);
           out[11] = _mm256_packs_epi32(u[12], u[13]);
           out[27] = _mm256_packs_epi32(u[14], u[15]);
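Annotation: _mm256_packs_epi32 narrows the rounded 32-bit results back to 16-bit lanes with signed saturation; the scattered indices (out[5], out[21], out[13], ...) appear to be the natural row positions these odd-half coefficients occupy in the 32-point DCT output, so no separate reordering pass is needed. A scalar model of one lane of the saturating pack:

    #include <stdint.h>

    /* Clamp a 32-bit value into int16_t range, as one _mm256_packs_epi32
     * lane does. */
    static int16_t packs_epi32_lane(int32_t x) {
      if (x > INT16_MAX) return INT16_MAX;
      if (x < INT16_MIN) return INT16_MIN;
      return (int16_t)x;
    }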
@@ -2575,13 +2849,13 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
       // Transpose the results, do it as four 8x8 transposes.
       {
         int transpose_block;
-        int16_t *output_currStep,*output_nextStep;
-        if (0 == pass){
-                 output_currStep = &intermediate[column_start * 32];
-                 output_nextStep = &intermediate[(column_start + 8) * 32];
-        } else{
-                 output_currStep = &output_org[column_start * 32];
-                 output_nextStep = &output_org[(column_start + 8) * 32];
+        int16_t *output_currStep, *output_nextStep;
+        if (0 == pass) {
+          output_currStep = &intermediate[column_start * 32];
+          output_nextStep = &intermediate[(column_start + 8) * 32];
+        } else {
+          output_currStep = &output_org[column_start * 32];
+          output_nextStep = &output_org[(column_start + 8) * 32];
         }
         for (transpose_block = 0; transpose_block < 4; ++transpose_block) {
           __m256i *this_out = &out[8 * transpose_block];
@@ -2684,23 +2958,39 @@ void FDCT32x32_2D_AVX2(const int16_t *input,
           }
           // Note: even though all these stores are aligned, using the aligned
           //       intrinsic makes the code slightly slower.
-          _mm_storeu_si128((__m128i *)(output_currStep + 0 * 32), _mm256_castsi256_si128(tr2_0));
-          _mm_storeu_si128((__m128i *)(output_currStep + 1 * 32), _mm256_castsi256_si128(tr2_1));
-          _mm_storeu_si128((__m128i *)(output_currStep + 2 * 32), _mm256_castsi256_si128(tr2_2));
-          _mm_storeu_si128((__m128i *)(output_currStep + 3 * 32), _mm256_castsi256_si128(tr2_3));
-          _mm_storeu_si128((__m128i *)(output_currStep + 4 * 32), _mm256_castsi256_si128(tr2_4));
-          _mm_storeu_si128((__m128i *)(output_currStep + 5 * 32), _mm256_castsi256_si128(tr2_5));
-          _mm_storeu_si128((__m128i *)(output_currStep + 6 * 32), _mm256_castsi256_si128(tr2_6));
-          _mm_storeu_si128((__m128i *)(output_currStep + 7 * 32), _mm256_castsi256_si128(tr2_7));
-
-          _mm_storeu_si128((__m128i *)(output_nextStep + 0 * 32), _mm256_extractf128_si256(tr2_0,1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 1 * 32), _mm256_extractf128_si256(tr2_1,1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 2 * 32), _mm256_extractf128_si256(tr2_2,1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 3 * 32), _mm256_extractf128_si256(tr2_3,1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 4 * 32), _mm256_extractf128_si256(tr2_4,1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 5 * 32), _mm256_extractf128_si256(tr2_5,1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 6 * 32), _mm256_extractf128_si256(tr2_6,1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 7 * 32), _mm256_extractf128_si256(tr2_7,1));
+          _mm_storeu_si128((__m128i *)(output_currStep + 0 * 32),
+                           _mm256_castsi256_si128(tr2_0));
+          _mm_storeu_si128((__m128i *)(output_currStep + 1 * 32),
+                           _mm256_castsi256_si128(tr2_1));
+          _mm_storeu_si128((__m128i *)(output_currStep + 2 * 32),
+                           _mm256_castsi256_si128(tr2_2));
+          _mm_storeu_si128((__m128i *)(output_currStep + 3 * 32),
+                           _mm256_castsi256_si128(tr2_3));
+          _mm_storeu_si128((__m128i *)(output_currStep + 4 * 32),
+                           _mm256_castsi256_si128(tr2_4));
+          _mm_storeu_si128((__m128i *)(output_currStep + 5 * 32),
+                           _mm256_castsi256_si128(tr2_5));
+          _mm_storeu_si128((__m128i *)(output_currStep + 6 * 32),
+                           _mm256_castsi256_si128(tr2_6));
+          _mm_storeu_si128((__m128i *)(output_currStep + 7 * 32),
+                           _mm256_castsi256_si128(tr2_7));
+
+          _mm_storeu_si128((__m128i *)(output_nextStep + 0 * 32),
+                           _mm256_extractf128_si256(tr2_0, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 1 * 32),
+                           _mm256_extractf128_si256(tr2_1, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 2 * 32),
+                           _mm256_extractf128_si256(tr2_2, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 3 * 32),
+                           _mm256_extractf128_si256(tr2_3, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 4 * 32),
+                           _mm256_extractf128_si256(tr2_4, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 5 * 32),
+                           _mm256_extractf128_si256(tr2_5, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 6 * 32),
+                           _mm256_extractf128_si256(tr2_6, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 7 * 32),
+                           _mm256_extractf128_si256(tr2_7, 1));
           // Process next 8x8
           output_currStep += 8;
           output_nextStep += 8;
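Annotation: each 256-bit tr2_N register holds two transposed 8x8 rows. _mm256_castsi256_si128 is a free reinterpretation exposing the low 128 bits (the row stored to output_currStep), while _mm256_extractf128_si256(x, 1) pulls the high lane for the row eight lines further down (output_nextStep). A minimal sketch of the split, with hypothetical destination names:

    #include <immintrin.h>
    #include <stdint.h>

    /* Store the two 128-bit halves of one AVX2 register to two rows of a
     * 32-column int16_t block, mirroring the tail of the transpose above.
     * row0 and row1 are hypothetical destinations 8 * 32 elements apart. */
    static void store_split(__m256i v, int16_t *row0, int16_t *row1) {
      _mm_storeu_si128((__m128i *)row0, _mm256_castsi256_si128(v));
      _mm_storeu_si128((__m128i *)row1, _mm256_extractf128_si256(v, 1));
    }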
diff --git a/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h b/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h
index b85ae103fa470a6c2356e671653070a8ae0eef62..37443339094b30acd90437e44c3e8b6007d32055 100644
--- a/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h
+++ b/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h
@@ -22,42 +22,37 @@
 #define SUB_EPI16 _mm_subs_epi16
 #if FDCT32x32_HIGH_PRECISION
 void vpx_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
-    int i, j;
-    for (i = 0; i < 32; ++i) {
-      tran_high_t temp_in[32], temp_out[32];
-      for (j = 0; j < 32; ++j)
-        temp_in[j] = intermediate[j * 32 + i];
-      vpx_fdct32(temp_in, temp_out, 0);
-      for (j = 0; j < 32; ++j)
-        out[j + i * 32] =
-            (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
-    }
+  int i, j;
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
+    vpx_fdct32(temp_in, temp_out, 0);
+    for (j = 0; j < 32; ++j)
+      out[j + i * 32] =
+          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
+  }
 }
-  #define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_c
-  #define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rows_c
 #else
 void vpx_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
-    int i, j;
-    for (i = 0; i < 32; ++i) {
-      tran_high_t temp_in[32], temp_out[32];
-      for (j = 0; j < 32; ++j)
-        temp_in[j] = intermediate[j * 32 + i];
-      vpx_fdct32(temp_in, temp_out, 1);
-      for (j = 0; j < 32; ++j)
-        out[j + i * 32] = (tran_low_t)temp_out[j];
-    }
+  int i, j;
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
+    vpx_fdct32(temp_in, temp_out, 1);
+    for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
+  }
 }
-  #define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_rd_c
-  #define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rd_rows_c
 #endif  // FDCT32x32_HIGH_PRECISION
 #else
 #define ADD_EPI16 _mm_add_epi16
 #define SUB_EPI16 _mm_sub_epi16
 #endif  // DCT_HIGH_BIT_DEPTH
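Annotation: the (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2 expression in the high-precision rows function above divides by four with rounding that is symmetric about zero (ties go toward zero), while the rd variant stores vpx_fdct32's round-mode output directly. A worked check of the symmetry:

    /* (x + 1 + (x < 0)) >> 2 rounds x/4 to nearest, ties toward zero:
     *   x =  6  ->  (6 + 1 + 0) >> 2 =  1   ( 1.5  rounds to  1)
     *   x = -6  -> (-6 + 1 + 1) >> 2 = -1   (-1.5  rounds to -1)
     *   x =  7  ->  (7 + 1 + 0) >> 2 =  2   ( 1.75 rounds to  2)
     * A plain (x + 2) >> 2 would round +6 to 2 but -6 to -1,
     * breaking the sign symmetry. */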
 
-
-void FDCT32x32_2D(const int16_t *input,
-                  tran_low_t *output_org, int stride) {
+void FDCT32x32_2D(const int16_t *input, tran_low_t *output_org, int stride) {
   // Calculate pre-multiplied strides
   const int str1 = stride;
   const int str2 = 2 * stride;
@@ -70,42 +65,42 @@ void FDCT32x32_2D(const int16_t *input,
   //    by constructing the 32 bit constant corresponding to that pair.
   const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64,   cospi_24_64);
+  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
   const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
-  const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64,  cospi_8_64);
-  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64,  cospi_20_64);
-  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64,  cospi_12_64);
-  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64,   cospi_28_64);
-  const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64,  cospi_4_64);
+  const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64);
+  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64);
   const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
   const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
-  const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64,  cospi_2_64);
-  const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64,  cospi_18_64);
-  const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64,  cospi_10_64);
-  const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64,   cospi_26_64);
-  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64,  cospi_6_64);
-  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64,  cospi_22_64);
-  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64,  cospi_14_64);
-  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64,   cospi_30_64);
-  const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64,  cospi_1_64);
-  const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64,  cospi_17_64);
-  const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64,  cospi_9_64);
-  const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64,   cospi_25_64);
-  const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64,  cospi_7_64);
-  const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64,   cospi_23_64);
-  const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64,  cospi_15_64);
-  const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64,   cospi_31_64);
-  const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64,  cospi_5_64);
-  const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64,  cospi_21_64);
-  const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64,  cospi_13_64);
-  const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64,   cospi_29_64);
-  const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64,  cospi_3_64);
-  const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64,  cospi_19_64);
-  const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64,  cospi_11_64);
-  const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64,   cospi_27_64);
+  const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64);
+  const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64);
+  const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64);
+  const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64);
+  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
+  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
+  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
+  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
+  const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, cospi_1_64);
+  const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64);
+  const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64, cospi_9_64);
+  const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64);
+  const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64);
+  const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64);
+  const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, cospi_15_64);
+  const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64);
+  const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64);
+  const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64);
+  const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64);
+  const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64);
+  const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64);
+  const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64);
+  const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64);
+  const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64);
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i kZero = _mm_set1_epi16(0);
-  const __m128i kOne  = _mm_set1_epi16(1);
+  const __m128i kOne = _mm_set1_epi16(1);
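Annotation: these constants pack two 16-bit cosine weights into each 32-bit lane so that a single _mm_madd_epi16 on an unpacklo-interleaved pair of inputs computes a full butterfly term, a * c0 + b * c1, already widened to 32 bits; this is the pattern used throughout the stages below, and the 16-bit analogue of the 32-bit AVX2 sketch earlier. A scalar model, assuming pair_set_epi16(c0, c1) lines its weights up with _mm_unpacklo_epi16(a, b):

    #include <stdint.h>

    /* Scalar model of one 32-bit lane of _mm_madd_epi16 after
     * _mm_unpacklo_epi16(a, b): two 16x16 products summed into 32 bits.
     * The c0/c1-to-a/b pairing is an assumption about pair_set_epi16. */
    static int32_t madd_lane(int16_t a, int16_t b, int16_t c0, int16_t c1) {
      return (int32_t)a * c0 + (int32_t)b * c1;
    }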
   // Do the two transform/transpose passes
   int pass;
 #if DCT_HIGH_BIT_DEPTH
@@ -123,125 +118,125 @@ void FDCT32x32_2D(const int16_t *input,
       // Note: even though all the loads below are aligned, using the aligned
       //       intrinsic makes the code slightly slower.
       if (0 == pass) {
-        const int16_t *in  = &input[column_start];
+        const int16_t *in = &input[column_start];
         // step1[i] =  (in[ 0 * stride] + in[(32 -  1) * stride]) << 2;
         // Note: the next four blocks could be in a loop. That would help the
         //       instruction cache but is actually slower.
         {
-          const int16_t *ina =  in +  0 * str1;
-          const int16_t *inb =  in + 31 * str1;
-          __m128i *step1a = &step1[ 0];
+          const int16_t *ina = in + 0 * str1;
+          const int16_t *inb = in + 31 * str1;
+          __m128i *step1a = &step1[0];
           __m128i *step1b = &step1[31];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[0] = _mm_add_epi16(ina0, inb0);
+          step1a[1] = _mm_add_epi16(ina1, inb1);
+          step1a[2] = _mm_add_epi16(ina2, inb2);
+          step1a[3] = _mm_add_epi16(ina3, inb3);
           step1b[-3] = _mm_sub_epi16(ina3, inb3);
           step1b[-2] = _mm_sub_epi16(ina2, inb2);
           step1b[-1] = _mm_sub_epi16(ina1, inb1);
           step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
         }
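Annotation: each of these four blocks folds a symmetric pair of input rows (i and 31 - i) into a sum written forward through step1a and a difference written backward through step1b, then shifts left by 2, the x4 input up-scaling the forward 32x32 DCT applies on its first pass. A scalar sketch of one element:

    #include <stdint.h>

    /* One stage-1 element: rows i and 31-i combine into a scaled
     * sum/difference pair, matching the add/sub/slli trio above. */
    static void stage1_pair(int16_t a, int16_t b,
                            int16_t *sum, int16_t *diff) {
      *sum  = (int16_t)((a + b) << 2);  /* step1a[i]       */
      *diff = (int16_t)((a - b) << 2);  /* step1b[-(i&3)]  */
    }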
         {
-          const int16_t *ina =  in +  4 * str1;
-          const int16_t *inb =  in + 27 * str1;
-          __m128i *step1a = &step1[ 4];
+          const int16_t *ina = in + 4 * str1;
+          const int16_t *inb = in + 27 * str1;
+          __m128i *step1a = &step1[4];
           __m128i *step1b = &step1[27];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[0] = _mm_add_epi16(ina0, inb0);
+          step1a[1] = _mm_add_epi16(ina1, inb1);
+          step1a[2] = _mm_add_epi16(ina2, inb2);
+          step1a[3] = _mm_add_epi16(ina3, inb3);
           step1b[-3] = _mm_sub_epi16(ina3, inb3);
           step1b[-2] = _mm_sub_epi16(ina2, inb2);
           step1b[-1] = _mm_sub_epi16(ina1, inb1);
           step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
         }
         {
-          const int16_t *ina =  in +  8 * str1;
-          const int16_t *inb =  in + 23 * str1;
-          __m128i *step1a = &step1[ 8];
+          const int16_t *ina = in + 8 * str1;
+          const int16_t *inb = in + 23 * str1;
+          __m128i *step1a = &step1[8];
           __m128i *step1b = &step1[23];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[0] = _mm_add_epi16(ina0, inb0);
+          step1a[1] = _mm_add_epi16(ina1, inb1);
+          step1a[2] = _mm_add_epi16(ina2, inb2);
+          step1a[3] = _mm_add_epi16(ina3, inb3);
           step1b[-3] = _mm_sub_epi16(ina3, inb3);
           step1b[-2] = _mm_sub_epi16(ina2, inb2);
           step1b[-1] = _mm_sub_epi16(ina1, inb1);
           step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
           step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
         }
         {
-          const int16_t *ina =  in + 12 * str1;
-          const int16_t *inb =  in + 19 * str1;
+          const int16_t *ina = in + 12 * str1;
+          const int16_t *inb = in + 19 * str1;
           __m128i *step1a = &step1[12];
           __m128i *step1b = &step1[19];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[0] = _mm_add_epi16(ina0, inb0);
+          step1a[1] = _mm_add_epi16(ina1, inb1);
+          step1a[2] = _mm_add_epi16(ina2, inb2);
+          step1a[3] = _mm_add_epi16(ina3, inb3);
           step1b[-3] = _mm_sub_epi16(ina3, inb3);
           step1b[-2] = _mm_sub_epi16(ina2, inb2);
           step1b[-1] = _mm_sub_epi16(ina1, inb1);
           step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1a[0] = _mm_slli_epi16(step1a[0], 2);
+          step1a[1] = _mm_slli_epi16(step1a[1], 2);
+          step1a[2] = _mm_slli_epi16(step1a[2], 2);
+          step1a[3] = _mm_slli_epi16(step1a[3], 2);
           step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
           step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
           step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
@@ -256,14 +251,14 @@ void FDCT32x32_2D(const int16_t *input,
         // Note: the next four blocks could be in a loop. That would help the
         //       instruction cache but is actually slower.
         {
-          __m128i in00  = _mm_loadu_si128((const __m128i *)(in +  0 * 32));
-          __m128i in01  = _mm_loadu_si128((const __m128i *)(in +  1 * 32));
-          __m128i in02  = _mm_loadu_si128((const __m128i *)(in +  2 * 32));
-          __m128i in03  = _mm_loadu_si128((const __m128i *)(in +  3 * 32));
-          __m128i in28  = _mm_loadu_si128((const __m128i *)(in + 28 * 32));
-          __m128i in29  = _mm_loadu_si128((const __m128i *)(in + 29 * 32));
-          __m128i in30  = _mm_loadu_si128((const __m128i *)(in + 30 * 32));
-          __m128i in31  = _mm_loadu_si128((const __m128i *)(in + 31 * 32));
+          __m128i in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 32));
+          __m128i in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 32));
+          __m128i in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 32));
+          __m128i in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 32));
+          __m128i in28 = _mm_loadu_si128((const __m128i *)(in + 28 * 32));
+          __m128i in29 = _mm_loadu_si128((const __m128i *)(in + 29 * 32));
+          __m128i in30 = _mm_loadu_si128((const __m128i *)(in + 30 * 32));
+          __m128i in31 = _mm_loadu_si128((const __m128i *)(in + 31 * 32));
           step1[0] = ADD_EPI16(in00, in31);
           step1[1] = ADD_EPI16(in01, in30);
           step1[2] = ADD_EPI16(in02, in29);
@@ -283,14 +278,14 @@ void FDCT32x32_2D(const int16_t *input,
 #endif  // DCT_HIGH_BIT_DEPTH
         }
         {
-          __m128i in04  = _mm_loadu_si128((const __m128i *)(in +  4 * 32));
-          __m128i in05  = _mm_loadu_si128((const __m128i *)(in +  5 * 32));
-          __m128i in06  = _mm_loadu_si128((const __m128i *)(in +  6 * 32));
-          __m128i in07  = _mm_loadu_si128((const __m128i *)(in +  7 * 32));
-          __m128i in24  = _mm_loadu_si128((const __m128i *)(in + 24 * 32));
-          __m128i in25  = _mm_loadu_si128((const __m128i *)(in + 25 * 32));
-          __m128i in26  = _mm_loadu_si128((const __m128i *)(in + 26 * 32));
-          __m128i in27  = _mm_loadu_si128((const __m128i *)(in + 27 * 32));
+          __m128i in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 32));
+          __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 32));
+          __m128i in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 32));
+          __m128i in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 32));
+          __m128i in24 = _mm_loadu_si128((const __m128i *)(in + 24 * 32));
+          __m128i in25 = _mm_loadu_si128((const __m128i *)(in + 25 * 32));
+          __m128i in26 = _mm_loadu_si128((const __m128i *)(in + 26 * 32));
+          __m128i in27 = _mm_loadu_si128((const __m128i *)(in + 27 * 32));
           step1[4] = ADD_EPI16(in04, in27);
           step1[5] = ADD_EPI16(in05, in26);
           step1[6] = ADD_EPI16(in06, in25);
@@ -310,14 +305,14 @@ void FDCT32x32_2D(const int16_t *input,
 #endif  // DCT_HIGH_BIT_DEPTH
         }
         {
-          __m128i in08  = _mm_loadu_si128((const __m128i *)(in +  8 * 32));
-          __m128i in09  = _mm_loadu_si128((const __m128i *)(in +  9 * 32));
-          __m128i in10  = _mm_loadu_si128((const __m128i *)(in + 10 * 32));
-          __m128i in11  = _mm_loadu_si128((const __m128i *)(in + 11 * 32));
-          __m128i in20  = _mm_loadu_si128((const __m128i *)(in + 20 * 32));
-          __m128i in21  = _mm_loadu_si128((const __m128i *)(in + 21 * 32));
-          __m128i in22  = _mm_loadu_si128((const __m128i *)(in + 22 * 32));
-          __m128i in23  = _mm_loadu_si128((const __m128i *)(in + 23 * 32));
+          __m128i in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 32));
+          __m128i in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 32));
+          __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32));
+          __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32));
+          __m128i in20 = _mm_loadu_si128((const __m128i *)(in + 20 * 32));
+          __m128i in21 = _mm_loadu_si128((const __m128i *)(in + 21 * 32));
+          __m128i in22 = _mm_loadu_si128((const __m128i *)(in + 22 * 32));
+          __m128i in23 = _mm_loadu_si128((const __m128i *)(in + 23 * 32));
           step1[8] = ADD_EPI16(in08, in23);
           step1[9] = ADD_EPI16(in09, in22);
           step1[10] = ADD_EPI16(in10, in21);
@@ -337,14 +332,14 @@ void FDCT32x32_2D(const int16_t *input,
 #endif  // DCT_HIGH_BIT_DEPTH
         }
         {
-          __m128i in12  = _mm_loadu_si128((const __m128i *)(in + 12 * 32));
-          __m128i in13  = _mm_loadu_si128((const __m128i *)(in + 13 * 32));
-          __m128i in14  = _mm_loadu_si128((const __m128i *)(in + 14 * 32));
-          __m128i in15  = _mm_loadu_si128((const __m128i *)(in + 15 * 32));
-          __m128i in16  = _mm_loadu_si128((const __m128i *)(in + 16 * 32));
-          __m128i in17  = _mm_loadu_si128((const __m128i *)(in + 17 * 32));
-          __m128i in18  = _mm_loadu_si128((const __m128i *)(in + 18 * 32));
-          __m128i in19  = _mm_loadu_si128((const __m128i *)(in + 19 * 32));
+          __m128i in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 32));
+          __m128i in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 32));
+          __m128i in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 32));
+          __m128i in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 32));
+          __m128i in16 = _mm_loadu_si128((const __m128i *)(in + 16 * 32));
+          __m128i in17 = _mm_loadu_si128((const __m128i *)(in + 17 * 32));
+          __m128i in18 = _mm_loadu_si128((const __m128i *)(in + 18 * 32));
+          __m128i in19 = _mm_loadu_si128((const __m128i *)(in + 19 * 32));
           step1[12] = ADD_EPI16(in12, in19);
           step1[13] = ADD_EPI16(in13, in18);
           step1[14] = ADD_EPI16(in14, in17);
@@ -372,10 +367,10 @@ void FDCT32x32_2D(const int16_t *input,
         step2[3] = ADD_EPI16(step1[3], step1[12]);
         step2[4] = ADD_EPI16(step1[4], step1[11]);
         step2[5] = ADD_EPI16(step1[5], step1[10]);
-        step2[6] = ADD_EPI16(step1[6], step1[ 9]);
-        step2[7] = ADD_EPI16(step1[7], step1[ 8]);
-        step2[8] = SUB_EPI16(step1[7], step1[ 8]);
-        step2[9] = SUB_EPI16(step1[6], step1[ 9]);
+        step2[6] = ADD_EPI16(step1[6], step1[9]);
+        step2[7] = ADD_EPI16(step1[7], step1[8]);
+        step2[8] = SUB_EPI16(step1[7], step1[8]);
+        step2[9] = SUB_EPI16(step1[6], step1[9]);
         step2[10] = SUB_EPI16(step1[5], step1[10]);
         step2[11] = SUB_EPI16(step1[4], step1[11]);
         step2[12] = SUB_EPI16(step1[3], step1[12]);
@@ -384,9 +379,8 @@ void FDCT32x32_2D(const int16_t *input,
         step2[15] = SUB_EPI16(step1[0], step1[15]);
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x16(
-            &step2[0], &step2[1], &step2[2], &step2[3],
-            &step2[4], &step2[5], &step2[6], &step2[7],
-            &step2[8], &step2[9], &step2[10], &step2[11],
+            &step2[0], &step2[1], &step2[2], &step2[3], &step2[4], &step2[5],
+            &step2[6], &step2[7], &step2[8], &step2[9], &step2[10], &step2[11],
             &step2[12], &step2[13], &step2[14], &step2[15]);
         if (overflow) {
           if (pass == 0)
@@ -482,16 +476,16 @@ void FDCT32x32_2D(const int16_t *input,
       // drop the magnitude by half, hence the intermediate values are within
       // the range of 16 bits.
       if (1 == pass) {
-        __m128i s3_00_0 = _mm_cmplt_epi16(step2[ 0], kZero);
-        __m128i s3_01_0 = _mm_cmplt_epi16(step2[ 1], kZero);
-        __m128i s3_02_0 = _mm_cmplt_epi16(step2[ 2], kZero);
-        __m128i s3_03_0 = _mm_cmplt_epi16(step2[ 3], kZero);
-        __m128i s3_04_0 = _mm_cmplt_epi16(step2[ 4], kZero);
-        __m128i s3_05_0 = _mm_cmplt_epi16(step2[ 5], kZero);
-        __m128i s3_06_0 = _mm_cmplt_epi16(step2[ 6], kZero);
-        __m128i s3_07_0 = _mm_cmplt_epi16(step2[ 7], kZero);
-        __m128i s2_08_0 = _mm_cmplt_epi16(step2[ 8], kZero);
-        __m128i s2_09_0 = _mm_cmplt_epi16(step2[ 9], kZero);
+        __m128i s3_00_0 = _mm_cmplt_epi16(step2[0], kZero);
+        __m128i s3_01_0 = _mm_cmplt_epi16(step2[1], kZero);
+        __m128i s3_02_0 = _mm_cmplt_epi16(step2[2], kZero);
+        __m128i s3_03_0 = _mm_cmplt_epi16(step2[3], kZero);
+        __m128i s3_04_0 = _mm_cmplt_epi16(step2[4], kZero);
+        __m128i s3_05_0 = _mm_cmplt_epi16(step2[5], kZero);
+        __m128i s3_06_0 = _mm_cmplt_epi16(step2[6], kZero);
+        __m128i s3_07_0 = _mm_cmplt_epi16(step2[7], kZero);
+        __m128i s2_08_0 = _mm_cmplt_epi16(step2[8], kZero);
+        __m128i s2_09_0 = _mm_cmplt_epi16(step2[9], kZero);
         __m128i s3_10_0 = _mm_cmplt_epi16(step2[10], kZero);
         __m128i s3_11_0 = _mm_cmplt_epi16(step2[11], kZero);
         __m128i s3_12_0 = _mm_cmplt_epi16(step2[12], kZero);
@@ -515,16 +509,16 @@ void FDCT32x32_2D(const int16_t *input,
         __m128i s3_30_0 = _mm_cmplt_epi16(step1[30], kZero);
         __m128i s3_31_0 = _mm_cmplt_epi16(step1[31], kZero);
 
-        step2[0] = SUB_EPI16(step2[ 0], s3_00_0);
-        step2[1] = SUB_EPI16(step2[ 1], s3_01_0);
-        step2[2] = SUB_EPI16(step2[ 2], s3_02_0);
-        step2[3] = SUB_EPI16(step2[ 3], s3_03_0);
-        step2[4] = SUB_EPI16(step2[ 4], s3_04_0);
-        step2[5] = SUB_EPI16(step2[ 5], s3_05_0);
-        step2[6] = SUB_EPI16(step2[ 6], s3_06_0);
-        step2[7] = SUB_EPI16(step2[ 7], s3_07_0);
-        step2[8] = SUB_EPI16(step2[ 8], s2_08_0);
-        step2[9] = SUB_EPI16(step2[ 9], s2_09_0);
+        step2[0] = SUB_EPI16(step2[0], s3_00_0);
+        step2[1] = SUB_EPI16(step2[1], s3_01_0);
+        step2[2] = SUB_EPI16(step2[2], s3_02_0);
+        step2[3] = SUB_EPI16(step2[3], s3_03_0);
+        step2[4] = SUB_EPI16(step2[4], s3_04_0);
+        step2[5] = SUB_EPI16(step2[5], s3_05_0);
+        step2[6] = SUB_EPI16(step2[6], s3_06_0);
+        step2[7] = SUB_EPI16(step2[7], s3_07_0);
+        step2[8] = SUB_EPI16(step2[8], s2_08_0);
+        step2[9] = SUB_EPI16(step2[9], s2_09_0);
         step2[10] = SUB_EPI16(step2[10], s3_10_0);
         step2[11] = SUB_EPI16(step2[11], s3_11_0);
         step2[12] = SUB_EPI16(step2[12], s3_12_0);
@@ -549,29 +543,27 @@ void FDCT32x32_2D(const int16_t *input,
         step1[31] = SUB_EPI16(step1[31], s3_31_0);
 #if DCT_HIGH_BIT_DEPTH
         overflow = check_epi16_overflow_x32(
-            &step2[0], &step2[1], &step2[2], &step2[3],
-            &step2[4], &step2[5], &step2[6], &step2[7],
-            &step2[8], &step2[9], &step2[10], &step2[11],
-            &step2[12], &step2[13], &step2[14], &step2[15],
-            &step1[16], &step1[17], &step1[18], &step1[19],
-            &step2[20], &step2[21], &step2[22], &step2[23],
-            &step2[24], &step2[25], &step2[26], &step2[27],
-            &step1[28], &step1[29], &step1[30], &step1[31]);
+            &step2[0], &step2[1], &step2[2], &step2[3], &step2[4], &step2[5],
+            &step2[6], &step2[7], &step2[8], &step2[9], &step2[10], &step2[11],
+            &step2[12], &step2[13], &step2[14], &step2[15], &step1[16],
+            &step1[17], &step1[18], &step1[19], &step2[20], &step2[21],
+            &step2[22], &step2[23], &step2[24], &step2[25], &step2[26],
+            &step2[27], &step1[28], &step1[29], &step1[30], &step1[31]);
         if (overflow) {
           HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
           return;
         }
 #endif  // DCT_HIGH_BIT_DEPTH
-        step2[0] = _mm_add_epi16(step2[ 0], kOne);
-        step2[1] = _mm_add_epi16(step2[ 1], kOne);
-        step2[2] = _mm_add_epi16(step2[ 2], kOne);
-        step2[3] = _mm_add_epi16(step2[ 3], kOne);
-        step2[4] = _mm_add_epi16(step2[ 4], kOne);
-        step2[5] = _mm_add_epi16(step2[ 5], kOne);
-        step2[6] = _mm_add_epi16(step2[ 6], kOne);
-        step2[7] = _mm_add_epi16(step2[ 7], kOne);
-        step2[8] = _mm_add_epi16(step2[ 8], kOne);
-        step2[9] = _mm_add_epi16(step2[ 9], kOne);
+        step2[0] = _mm_add_epi16(step2[0], kOne);
+        step2[1] = _mm_add_epi16(step2[1], kOne);
+        step2[2] = _mm_add_epi16(step2[2], kOne);
+        step2[3] = _mm_add_epi16(step2[3], kOne);
+        step2[4] = _mm_add_epi16(step2[4], kOne);
+        step2[5] = _mm_add_epi16(step2[5], kOne);
+        step2[6] = _mm_add_epi16(step2[6], kOne);
+        step2[7] = _mm_add_epi16(step2[7], kOne);
+        step2[8] = _mm_add_epi16(step2[8], kOne);
+        step2[9] = _mm_add_epi16(step2[9], kOne);
         step2[10] = _mm_add_epi16(step2[10], kOne);
         step2[11] = _mm_add_epi16(step2[11], kOne);
         step2[12] = _mm_add_epi16(step2[12], kOne);
@@ -595,16 +587,16 @@ void FDCT32x32_2D(const int16_t *input,
         step1[30] = _mm_add_epi16(step1[30], kOne);
         step1[31] = _mm_add_epi16(step1[31], kOne);
 
-        step2[0] = _mm_srai_epi16(step2[ 0], 2);
-        step2[1] = _mm_srai_epi16(step2[ 1], 2);
-        step2[2] = _mm_srai_epi16(step2[ 2], 2);
-        step2[3] = _mm_srai_epi16(step2[ 3], 2);
-        step2[4] = _mm_srai_epi16(step2[ 4], 2);
-        step2[5] = _mm_srai_epi16(step2[ 5], 2);
-        step2[6] = _mm_srai_epi16(step2[ 6], 2);
-        step2[7] = _mm_srai_epi16(step2[ 7], 2);
-        step2[8] = _mm_srai_epi16(step2[ 8], 2);
-        step2[9] = _mm_srai_epi16(step2[ 9], 2);
+        step2[0] = _mm_srai_epi16(step2[0], 2);
+        step2[1] = _mm_srai_epi16(step2[1], 2);
+        step2[2] = _mm_srai_epi16(step2[2], 2);
+        step2[3] = _mm_srai_epi16(step2[3], 2);
+        step2[4] = _mm_srai_epi16(step2[4], 2);
+        step2[5] = _mm_srai_epi16(step2[5], 2);
+        step2[6] = _mm_srai_epi16(step2[6], 2);
+        step2[7] = _mm_srai_epi16(step2[7], 2);
+        step2[8] = _mm_srai_epi16(step2[8], 2);
+        step2[9] = _mm_srai_epi16(step2[9], 2);
         step2[10] = _mm_srai_epi16(step2[10], 2);
         step2[11] = _mm_srai_epi16(step2[11], 2);
         step2[12] = _mm_srai_epi16(step2[12], 2);
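Annotation: this pass-1 block repeats the mask trick from the AVX2 path in 16-bit lanes: _mm_cmplt_epi16 produces -1 where a coefficient is negative, SUB_EPI16 turns that into a +1, kOne adds the constant bias, and the >> 2 scales the intermediates down so the cross-pass values stay within int16_t. Net effect per lane, as a scalar sketch:

    #include <stdint.h>

    /* Scalar model of the cmplt/sub/add-one/srai sequence above:
     * rounds x/4 to nearest with ties toward zero, in 16-bit range. */
    static int16_t round_quarter(int16_t x) {
      const int16_t neg = -(x < 0);  /* _mm_cmplt_epi16(x, 0): 0 or -1 */
      return (int16_t)((x - neg + 1) >> 2);
    }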
@@ -633,821 +625,884 @@ void FDCT32x32_2D(const int16_t *input,
 #if FDCT32x32_HIGH_PRECISION
       if (pass == 0) {
 #endif
-      // Stage 3
-      {
-        step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]);
-        step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]);
-        step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]);
-        step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]);
-        step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]);
-        step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]);
-        step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]);
-        step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]);
+        // Stage 3
+        {
+          step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]);
+          step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]);
+          step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]);
+          step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]);
+          step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]);
+          step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]);
+          step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]);
+          step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2],
-                                           &step3[3], &step3[4], &step3[5],
-                                           &step3[6], &step3[7]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2],
+                                             &step3[3], &step3[4], &step3[5],
+                                             &step3[6], &step3[7]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
-        const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
-        const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
-        const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
-        const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
-        const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
-        const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
-        const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
-        const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
-        const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
-        const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
-        const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
-        const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
-        const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
-        const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
-        const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
-        const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
-        const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
-        const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
-        // Combine
-        step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7);
-        step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7);
-        step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
-        step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step3[10], &step3[11],
-                                           &step3[12], &step3[13]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step3[16] = ADD_EPI16(step2[23], step1[16]);
-        step3[17] = ADD_EPI16(step2[22], step1[17]);
-        step3[18] = ADD_EPI16(step2[21], step1[18]);
-        step3[19] = ADD_EPI16(step2[20], step1[19]);
-        step3[20] = SUB_EPI16(step1[19], step2[20]);
-        step3[21] = SUB_EPI16(step1[18], step2[21]);
-        step3[22] = SUB_EPI16(step1[17], step2[22]);
-        step3[23] = SUB_EPI16(step1[16], step2[23]);
-        step3[24] = SUB_EPI16(step1[31], step2[24]);
-        step3[25] = SUB_EPI16(step1[30], step2[25]);
-        step3[26] = SUB_EPI16(step1[29], step2[26]);
-        step3[27] = SUB_EPI16(step1[28], step2[27]);
-        step3[28] = ADD_EPI16(step2[27], step1[28]);
-        step3[29] = ADD_EPI16(step2[26], step1[29]);
-        step3[30] = ADD_EPI16(step2[25], step1[30]);
-        step3[31] = ADD_EPI16(step2[24], step1[31]);
+        {
+          const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
+          const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
+          const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
+          const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
+          const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
+          const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
+          const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
+          const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
+          const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
+          const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
+          const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
+          const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
+          // dct_const_round_shift
+          const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
+          const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
+          const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
+          const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
+          const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
+          const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
+          const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
+          const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
+          // Combine
+          step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7);
+          step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7);
+          step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
+          step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step3[16], &step3[17], &step3[18], &step3[19],
-            &step3[20], &step3[21], &step3[22], &step3[23],
-            &step3[24], &step3[25], &step3[26], &step3[27],
-            &step3[28], &step3[29], &step3[30], &step3[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow = check_epi16_overflow_x4(&step3[10], &step3[11], &step3[12],
+                                             &step3[13]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
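+        // Overflow fallback used throughout: in pass 0 the full 2-D C
+        // transform is redone from the original input; in pass 1 only the
+        // row transform of the column-pass intermediate must be redone.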
+        {
+          step3[16] = ADD_EPI16(step2[23], step1[16]);
+          step3[17] = ADD_EPI16(step2[22], step1[17]);
+          step3[18] = ADD_EPI16(step2[21], step1[18]);
+          step3[19] = ADD_EPI16(step2[20], step1[19]);
+          step3[20] = SUB_EPI16(step1[19], step2[20]);
+          step3[21] = SUB_EPI16(step1[18], step2[21]);
+          step3[22] = SUB_EPI16(step1[17], step2[22]);
+          step3[23] = SUB_EPI16(step1[16], step2[23]);
+          step3[24] = SUB_EPI16(step1[31], step2[24]);
+          step3[25] = SUB_EPI16(step1[30], step2[25]);
+          step3[26] = SUB_EPI16(step1[29], step2[26]);
+          step3[27] = SUB_EPI16(step1[28], step2[27]);
+          step3[28] = ADD_EPI16(step2[27], step1[28]);
+          step3[29] = ADD_EPI16(step2[26], step1[29]);
+          step3[30] = ADD_EPI16(step2[25], step1[30]);
+          step3[31] = ADD_EPI16(step2[24], step1[31]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x16(
+              &step3[16], &step3[17], &step3[18], &step3[19], &step3[20],
+              &step3[21], &step3[22], &step3[23], &step3[24], &step3[25],
+              &step3[26], &step3[27], &step3[28], &step3[29], &step3[30],
+              &step3[31]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
+        }
 
-      // Stage 4
-      {
-        step1[0] = ADD_EPI16(step3[ 3], step3[ 0]);
-        step1[1] = ADD_EPI16(step3[ 2], step3[ 1]);
-        step1[2] = SUB_EPI16(step3[ 1], step3[ 2]);
-        step1[3] = SUB_EPI16(step3[ 0], step3[ 3]);
-        step1[8] = ADD_EPI16(step3[11], step2[ 8]);
-        step1[9] = ADD_EPI16(step3[10], step2[ 9]);
-        step1[10] = SUB_EPI16(step2[ 9], step3[10]);
-        step1[11] = SUB_EPI16(step2[ 8], step3[11]);
-        step1[12] = SUB_EPI16(step2[15], step3[12]);
-        step1[13] = SUB_EPI16(step2[14], step3[13]);
-        step1[14] = ADD_EPI16(step3[13], step2[14]);
-        step1[15] = ADD_EPI16(step3[12], step2[15]);
+        // Stage 4
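+        // (step1[] is reused as scratch for the stage-4 results)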
+        {
+          step1[0] = ADD_EPI16(step3[3], step3[0]);
+          step1[1] = ADD_EPI16(step3[2], step3[1]);
+          step1[2] = SUB_EPI16(step3[1], step3[2]);
+          step1[3] = SUB_EPI16(step3[0], step3[3]);
+          step1[8] = ADD_EPI16(step3[11], step2[8]);
+          step1[9] = ADD_EPI16(step3[10], step2[9]);
+          step1[10] = SUB_EPI16(step2[9], step3[10]);
+          step1[11] = SUB_EPI16(step2[8], step3[11]);
+          step1[12] = SUB_EPI16(step2[15], step3[12]);
+          step1[13] = SUB_EPI16(step2[14], step3[13]);
+          step1[14] = ADD_EPI16(step3[13], step2[14]);
+          step1[15] = ADD_EPI16(step3[12], step2[15]);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step1[0], &step1[1], &step1[2], &step1[3],
-            &step1[4], &step1[5], &step1[6], &step1[7],
-            &step1[8], &step1[9], &step1[10], &step1[11],
-            &step1[12], &step1[13], &step1[14], &step1[15]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x16(
+              &step1[0], &step1[1], &step1[2], &step1[3], &step1[4], &step1[5],
+              &step1[6], &step1[7], &step1[8], &step1[9], &step1[10],
+              &step1[11], &step1[12], &step1[13], &step1[14], &step1[15]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]);
-        const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]);
-        const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16);
-        const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16);
-        const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16);
-        const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS);
-        const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS);
-        const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS);
-        const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS);
-        // Combine
-        step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
-        step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x2(&step1[5], &step1[6]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]);
-        const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]);
-        const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]);
-        const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]);
-        const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]);
-        const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]);
-        const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]);
-        const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]);
-        const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24);
-        const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24);
-        const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24);
-        const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24);
-        const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08);
-        const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08);
-        const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08);
-        const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08);
-        const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24);
-        const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24);
-        const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24);
-        const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24);
-        const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08);
-        const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08);
-        const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08);
-        const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS);
-        const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS);
-        const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS);
-        const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS);
-        const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS);
-        const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS);
-        const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS);
-        const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS);
-        const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS);
-        const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS);
-        const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS);
-        const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS);
-        const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS);
-        const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS);
-        const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS);
-        const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS);
-        // Combine
-        step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7);
-        step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7);
-        step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7);
-        step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7);
-        step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7);
-        step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7);
-        step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
-        step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
+        {
+          const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]);
+          const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]);
+          const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16);
+          const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16);
+          const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16);
+          const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16);
+          // dct_const_round_shift
+          const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS);
+          const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS);
+          const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS);
+          const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS);
+          // Combine
+          step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
+          step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
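+          // step1[5]/step1[6] are the cospi_16 difference/sum butterfly of
+          // step3[6] and step3[5], round-shifted as above.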
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20],
-                                           &step1[21], &step1[26], &step1[27],
-                                           &step1[28], &step1[29]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x2(&step1[5], &step1[6]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Stage 5
-      {
-        step2[4] = ADD_EPI16(step1[5], step3[4]);
-        step2[5] = SUB_EPI16(step3[4], step1[5]);
-        step2[6] = SUB_EPI16(step3[7], step1[6]);
-        step2[7] = ADD_EPI16(step1[6], step3[7]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step2[4], &step2[5],
-                                           &step2[6], &step2[7]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]);
-        const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]);
-        const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]);
-        const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]);
-        const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16);
-        const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16);
-        const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16);
-        const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16);
-        const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08);
-        const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08);
-        const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24);
-        const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24);
-        // dct_const_round_shift
-        const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS);
-        const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS);
-        const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS);
-        const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS);
-        const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS);
-        const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS);
-        const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS);
-        const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS);
-        // Combine
-        out[ 0] = _mm_packs_epi32(out_00_6, out_00_7);
-        out[16] = _mm_packs_epi32(out_16_6, out_16_7);
-        out[ 8] = _mm_packs_epi32(out_08_6, out_08_7);
-        out[24] = _mm_packs_epi32(out_24_6, out_24_7);
+        {
+          const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]);
+          const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]);
+          const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]);
+          const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]);
+          const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]);
+          const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]);
+          const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]);
+          const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]);
+          const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24);
+          const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24);
+          const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24);
+          const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24);
+          const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08);
+          const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08);
+          const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08);
+          const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08);
+          const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24);
+          const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24);
+          const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24);
+          const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24);
+          const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08);
+          const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08);
+          const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08);
+          const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08);
+          // dct_const_round_shift
+          const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
+          const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
+          const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS);
+          const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS);
+          const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS);
+          const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS);
+          const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS);
+          const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS);
+          const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS);
+          const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS);
+          const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS);
+          const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS);
+          const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS);
+          const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS);
+          const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS);
+          const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS);
+          const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS);
+          const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS);
+          // Combine
+          step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7);
+          step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7);
+          step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7);
+          step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7);
+          step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7);
+          step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7);
+          step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
+          step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
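+          // Each unpacked pair above is reused for a mirrored output (e.g.
+          // s1_26_* come from s1_21_0/1 with a different cosine pair), so
+          // eight rotations cost only four unpack pairs.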
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&out[0], &out[16],
-                                           &out[8], &out[24]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20],
+                                             &step1[21], &step1[26], &step1[27],
+                                             &step1[28], &step1[29]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[ 9], step1[14]);
-        const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]);
-        const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]);
-        const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]);
-        const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24);
-        const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24);
-        const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08);
-        const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08);
-        const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24);
-        const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24);
-        const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08);
-        const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS);
-        const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS);
-        const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS);
-        const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS);
-        const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS);
-        const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS);
-        const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS);
-        const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS);
-        // Combine
-        step2[ 9] = _mm_packs_epi32(s2_09_6, s2_09_7);
-        step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7);
-        step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
-        step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step2[9], &step2[10],
-                                           &step2[13], &step2[14]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step2[16] = ADD_EPI16(step1[19], step3[16]);
-        step2[17] = ADD_EPI16(step1[18], step3[17]);
-        step2[18] = SUB_EPI16(step3[17], step1[18]);
-        step2[19] = SUB_EPI16(step3[16], step1[19]);
-        step2[20] = SUB_EPI16(step3[23], step1[20]);
-        step2[21] = SUB_EPI16(step3[22], step1[21]);
-        step2[22] = ADD_EPI16(step1[21], step3[22]);
-        step2[23] = ADD_EPI16(step1[20], step3[23]);
-        step2[24] = ADD_EPI16(step1[27], step3[24]);
-        step2[25] = ADD_EPI16(step1[26], step3[25]);
-        step2[26] = SUB_EPI16(step3[25], step1[26]);
-        step2[27] = SUB_EPI16(step3[24], step1[27]);
-        step2[28] = SUB_EPI16(step3[31], step1[28]);
-        step2[29] = SUB_EPI16(step3[30], step1[29]);
-        step2[30] = ADD_EPI16(step1[29], step3[30]);
-        step2[31] = ADD_EPI16(step1[28], step3[31]);
+        // Stage 5
+        {
+          step2[4] = ADD_EPI16(step1[5], step3[4]);
+          step2[5] = SUB_EPI16(step3[4], step1[5]);
+          step2[6] = SUB_EPI16(step3[7], step1[6]);
+          step2[7] = ADD_EPI16(step1[6], step3[7]);
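+          // Stage-5 butterflies for 4..7: recombine the rotated step1[5],
+          // step1[6] with step3[4], step3[7].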
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step2[16], &step2[17], &step2[18], &step2[19],
-            &step2[20], &step2[21], &step2[22], &step2[23],
-            &step2[24], &step2[25], &step2[26], &step2[27],
-            &step2[28], &step2[29], &step2[30], &step2[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = check_epi16_overflow_x4(&step2[4], &step2[5], &step2[6],
+                                             &step2[7]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Stage 6
-      {
-        const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
-        const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
-        const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
-        const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
-        const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
-        const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
-        const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
-        const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
-        const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04);
-        const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04);
-        const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20);
-        const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20);
-        const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12);
-        const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12);
-        const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28);
-        const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28);
-        // dct_const_round_shift
-        const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS);
-        const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS);
-        const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS);
-        const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS);
-        const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS);
-        const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS);
-        const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS);
-        const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS);
-        // Combine
-        out[4] = _mm_packs_epi32(out_04_6, out_04_7);
-        out[20] = _mm_packs_epi32(out_20_6, out_20_7);
-        out[12] = _mm_packs_epi32(out_12_6, out_12_7);
-        out[28] = _mm_packs_epi32(out_28_6, out_28_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&out[4], &out[20],
-                                           &out[12], &out[28]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step3[8] = ADD_EPI16(step2[ 9], step1[ 8]);
-        step3[9] = SUB_EPI16(step1[ 8], step2[ 9]);
-        step3[10] = SUB_EPI16(step1[11], step2[10]);
-        step3[11] = ADD_EPI16(step2[10], step1[11]);
-        step3[12] = ADD_EPI16(step2[13], step1[12]);
-        step3[13] = SUB_EPI16(step1[12], step2[13]);
-        step3[14] = SUB_EPI16(step1[15], step2[14]);
-        step3[15] = ADD_EPI16(step2[14], step1[15]);
+        {
+          const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]);
+          const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]);
+          const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]);
+          const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]);
+          const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16);
+          const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16);
+          const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16);
+          const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16);
+          const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08);
+          const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08);
+          const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24);
+          const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24);
+          // dct_const_round_shift
+          const __m128i out_00_4 =
+              _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_00_5 =
+              _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_16_4 =
+              _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_16_5 =
+              _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_08_4 =
+              _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_08_5 =
+              _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_24_4 =
+              _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_24_5 =
+              _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS);
+          const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS);
+          const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS);
+          const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS);
+          const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS);
+          const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS);
+          const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS);
+          const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS);
+          // Combine
+          out[0] = _mm_packs_epi32(out_00_6, out_00_7);
+          out[16] = _mm_packs_epi32(out_16_6, out_16_7);
+          out[8] = _mm_packs_epi32(out_08_6, out_08_7);
+          out[24] = _mm_packs_epi32(out_24_6, out_24_7);
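+          // Final coefficients 0, 16, 8, 24: out[0]/out[16] are the cospi_16
+          // sum and difference of step1[0..1]; out[8]/out[24] rotate
+          // step1[2..3] by the 24/8 cosine pair.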
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10],
-                                           &step3[11], &step3[12], &step3[13],
-                                           &step3[14], &step3[15]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow =
+              check_epi16_overflow_x4(&out[0], &out[16], &out[8], &out[24]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
-        const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
-        const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
-        const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
-        const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
-        const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
-        const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
-        const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
-        const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
-        const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
-        const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
-        const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
-        const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
-        const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
-        const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
-        const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
-        const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
-        const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
-        const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
-        const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
-        const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
-        const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
-        const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
-        const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
-        // dct_const_round_shift
-        const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
-        const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
-        const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
-        const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
-        const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
-        const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
-        const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
-        const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
-        const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
-        const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
-        const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
-        const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
-        const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
-        const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
-        const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
-        const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
-        // Combine
-        step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
-        step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
-        step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
-        step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
-        // Combine
-        step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
-        step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
-        step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
-        step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21],
-                                           &step3[22], &step3[25], &step3[26],
-                                           &step3[29], &step3[30]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
         }
+        {
+          const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[9], step1[14]);
+          const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[9], step1[14]);
+          const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]);
+          const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]);
+          const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24);
+          const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24);
+          const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08);
+          const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08);
+          const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24);
+          const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24);
+          const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08);
+          const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08);
+          // dct_const_round_shift
+          const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
+          const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
+          const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
+          const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
+          const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
+          const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
+          const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
+          const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
+          const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS);
+          const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS);
+          const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS);
+          const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS);
+          const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS);
+          const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS);
+          const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS);
+          const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS);
+          // Combine
+          step2[9] = _mm_packs_epi32(s2_09_6, s2_09_7);
+          step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7);
+          step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
+          step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
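+          // step2[9]/[14] and step2[10]/[13]: cosine rotations of the
+          // (step1[9], step1[14]) and (step1[10], step1[13]) pairs, using the
+          // same madd / round-shift / pack pattern as above.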
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x4(&step2[9], &step2[10], &step2[13],
+                                             &step2[14]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Stage 7
-      {
-        const __m128i out_02_0 = _mm_unpacklo_epi16(step3[ 8], step3[15]);
-        const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]);
-        const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]);
-        const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]);
-        const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
-        const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
-        const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
-        const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
-        const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
-        const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
-        const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
-        const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
-        const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
-        const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
-        const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
-        const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
-        const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
-        const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
-        const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
-        const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
-        const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
-        const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
-        const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
-        const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
-        // dct_const_round_shift
-        const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
-        const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
-        const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
-        const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
-        const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
-        const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
-        const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
-        const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
-        const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
-        const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
-        const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
-        const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
-        const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
-        const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
-        const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
-        const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
-        // Combine
-        out[ 2] = _mm_packs_epi32(out_02_6, out_02_7);
-        out[18] = _mm_packs_epi32(out_18_6, out_18_7);
-        out[10] = _mm_packs_epi32(out_10_6, out_10_7);
-        out[26] = _mm_packs_epi32(out_26_6, out_26_7);
-        out[ 6] = _mm_packs_epi32(out_06_6, out_06_7);
-        out[22] = _mm_packs_epi32(out_22_6, out_22_7);
-        out[14] = _mm_packs_epi32(out_14_6, out_14_7);
-        out[30] = _mm_packs_epi32(out_30_6, out_30_7);
+        }
+        {
+          step2[16] = ADD_EPI16(step1[19], step3[16]);
+          step2[17] = ADD_EPI16(step1[18], step3[17]);
+          step2[18] = SUB_EPI16(step3[17], step1[18]);
+          step2[19] = SUB_EPI16(step3[16], step1[19]);
+          step2[20] = SUB_EPI16(step3[23], step1[20]);
+          step2[21] = SUB_EPI16(step3[22], step1[21]);
+          step2[22] = ADD_EPI16(step1[21], step3[22]);
+          step2[23] = ADD_EPI16(step1[20], step3[23]);
+          step2[24] = ADD_EPI16(step1[27], step3[24]);
+          step2[25] = ADD_EPI16(step1[26], step3[25]);
+          step2[26] = SUB_EPI16(step3[25], step1[26]);
+          step2[27] = SUB_EPI16(step3[24], step1[27]);
+          step2[28] = SUB_EPI16(step3[31], step1[28]);
+          step2[29] = SUB_EPI16(step3[30], step1[29]);
+          step2[30] = ADD_EPI16(step1[29], step3[30]);
+          step2[31] = ADD_EPI16(step1[28], step3[31]);
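+          // Stage-5 sums/differences for the 16..31 chain; these feed the
+          // stage-6 rotations of 17/18, 21/22 and their mirrors below.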
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
-                                           &out[26], &out[6], &out[22],
-                                           &out[14], &out[30]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow = check_epi16_overflow_x16(
+              &step2[16], &step2[17], &step2[18], &step2[19], &step2[20],
+              &step2[21], &step2[22], &step2[23], &step2[24], &step2[25],
+              &step2[26], &step2[27], &step2[28], &step2[29], &step2[30],
+              &step2[31]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        // Stage 6
+        {
+          const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+          const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+          const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+          const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+          const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+          const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+          const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+          const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+          const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04);
+          const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04);
+          const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20);
+          const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20);
+          const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12);
+          const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12);
+          const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28);
+          const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28);
+          // dct_const_round_shift
+          const __m128i out_04_4 =
+              _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_04_5 =
+              _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_20_4 =
+              _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_20_5 =
+              _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_12_4 =
+              _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_12_5 =
+              _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_28_4 =
+              _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_28_5 =
+              _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS);
+          const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS);
+          const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS);
+          const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS);
+          const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS);
+          const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS);
+          const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS);
+          const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS);
+          // Combine
+          out[4] = _mm_packs_epi32(out_04_6, out_04_7);
+          out[20] = _mm_packs_epi32(out_20_6, out_20_7);
+          out[12] = _mm_packs_epi32(out_12_6, out_12_7);
+          out[28] = _mm_packs_epi32(out_28_6, out_28_7);
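+          // Final coefficients 4, 20, 12, 28 from step2[4..7]; out_12_*/
+          // out_28_* duplicate the out_20_*/out_04_* unpacks with the
+          // complementary cosine constants.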
+#if DCT_HIGH_BIT_DEPTH
+          overflow =
+              check_epi16_overflow_x4(&out[4], &out[20], &out[12], &out[28]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step1[16] = ADD_EPI16(step3[17], step2[16]);
-        step1[17] = SUB_EPI16(step2[16], step3[17]);
-        step1[18] = SUB_EPI16(step2[19], step3[18]);
-        step1[19] = ADD_EPI16(step3[18], step2[19]);
-        step1[20] = ADD_EPI16(step3[21], step2[20]);
-        step1[21] = SUB_EPI16(step2[20], step3[21]);
-        step1[22] = SUB_EPI16(step2[23], step3[22]);
-        step1[23] = ADD_EPI16(step3[22], step2[23]);
-        step1[24] = ADD_EPI16(step3[25], step2[24]);
-        step1[25] = SUB_EPI16(step2[24], step3[25]);
-        step1[26] = SUB_EPI16(step2[27], step3[26]);
-        step1[27] = ADD_EPI16(step3[26], step2[27]);
-        step1[28] = ADD_EPI16(step3[29], step2[28]);
-        step1[29] = SUB_EPI16(step2[28], step3[29]);
-        step1[30] = SUB_EPI16(step2[31], step3[30]);
-        step1[31] = ADD_EPI16(step3[30], step2[31]);
+        }
+        {
+          step3[8] = ADD_EPI16(step2[9], step1[8]);
+          step3[9] = SUB_EPI16(step1[8], step2[9]);
+          step3[10] = SUB_EPI16(step1[11], step2[10]);
+          step3[11] = ADD_EPI16(step2[10], step1[11]);
+          step3[12] = ADD_EPI16(step2[13], step1[12]);
+          step3[13] = SUB_EPI16(step1[12], step2[13]);
+          step3[14] = SUB_EPI16(step1[15], step2[14]);
+          step3[15] = ADD_EPI16(step2[14], step1[15]);
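+          // Stage-6 sums/differences for the 8..15 chain, combining the
+          // stage-4 step1 terms with the rotated step2 terms.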
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step1[16], &step1[17], &step1[18], &step1[19],
-            &step1[20], &step1[21], &step1[22], &step1[23],
-            &step1[24], &step1[25], &step1[26], &step1[27],
-            &step1[28], &step1[29], &step1[30], &step1[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10],
+                                             &step3[11], &step3[12], &step3[13],
+                                             &step3[14], &step3[15]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        {
+          const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
+          const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
+          const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
+          const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
+          const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
+          const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
+          const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
+          const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
+          const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
+          const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
+          const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
+          const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
+          const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
+          const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
+          const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
+          const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
+          const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
+          const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
+          const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
+          const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
+          const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
+          const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
+          const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
+          const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
+          // dct_const_round_shift
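+          // i.e. (x + 2^(DCT_CONST_BITS - 1)) >> DCT_CONST_BITS, the same
+          // rounding as the scalar dct_const_round_shift() helper.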
+          const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
+          const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
+          const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
+          const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
+          const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
+          const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
+          const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
+          const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
+          const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
+          const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
+          const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
+          const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
+          const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
+          const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
+          const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
+          const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
+          // Combine
+          step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
+          step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
+          step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
+          step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
+          // Combine
+          step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
+          step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
+          step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
+          step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
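+          // _mm_packs_epi32 narrows with signed saturation to int16, hence
+          // the overflow re-check in the high-bit-depth build below.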
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21],
+                                             &step3[22], &step3[25], &step3[26],
+                                             &step3[29], &step3[30]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Final stage --- outputs indices are bit-reversed.
-      {
-        const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
-        const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
-        const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
-        const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
-        const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
-        const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
-        const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
-        const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
-        const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
-        const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
-        const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
-        const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
-        const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
-        const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
-        const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
-        const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
-        const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
-        const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
-        const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
-        const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
-        const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
-        const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
-        const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
-        const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
-        // dct_const_round_shift
-        const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
-        const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
-        const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
-        const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
-        const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
-        const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
-        const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
-        const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
-        const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
-        const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
-        const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
-        const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
-        const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
-        const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
-        const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
-        const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
-        // Combine
-        out[ 1] = _mm_packs_epi32(out_01_6, out_01_7);
-        out[17] = _mm_packs_epi32(out_17_6, out_17_7);
-        out[ 9] = _mm_packs_epi32(out_09_6, out_09_7);
-        out[25] = _mm_packs_epi32(out_25_6, out_25_7);
-        out[ 7] = _mm_packs_epi32(out_07_6, out_07_7);
-        out[23] = _mm_packs_epi32(out_23_6, out_23_7);
-        out[15] = _mm_packs_epi32(out_15_6, out_15_7);
-        out[31] = _mm_packs_epi32(out_31_6, out_31_7);
+        }
+        // Stage 7
+        {
+          const __m128i out_02_0 = _mm_unpacklo_epi16(step3[8], step3[15]);
+          const __m128i out_02_1 = _mm_unpackhi_epi16(step3[8], step3[15]);
+          const __m128i out_18_0 = _mm_unpacklo_epi16(step3[9], step3[14]);
+          const __m128i out_18_1 = _mm_unpackhi_epi16(step3[9], step3[14]);
+          const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
+          const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
+          const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
+          const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
+          const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
+          const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
+          const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
+          const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
+          const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
+          const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
+          const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
+          const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
+          const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
+          const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
+          const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
+          const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
+          const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
+          const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
+          const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
+          const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
+          // dct_const_round_shift
+          const __m128i out_02_4 =
+              _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_02_5 =
+              _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_18_4 =
+              _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_18_5 =
+              _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_10_4 =
+              _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_10_5 =
+              _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_26_4 =
+              _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_26_5 =
+              _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_06_4 =
+              _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_06_5 =
+              _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_22_4 =
+              _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_22_5 =
+              _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_14_4 =
+              _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_14_5 =
+              _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_30_4 =
+              _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_30_5 =
+              _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
+          const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
+          const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
+          const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
+          const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
+          const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
+          const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
+          const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
+          const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
+          const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
+          const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
+          const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
+          const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
+          const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
+          const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
+          const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
+          // Combine
+          out[2] = _mm_packs_epi32(out_02_6, out_02_7);
+          out[18] = _mm_packs_epi32(out_18_6, out_18_7);
+          out[10] = _mm_packs_epi32(out_10_6, out_10_7);
+          out[26] = _mm_packs_epi32(out_26_6, out_26_7);
+          out[6] = _mm_packs_epi32(out_06_6, out_06_7);
+          out[22] = _mm_packs_epi32(out_22_6, out_22_7);
+          out[14] = _mm_packs_epi32(out_14_6, out_14_7);
+          out[30] = _mm_packs_epi32(out_30_6, out_30_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
-                                           &out[25], &out[7], &out[23],
-                                           &out[15], &out[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow =
+              check_epi16_overflow_x8(&out[2], &out[18], &out[10], &out[26],
+                                      &out[6], &out[22], &out[14], &out[30]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        {
+          step1[16] = ADD_EPI16(step3[17], step2[16]);
+          step1[17] = SUB_EPI16(step2[16], step3[17]);
+          step1[18] = SUB_EPI16(step2[19], step3[18]);
+          step1[19] = ADD_EPI16(step3[18], step2[19]);
+          step1[20] = ADD_EPI16(step3[21], step2[20]);
+          step1[21] = SUB_EPI16(step2[20], step3[21]);
+          step1[22] = SUB_EPI16(step2[23], step3[22]);
+          step1[23] = ADD_EPI16(step3[22], step2[23]);
+          step1[24] = ADD_EPI16(step3[25], step2[24]);
+          step1[25] = SUB_EPI16(step2[24], step3[25]);
+          step1[26] = SUB_EPI16(step2[27], step3[26]);
+          step1[27] = ADD_EPI16(step3[26], step2[27]);
+          step1[28] = ADD_EPI16(step3[29], step2[28]);
+          step1[29] = SUB_EPI16(step2[28], step3[29]);
+          step1[30] = SUB_EPI16(step2[31], step3[30]);
+          step1[31] = ADD_EPI16(step3[30], step2[31]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x16(
+              &step1[16], &step1[17], &step1[18], &step1[19], &step1[20],
+              &step1[21], &step1[22], &step1[23], &step1[24], &step1[25],
+              &step1[26], &step1[27], &step1[28], &step1[29], &step1[30],
+              &step1[31]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
-        const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
-        const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
-        const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
-        const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
-        const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
-        const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
-        const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
-        const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
-        const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
-        const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
-        const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
-        const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
-        const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
-        const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
-        const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
-        const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
-        const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
-        const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
-        const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
-        const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
-        const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
-        const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
-        const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
-        // dct_const_round_shift
-        const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
-        const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
-        const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
-        const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
-        const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
-        const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
-        const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
-        const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
-        const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
-        const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
-        const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
-        const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
-        const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
-        const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
-        const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
-        const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
-        // Combine
-        out[ 5] = _mm_packs_epi32(out_05_6, out_05_7);
-        out[21] = _mm_packs_epi32(out_21_6, out_21_7);
-        out[13] = _mm_packs_epi32(out_13_6, out_13_7);
-        out[29] = _mm_packs_epi32(out_29_6, out_29_7);
-        out[ 3] = _mm_packs_epi32(out_03_6, out_03_7);
-        out[19] = _mm_packs_epi32(out_19_6, out_19_7);
-        out[11] = _mm_packs_epi32(out_11_6, out_11_7);
-        out[27] = _mm_packs_epi32(out_27_6, out_27_7);
+        }
+        // Final stage --- output indices are bit-reversed.
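+        // For the 32-point transform, natural output n lands at the 5-bit
+        // bit-reversal of n: e.g. step1[16] (10000b) feeds out[1] (00001b)
+        // and step1[18] (10010b) feeds out[9] (01001b).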
+        {
+          const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
+          const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
+          const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
+          const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
+          const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
+          const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
+          const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
+          const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
+          const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
+          const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
+          const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
+          const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
+          const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
+          const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
+          const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
+          const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
+          const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
+          const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
+          const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
+          const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
+          const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
+          const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
+          const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
+          const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
+          // dct_const_round_shift
+          const __m128i out_01_4 =
+              _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_01_5 =
+              _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_17_4 =
+              _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_17_5 =
+              _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_09_4 =
+              _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_09_5 =
+              _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_25_4 =
+              _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_25_5 =
+              _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_07_4 =
+              _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_07_5 =
+              _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_23_4 =
+              _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_23_5 =
+              _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_15_4 =
+              _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_15_5 =
+              _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_31_4 =
+              _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_31_5 =
+              _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
+          const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
+          const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
+          const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
+          const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
+          const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
+          const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
+          const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
+          const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
+          const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
+          const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
+          const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
+          const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
+          const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
+          const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
+          const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
+          // Combine
+          out[1] = _mm_packs_epi32(out_01_6, out_01_7);
+          out[17] = _mm_packs_epi32(out_17_6, out_17_7);
+          out[9] = _mm_packs_epi32(out_09_6, out_09_7);
+          out[25] = _mm_packs_epi32(out_25_6, out_25_7);
+          out[7] = _mm_packs_epi32(out_07_6, out_07_7);
+          out[23] = _mm_packs_epi32(out_23_6, out_23_7);
+          out[15] = _mm_packs_epi32(out_15_6, out_15_7);
+          out[31] = _mm_packs_epi32(out_31_6, out_31_7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
-                                           &out[29], &out[3], &out[19],
-                                           &out[11], &out[27]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
+          overflow =
+              check_epi16_overflow_x8(&out[1], &out[17], &out[9], &out[25],
+                                      &out[7], &out[23], &out[15], &out[31]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
         }
+        {
+          const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
+          const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
+          const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
+          const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
+          const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
+          const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
+          const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
+          const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
+          const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
+          const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
+          const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
+          const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
+          const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
+          const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
+          const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
+          const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
+          const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
+          const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
+          const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
+          const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
+          const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
+          const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
+          const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
+          const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
+          // dct_const_round_shift
+          const __m128i out_05_4 =
+              _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_05_5 =
+              _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_21_4 =
+              _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_21_5 =
+              _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_13_4 =
+              _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_13_5 =
+              _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_29_4 =
+              _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_29_5 =
+              _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_03_4 =
+              _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_03_5 =
+              _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_19_4 =
+              _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_19_5 =
+              _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_11_4 =
+              _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_11_5 =
+              _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_27_4 =
+              _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
+          const __m128i out_27_5 =
+              _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
+          const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
+          const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
+          const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
+          const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
+          const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
+          const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
+          const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
+          const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
+          const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
+          const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
+          const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
+          const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
+          const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
+          const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
+          const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
+          const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
+          // Combine
+          out[5] = _mm_packs_epi32(out_05_6, out_05_7);
+          out[21] = _mm_packs_epi32(out_21_6, out_21_7);
+          out[13] = _mm_packs_epi32(out_13_6, out_13_7);
+          out[29] = _mm_packs_epi32(out_29_6, out_29_7);
+          out[3] = _mm_packs_epi32(out_03_6, out_03_7);
+          out[19] = _mm_packs_epi32(out_19_6, out_19_7);
+          out[11] = _mm_packs_epi32(out_11_6, out_11_7);
+          out[27] = _mm_packs_epi32(out_27_6, out_27_7);
+#if DCT_HIGH_BIT_DEPTH
+          overflow =
+              check_epi16_overflow_x8(&out[5], &out[21], &out[13], &out[29],
+                                      &out[3], &out[19], &out[11], &out[27]);
+          if (overflow) {
+            if (pass == 0)
+              HIGH_FDCT32x32_2D_C(input, output_org, stride);
+            else
+              HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-      }
+        }
 #if FDCT32x32_HIGH_PRECISION
       } else {
         __m128i lstep1[64], lstep2[64], lstep3[64];
@@ -1457,32 +1512,32 @@ void FDCT32x32_2D(const int16_t *input,
         // stage 3
         {
           // expanding to 32-bit length prior to addition operations
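           // (interleaving with kZero and then _mm_madd_epi16 with kOne
           // sign-extends each int16 lane to int32: (s, 0) . (1, 1) == s
           // as a full 32-bit product)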
-          lstep2[ 0] = _mm_unpacklo_epi16(step2[ 0], kZero);
-          lstep2[ 1] = _mm_unpackhi_epi16(step2[ 0], kZero);
-          lstep2[ 2] = _mm_unpacklo_epi16(step2[ 1], kZero);
-          lstep2[ 3] = _mm_unpackhi_epi16(step2[ 1], kZero);
-          lstep2[ 4] = _mm_unpacklo_epi16(step2[ 2], kZero);
-          lstep2[ 5] = _mm_unpackhi_epi16(step2[ 2], kZero);
-          lstep2[ 6] = _mm_unpacklo_epi16(step2[ 3], kZero);
-          lstep2[ 7] = _mm_unpackhi_epi16(step2[ 3], kZero);
-          lstep2[ 8] = _mm_unpacklo_epi16(step2[ 4], kZero);
-          lstep2[ 9] = _mm_unpackhi_epi16(step2[ 4], kZero);
-          lstep2[10] = _mm_unpacklo_epi16(step2[ 5], kZero);
-          lstep2[11] = _mm_unpackhi_epi16(step2[ 5], kZero);
-          lstep2[12] = _mm_unpacklo_epi16(step2[ 6], kZero);
-          lstep2[13] = _mm_unpackhi_epi16(step2[ 6], kZero);
-          lstep2[14] = _mm_unpacklo_epi16(step2[ 7], kZero);
-          lstep2[15] = _mm_unpackhi_epi16(step2[ 7], kZero);
-          lstep2[ 0] = _mm_madd_epi16(lstep2[ 0], kOne);
-          lstep2[ 1] = _mm_madd_epi16(lstep2[ 1], kOne);
-          lstep2[ 2] = _mm_madd_epi16(lstep2[ 2], kOne);
-          lstep2[ 3] = _mm_madd_epi16(lstep2[ 3], kOne);
-          lstep2[ 4] = _mm_madd_epi16(lstep2[ 4], kOne);
-          lstep2[ 5] = _mm_madd_epi16(lstep2[ 5], kOne);
-          lstep2[ 6] = _mm_madd_epi16(lstep2[ 6], kOne);
-          lstep2[ 7] = _mm_madd_epi16(lstep2[ 7], kOne);
-          lstep2[ 8] = _mm_madd_epi16(lstep2[ 8], kOne);
-          lstep2[ 9] = _mm_madd_epi16(lstep2[ 9], kOne);
+          lstep2[0] = _mm_unpacklo_epi16(step2[0], kZero);
+          lstep2[1] = _mm_unpackhi_epi16(step2[0], kZero);
+          lstep2[2] = _mm_unpacklo_epi16(step2[1], kZero);
+          lstep2[3] = _mm_unpackhi_epi16(step2[1], kZero);
+          lstep2[4] = _mm_unpacklo_epi16(step2[2], kZero);
+          lstep2[5] = _mm_unpackhi_epi16(step2[2], kZero);
+          lstep2[6] = _mm_unpacklo_epi16(step2[3], kZero);
+          lstep2[7] = _mm_unpackhi_epi16(step2[3], kZero);
+          lstep2[8] = _mm_unpacklo_epi16(step2[4], kZero);
+          lstep2[9] = _mm_unpackhi_epi16(step2[4], kZero);
+          lstep2[10] = _mm_unpacklo_epi16(step2[5], kZero);
+          lstep2[11] = _mm_unpackhi_epi16(step2[5], kZero);
+          lstep2[12] = _mm_unpacklo_epi16(step2[6], kZero);
+          lstep2[13] = _mm_unpackhi_epi16(step2[6], kZero);
+          lstep2[14] = _mm_unpacklo_epi16(step2[7], kZero);
+          lstep2[15] = _mm_unpackhi_epi16(step2[7], kZero);
+          lstep2[0] = _mm_madd_epi16(lstep2[0], kOne);
+          lstep2[1] = _mm_madd_epi16(lstep2[1], kOne);
+          lstep2[2] = _mm_madd_epi16(lstep2[2], kOne);
+          lstep2[3] = _mm_madd_epi16(lstep2[3], kOne);
+          lstep2[4] = _mm_madd_epi16(lstep2[4], kOne);
+          lstep2[5] = _mm_madd_epi16(lstep2[5], kOne);
+          lstep2[6] = _mm_madd_epi16(lstep2[6], kOne);
+          lstep2[7] = _mm_madd_epi16(lstep2[7], kOne);
+          lstep2[8] = _mm_madd_epi16(lstep2[8], kOne);
+          lstep2[9] = _mm_madd_epi16(lstep2[9], kOne);
           lstep2[10] = _mm_madd_epi16(lstep2[10], kOne);
           lstep2[11] = _mm_madd_epi16(lstep2[11], kOne);
           lstep2[12] = _mm_madd_epi16(lstep2[12], kOne);
@@ -1490,22 +1545,22 @@ void FDCT32x32_2D(const int16_t *input,
           lstep2[14] = _mm_madd_epi16(lstep2[14], kOne);
           lstep2[15] = _mm_madd_epi16(lstep2[15], kOne);
 
-          lstep3[ 0] = _mm_add_epi32(lstep2[14], lstep2[ 0]);
-          lstep3[ 1] = _mm_add_epi32(lstep2[15], lstep2[ 1]);
-          lstep3[ 2] = _mm_add_epi32(lstep2[12], lstep2[ 2]);
-          lstep3[ 3] = _mm_add_epi32(lstep2[13], lstep2[ 3]);
-          lstep3[ 4] = _mm_add_epi32(lstep2[10], lstep2[ 4]);
-          lstep3[ 5] = _mm_add_epi32(lstep2[11], lstep2[ 5]);
-          lstep3[ 6] = _mm_add_epi32(lstep2[ 8], lstep2[ 6]);
-          lstep3[ 7] = _mm_add_epi32(lstep2[ 9], lstep2[ 7]);
-          lstep3[ 8] = _mm_sub_epi32(lstep2[ 6], lstep2[ 8]);
-          lstep3[ 9] = _mm_sub_epi32(lstep2[ 7], lstep2[ 9]);
-          lstep3[10] = _mm_sub_epi32(lstep2[ 4], lstep2[10]);
-          lstep3[11] = _mm_sub_epi32(lstep2[ 5], lstep2[11]);
-          lstep3[12] = _mm_sub_epi32(lstep2[ 2], lstep2[12]);
-          lstep3[13] = _mm_sub_epi32(lstep2[ 3], lstep2[13]);
-          lstep3[14] = _mm_sub_epi32(lstep2[ 0], lstep2[14]);
-          lstep3[15] = _mm_sub_epi32(lstep2[ 1], lstep2[15]);
+          lstep3[0] = _mm_add_epi32(lstep2[14], lstep2[0]);
+          lstep3[1] = _mm_add_epi32(lstep2[15], lstep2[1]);
+          lstep3[2] = _mm_add_epi32(lstep2[12], lstep2[2]);
+          lstep3[3] = _mm_add_epi32(lstep2[13], lstep2[3]);
+          lstep3[4] = _mm_add_epi32(lstep2[10], lstep2[4]);
+          lstep3[5] = _mm_add_epi32(lstep2[11], lstep2[5]);
+          lstep3[6] = _mm_add_epi32(lstep2[8], lstep2[6]);
+          lstep3[7] = _mm_add_epi32(lstep2[9], lstep2[7]);
+          lstep3[8] = _mm_sub_epi32(lstep2[6], lstep2[8]);
+          lstep3[9] = _mm_sub_epi32(lstep2[7], lstep2[9]);
+          lstep3[10] = _mm_sub_epi32(lstep2[4], lstep2[10]);
+          lstep3[11] = _mm_sub_epi32(lstep2[5], lstep2[11]);
+          lstep3[12] = _mm_sub_epi32(lstep2[2], lstep2[12]);
+          lstep3[13] = _mm_sub_epi32(lstep2[3], lstep2[13]);
+          lstep3[14] = _mm_sub_epi32(lstep2[0], lstep2[14]);
+          lstep3[15] = _mm_sub_epi32(lstep2[1], lstep2[15]);
         }
         {
           const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
@@ -1643,10 +1698,10 @@ void FDCT32x32_2D(const int16_t *input,
         // stage 4
         {
           // expanding to 32-bit length prior to addition operations
-          lstep2[16] = _mm_unpacklo_epi16(step2[ 8], kZero);
-          lstep2[17] = _mm_unpackhi_epi16(step2[ 8], kZero);
-          lstep2[18] = _mm_unpacklo_epi16(step2[ 9], kZero);
-          lstep2[19] = _mm_unpackhi_epi16(step2[ 9], kZero);
+          lstep2[16] = _mm_unpacklo_epi16(step2[8], kZero);
+          lstep2[17] = _mm_unpackhi_epi16(step2[8], kZero);
+          lstep2[18] = _mm_unpacklo_epi16(step2[9], kZero);
+          lstep2[19] = _mm_unpackhi_epi16(step2[9], kZero);
           lstep2[28] = _mm_unpacklo_epi16(step2[14], kZero);
           lstep2[29] = _mm_unpackhi_epi16(step2[14], kZero);
           lstep2[30] = _mm_unpacklo_epi16(step2[15], kZero);
@@ -1660,14 +1715,14 @@ void FDCT32x32_2D(const int16_t *input,
           lstep2[30] = _mm_madd_epi16(lstep2[30], kOne);
           lstep2[31] = _mm_madd_epi16(lstep2[31], kOne);
 
-          lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]);
-          lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]);
-          lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]);
-          lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]);
-          lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]);
-          lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]);
-          lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]);
-          lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]);
+          lstep1[0] = _mm_add_epi32(lstep3[6], lstep3[0]);
+          lstep1[1] = _mm_add_epi32(lstep3[7], lstep3[1]);
+          lstep1[2] = _mm_add_epi32(lstep3[4], lstep3[2]);
+          lstep1[3] = _mm_add_epi32(lstep3[5], lstep3[3]);
+          lstep1[4] = _mm_sub_epi32(lstep3[2], lstep3[4]);
+          lstep1[5] = _mm_sub_epi32(lstep3[3], lstep3[5]);
+          lstep1[6] = _mm_sub_epi32(lstep3[0], lstep3[6]);
+          lstep1[7] = _mm_sub_epi32(lstep3[1], lstep3[7]);
           lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]);
           lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]);
           lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]);
@@ -1686,64 +1741,64 @@ void FDCT32x32_2D(const int16_t *input,
           lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]);
         }
         {
-        // to be continued...
-        //
-        const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
-        const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
-
-        u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]);
-        u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]);
-        u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]);
-        u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]);
-
-        // TODO(jingning): manually inline k_madd_epi32_ to further hide
-        // instruction latency.
-        v[0] = k_madd_epi32(u[0], k32_p16_m16);
-        v[1] = k_madd_epi32(u[1], k32_p16_m16);
-        v[2] = k_madd_epi32(u[2], k32_p16_m16);
-        v[3] = k_madd_epi32(u[3], k32_p16_m16);
-        v[4] = k_madd_epi32(u[0], k32_p16_p16);
-        v[5] = k_madd_epi32(u[1], k32_p16_p16);
-        v[6] = k_madd_epi32(u[2], k32_p16_p16);
-        v[7] = k_madd_epi32(u[3], k32_p16_p16);
+          // to be continued...
+          //
+          const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
+          const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
+
+          u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]);
+          u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]);
+          u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]);
+          u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]);
+
+          // TODO(jingning): manually inline k_madd_epi32_ to further hide
+          // instruction latency.
+          v[0] = k_madd_epi32(u[0], k32_p16_m16);
+          v[1] = k_madd_epi32(u[1], k32_p16_m16);
+          v[2] = k_madd_epi32(u[2], k32_p16_m16);
+          v[3] = k_madd_epi32(u[3], k32_p16_m16);
+          v[4] = k_madd_epi32(u[0], k32_p16_p16);
+          v[5] = k_madd_epi32(u[1], k32_p16_p16);
+          v[6] = k_madd_epi32(u[2], k32_p16_p16);
+          v[7] = k_madd_epi32(u[3], k32_p16_p16);
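+          // k_madd_epi32 plays the 32-bit role of _mm_madd_epi16: each 64-bit
+          // lane gets the dot product of an interleaved int32 pair with the
+          // matching constant pair, and k_packs_epi64 below narrows the
+          // 64-bit sums back to int32 before the rounding shift.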
 #if DCT_HIGH_BIT_DEPTH
-        overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3],
-                                            &v[4], &v[5], &v[6], &v[7], &kZero);
-        if (overflow) {
-          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
+          overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3], &v[4],
+                                              &v[5], &v[6], &v[7], &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
 #endif  // DCT_HIGH_BIT_DEPTH
-        u[0] = k_packs_epi64(v[0], v[1]);
-        u[1] = k_packs_epi64(v[2], v[3]);
-        u[2] = k_packs_epi64(v[4], v[5]);
-        u[3] = k_packs_epi64(v[6], v[7]);
-
-        v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-        v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-        v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-        v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-
-        lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
-        lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
-        lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
-        lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+
+          lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
         }
         {
           const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
           const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
           const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
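           // (pair_set_epi32(a, b) replicates the (a, b) pair across the
           // register, so each k_madd_epi32 with the epi32 unpacks below
           // computes a * lo + b * hi per 64-bit lane)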
 
-          u[ 0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]);
-          u[ 1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]);
-          u[ 2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]);
-          u[ 3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]);
-          u[ 4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]);
-          u[ 5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]);
-          u[ 6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]);
-          u[ 7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]);
-          u[ 8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]);
-          u[ 9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]);
+          u[0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]);
+          u[1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]);
+          u[2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]);
+          u[3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]);
+          u[4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]);
+          u[5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]);
+          u[6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]);
+          u[7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]);
+          u[8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]);
+          u[9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]);
           u[10] = _mm_unpacklo_epi32(lstep3[41], lstep3[55]);
           u[11] = _mm_unpackhi_epi32(lstep3[41], lstep3[55]);
           u[12] = _mm_unpacklo_epi32(lstep3[42], lstep3[52]);
@@ -1751,16 +1806,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep3[43], lstep3[53]);
           u[15] = _mm_unpackhi_epi32(lstep3[43], lstep3[53]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_m08_p24);
-          v[ 1] = k_madd_epi32(u[ 1], k32_m08_p24);
-          v[ 2] = k_madd_epi32(u[ 2], k32_m08_p24);
-          v[ 3] = k_madd_epi32(u[ 3], k32_m08_p24);
-          v[ 4] = k_madd_epi32(u[ 4], k32_m08_p24);
-          v[ 5] = k_madd_epi32(u[ 5], k32_m08_p24);
-          v[ 6] = k_madd_epi32(u[ 6], k32_m08_p24);
-          v[ 7] = k_madd_epi32(u[ 7], k32_m08_p24);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m24_m08);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m24_m08);
+          v[0] = k_madd_epi32(u[0], k32_m08_p24);
+          v[1] = k_madd_epi32(u[1], k32_m08_p24);
+          v[2] = k_madd_epi32(u[2], k32_m08_p24);
+          v[3] = k_madd_epi32(u[3], k32_m08_p24);
+          v[4] = k_madd_epi32(u[4], k32_m08_p24);
+          v[5] = k_madd_epi32(u[5], k32_m08_p24);
+          v[6] = k_madd_epi32(u[6], k32_m08_p24);
+          v[7] = k_madd_epi32(u[7], k32_m08_p24);
+          v[8] = k_madd_epi32(u[8], k32_m24_m08);
+          v[9] = k_madd_epi32(u[9], k32_m24_m08);
           v[10] = k_madd_epi32(u[10], k32_m24_m08);
           v[11] = k_madd_epi32(u[11], k32_m24_m08);
           v[12] = k_madd_epi32(u[12], k32_m24_m08);
@@ -1771,41 +1826,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m08_p24);
           v[18] = k_madd_epi32(u[14], k32_m08_p24);
           v[19] = k_madd_epi32(u[15], k32_m08_p24);
-          v[20] = k_madd_epi32(u[ 8], k32_m08_p24);
-          v[21] = k_madd_epi32(u[ 9], k32_m08_p24);
+          v[20] = k_madd_epi32(u[8], k32_m08_p24);
+          v[21] = k_madd_epi32(u[9], k32_m08_p24);
           v[22] = k_madd_epi32(u[10], k32_m08_p24);
           v[23] = k_madd_epi32(u[11], k32_m08_p24);
-          v[24] = k_madd_epi32(u[ 4], k32_p24_p08);
-          v[25] = k_madd_epi32(u[ 5], k32_p24_p08);
-          v[26] = k_madd_epi32(u[ 6], k32_p24_p08);
-          v[27] = k_madd_epi32(u[ 7], k32_p24_p08);
-          v[28] = k_madd_epi32(u[ 0], k32_p24_p08);
-          v[29] = k_madd_epi32(u[ 1], k32_p24_p08);
-          v[30] = k_madd_epi32(u[ 2], k32_p24_p08);
-          v[31] = k_madd_epi32(u[ 3], k32_p24_p08);
+          v[24] = k_madd_epi32(u[4], k32_p24_p08);
+          v[25] = k_madd_epi32(u[5], k32_p24_p08);
+          v[26] = k_madd_epi32(u[6], k32_p24_p08);
+          v[27] = k_madd_epi32(u[7], k32_p24_p08);
+          v[28] = k_madd_epi32(u[0], k32_p24_p08);
+          v[29] = k_madd_epi32(u[1], k32_p24_p08);
+          v[30] = k_madd_epi32(u[2], k32_p24_p08);
+          v[31] = k_madd_epi32(u[3], k32_p24_p08);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -1813,16 +1867,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -1830,16 +1884,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          lstep1[36] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep1[37] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep1[38] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep1[39] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep1[40] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep1[41] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep1[42] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep1[43] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep1[52] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep1[53] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep1[36] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep1[37] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep1[38] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep1[39] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          lstep1[40] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          lstep1[41] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          lstep1[42] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          lstep1[43] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          lstep1[52] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          lstep1[53] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           lstep1[54] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           lstep1[55] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           lstep1[56] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -1849,10 +1903,10 @@ void FDCT32x32_2D(const int16_t *input,
         }
         // stage 5
         {
-          lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]);
-          lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]);
-          lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]);
-          lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]);
+          lstep2[8] = _mm_add_epi32(lstep1[10], lstep3[8]);
+          lstep2[9] = _mm_add_epi32(lstep1[11], lstep3[9]);
+          lstep2[10] = _mm_sub_epi32(lstep3[8], lstep1[10]);
+          lstep2[11] = _mm_sub_epi32(lstep3[9], lstep1[11]);
           lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]);
           lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]);
           lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]);
@@ -1875,16 +1929,16 @@ void FDCT32x32_2D(const int16_t *input,
 
           // TODO(jingning): manually inline k_madd_epi32_ to further hide
           // instruction latency.
-          v[ 0] = k_madd_epi32(u[0], k32_p16_p16);
-          v[ 1] = k_madd_epi32(u[1], k32_p16_p16);
-          v[ 2] = k_madd_epi32(u[2], k32_p16_p16);
-          v[ 3] = k_madd_epi32(u[3], k32_p16_p16);
-          v[ 4] = k_madd_epi32(u[0], k32_p16_m16);
-          v[ 5] = k_madd_epi32(u[1], k32_p16_m16);
-          v[ 6] = k_madd_epi32(u[2], k32_p16_m16);
-          v[ 7] = k_madd_epi32(u[3], k32_p16_m16);
-          v[ 8] = k_madd_epi32(u[4], k32_p24_p08);
-          v[ 9] = k_madd_epi32(u[5], k32_p24_p08);
+          v[0] = k_madd_epi32(u[0], k32_p16_p16);
+          v[1] = k_madd_epi32(u[1], k32_p16_p16);
+          v[2] = k_madd_epi32(u[2], k32_p16_p16);
+          v[3] = k_madd_epi32(u[3], k32_p16_p16);
+          v[4] = k_madd_epi32(u[0], k32_p16_m16);
+          v[5] = k_madd_epi32(u[1], k32_p16_m16);
+          v[6] = k_madd_epi32(u[2], k32_p16_m16);
+          v[7] = k_madd_epi32(u[3], k32_p16_m16);
+          v[8] = k_madd_epi32(u[4], k32_p24_p08);
+          v[9] = k_madd_epi32(u[5], k32_p24_p08);
           v[10] = k_madd_epi32(u[6], k32_p24_p08);
           v[11] = k_madd_epi32(u[7], k32_p24_p08);
           v[12] = k_madd_epi32(u[4], k32_m08_p24);
@@ -1894,9 +1948,8 @@ void FDCT32x32_2D(const int16_t *input,
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
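/*
 * [editor's note, not part of the patch] A scalar sketch of the fixed-point
 * rounding idiom these hunks repeat: every k_madd_epi32() result carries
 * DCT_CONST_BITS (14 in libvpx) fractional bits, so the code adds
 * k__DCT_CONST_ROUNDING -- half an output LSB, i.e. 1 << (DCT_CONST_BITS - 1)
 * -- and shifts right to round to nearest. The helper name below is
 * illustrative only, not an identifier from the patch.
 */
#include <stdint.h>
#define DCT_CONST_BITS 14
static int32_t dct_round_shift(int64_t input) {
  const int64_t rounding = 1 << (DCT_CONST_BITS - 1); /* k__DCT_CONST_ROUNDING */
  return (int32_t)((input + rounding) >> DCT_CONST_BITS);
}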
@@ -1966,13 +2019,13 @@ void FDCT32x32_2D(const int16_t *input,
           u[7] = _mm_srai_epi32(u[7], 2);
 
           // Combine
-          out[ 0] = _mm_packs_epi32(u[0], u[1]);
+          out[0] = _mm_packs_epi32(u[0], u[1]);
           out[16] = _mm_packs_epi32(u[2], u[3]);
-          out[ 8] = _mm_packs_epi32(u[4], u[5]);
+          out[8] = _mm_packs_epi32(u[4], u[5]);
           out[24] = _mm_packs_epi32(u[6], u[7]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&out[0], &out[16],
-                                             &out[8], &out[24]);
+          overflow =
+              check_epi16_overflow_x4(&out[0], &out[16], &out[8], &out[24]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
@@ -2001,8 +2054,8 @@ void FDCT32x32_2D(const int16_t *input,
           v[5] = k_madd_epi32(u[5], k32_m24_m08);
           v[6] = k_madd_epi32(u[6], k32_m24_m08);
           v[7] = k_madd_epi32(u[7], k32_m24_m08);
-          v[ 8] = k_madd_epi32(u[4], k32_m08_p24);
-          v[ 9] = k_madd_epi32(u[5], k32_m08_p24);
+          v[8] = k_madd_epi32(u[4], k32_m08_p24);
+          v[9] = k_madd_epi32(u[5], k32_m08_p24);
           v[10] = k_madd_epi32(u[6], k32_m08_p24);
           v[11] = k_madd_epi32(u[7], k32_m08_p24);
           v[12] = k_madd_epi32(u[0], k32_p24_p08);
@@ -2012,9 +2065,8 @@ void FDCT32x32_2D(const int16_t *input,
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
@@ -2088,10 +2140,10 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
           const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
 
-          u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+          u[0] = _mm_unpacklo_epi32(lstep2[8], lstep2[14]);
+          u[1] = _mm_unpackhi_epi32(lstep2[8], lstep2[14]);
+          u[2] = _mm_unpacklo_epi32(lstep2[9], lstep2[15]);
+          u[3] = _mm_unpackhi_epi32(lstep2[9], lstep2[15]);
           u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
           u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
           u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
@@ -2100,10 +2152,10 @@ void FDCT32x32_2D(const int16_t *input,
           u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
           u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
           u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
-          u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+          u[12] = _mm_unpacklo_epi32(lstep2[8], lstep2[14]);
+          u[13] = _mm_unpackhi_epi32(lstep2[8], lstep2[14]);
+          u[14] = _mm_unpacklo_epi32(lstep2[9], lstep2[15]);
+          u[15] = _mm_unpackhi_epi32(lstep2[9], lstep2[15]);
 
           v[0] = k_madd_epi32(u[0], k32_p28_p04);
           v[1] = k_madd_epi32(u[1], k32_p28_p04);
@@ -2113,8 +2165,8 @@ void FDCT32x32_2D(const int16_t *input,
           v[5] = k_madd_epi32(u[5], k32_p12_p20);
           v[6] = k_madd_epi32(u[6], k32_p12_p20);
           v[7] = k_madd_epi32(u[7], k32_p12_p20);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
+          v[8] = k_madd_epi32(u[8], k32_m20_p12);
+          v[9] = k_madd_epi32(u[9], k32_m20_p12);
           v[10] = k_madd_epi32(u[10], k32_m20_p12);
           v[11] = k_madd_epi32(u[11], k32_m20_p12);
           v[12] = k_madd_epi32(u[12], k32_m04_p28);
@@ -2124,9 +2176,8 @@ void FDCT32x32_2D(const int16_t *input,
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
@@ -2195,13 +2246,13 @@ void FDCT32x32_2D(const int16_t *input,
           u[6] = _mm_srai_epi32(u[6], 2);
           u[7] = _mm_srai_epi32(u[7], 2);
 
-          out[ 4] = _mm_packs_epi32(u[0], u[1]);
+          out[4] = _mm_packs_epi32(u[0], u[1]);
           out[20] = _mm_packs_epi32(u[2], u[3]);
           out[12] = _mm_packs_epi32(u[4], u[5]);
           out[28] = _mm_packs_epi32(u[6], u[7]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&out[4], &out[20],
-                                             &out[12], &out[28]);
+          overflow =
+              check_epi16_overflow_x4(&out[4], &out[20], &out[12], &out[28]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
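/*
 * [editor's note, not part of the patch] Why the guard just above exists:
 * _mm_packs_epi32 saturates 32-bit lanes into int16, so in DCT_HIGH_BIT_DEPTH
 * builds each pack (and each 32-bit butterfly stage) is probed with a
 * check_epi16_overflow_* / k_check_epi32_overflow_* call; on any hit the SIMD
 * pass is abandoned and HIGH_FDCT32x32_2D_ROWS_C recomputes the rows in plain
 * C. A conceptual scalar probe (a sketch, not the library's implementation):
 */
#include <stdint.h>
static int lane_would_overflow(int32_t x) {
  return x < INT16_MIN || x > INT16_MAX; /* value cannot survive the pack */
}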
@@ -2230,21 +2281,21 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
           const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64);
           const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
-          const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64,
-                                                     -cospi_20_64);
+          const __m128i k32_m12_m20 =
+              pair_set_epi32(-cospi_12_64, -cospi_20_64);
           const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
           const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
 
-          u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]);
-          u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]);
-          u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]);
-          u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]);
-          u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]);
-          u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]);
-          u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]);
-          u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]);
-          u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]);
-          u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]);
+          u[0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]);
+          u[1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]);
+          u[2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]);
+          u[3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]);
+          u[4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]);
+          u[5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]);
+          u[6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]);
+          u[7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]);
+          u[8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]);
+          u[9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]);
           u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]);
           u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]);
           u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]);
@@ -2252,16 +2303,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]);
           u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28);
-          v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28);
-          v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28);
-          v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28);
-          v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04);
-          v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04);
-          v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04);
-          v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
+          v[0] = k_madd_epi32(u[0], k32_m04_p28);
+          v[1] = k_madd_epi32(u[1], k32_m04_p28);
+          v[2] = k_madd_epi32(u[2], k32_m04_p28);
+          v[3] = k_madd_epi32(u[3], k32_m04_p28);
+          v[4] = k_madd_epi32(u[4], k32_m28_m04);
+          v[5] = k_madd_epi32(u[5], k32_m28_m04);
+          v[6] = k_madd_epi32(u[6], k32_m28_m04);
+          v[7] = k_madd_epi32(u[7], k32_m28_m04);
+          v[8] = k_madd_epi32(u[8], k32_m20_p12);
+          v[9] = k_madd_epi32(u[9], k32_m20_p12);
           v[10] = k_madd_epi32(u[10], k32_m20_p12);
           v[11] = k_madd_epi32(u[11], k32_m20_p12);
           v[12] = k_madd_epi32(u[12], k32_m12_m20);
@@ -2272,41 +2323,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m20_p12);
           v[18] = k_madd_epi32(u[14], k32_m20_p12);
           v[19] = k_madd_epi32(u[15], k32_m20_p12);
-          v[20] = k_madd_epi32(u[ 8], k32_p12_p20);
-          v[21] = k_madd_epi32(u[ 9], k32_p12_p20);
+          v[20] = k_madd_epi32(u[8], k32_p12_p20);
+          v[21] = k_madd_epi32(u[9], k32_p12_p20);
           v[22] = k_madd_epi32(u[10], k32_p12_p20);
           v[23] = k_madd_epi32(u[11], k32_p12_p20);
-          v[24] = k_madd_epi32(u[ 4], k32_m04_p28);
-          v[25] = k_madd_epi32(u[ 5], k32_m04_p28);
-          v[26] = k_madd_epi32(u[ 6], k32_m04_p28);
-          v[27] = k_madd_epi32(u[ 7], k32_m04_p28);
-          v[28] = k_madd_epi32(u[ 0], k32_p28_p04);
-          v[29] = k_madd_epi32(u[ 1], k32_p28_p04);
-          v[30] = k_madd_epi32(u[ 2], k32_p28_p04);
-          v[31] = k_madd_epi32(u[ 3], k32_p28_p04);
+          v[24] = k_madd_epi32(u[4], k32_m04_p28);
+          v[25] = k_madd_epi32(u[5], k32_m04_p28);
+          v[26] = k_madd_epi32(u[6], k32_m04_p28);
+          v[27] = k_madd_epi32(u[7], k32_m04_p28);
+          v[28] = k_madd_epi32(u[0], k32_p28_p04);
+          v[29] = k_madd_epi32(u[1], k32_p28_p04);
+          v[30] = k_madd_epi32(u[2], k32_p28_p04);
+          v[31] = k_madd_epi32(u[3], k32_p28_p04);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -2314,16 +2364,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2331,16 +2381,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep3[34] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep3[35] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep3[36] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep3[37] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          lstep3[42] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          lstep3[43] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          lstep3[44] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          lstep3[45] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          lstep3[50] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          lstep3[51] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2353,22 +2403,22 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64);
           const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64);
           const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64);
-          const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64,  cospi_26_64);
+          const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64, cospi_26_64);
           const __m128i k32_m26_p06 = pair_set_epi32(-cospi_26_64, cospi_6_64);
           const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64);
           const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64);
           const __m128i k32_m02_p30 = pair_set_epi32(-cospi_2_64, cospi_30_64);
 
-          u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]);
-          u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]);
-          u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]);
-          u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]);
-          u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]);
-          u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]);
-          u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]);
-          u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]);
-          u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]);
-          u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]);
+          u[0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]);
+          u[1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]);
+          u[2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]);
+          u[3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]);
+          u[4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]);
+          u[5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]);
+          u[6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]);
+          u[7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]);
+          u[8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]);
+          u[9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]);
           u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]);
           u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]);
           u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]);
@@ -2376,16 +2426,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]);
           u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10);
+          v[0] = k_madd_epi32(u[0], k32_p30_p02);
+          v[1] = k_madd_epi32(u[1], k32_p30_p02);
+          v[2] = k_madd_epi32(u[2], k32_p30_p02);
+          v[3] = k_madd_epi32(u[3], k32_p30_p02);
+          v[4] = k_madd_epi32(u[4], k32_p14_p18);
+          v[5] = k_madd_epi32(u[5], k32_p14_p18);
+          v[6] = k_madd_epi32(u[6], k32_p14_p18);
+          v[7] = k_madd_epi32(u[7], k32_p14_p18);
+          v[8] = k_madd_epi32(u[8], k32_p22_p10);
+          v[9] = k_madd_epi32(u[9], k32_p22_p10);
           v[10] = k_madd_epi32(u[10], k32_p22_p10);
           v[11] = k_madd_epi32(u[11], k32_p22_p10);
           v[12] = k_madd_epi32(u[12], k32_p06_p26);
@@ -2396,41 +2446,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m26_p06);
           v[18] = k_madd_epi32(u[14], k32_m26_p06);
           v[19] = k_madd_epi32(u[15], k32_m26_p06);
-          v[20] = k_madd_epi32(u[ 8], k32_m10_p22);
-          v[21] = k_madd_epi32(u[ 9], k32_m10_p22);
+          v[20] = k_madd_epi32(u[8], k32_m10_p22);
+          v[21] = k_madd_epi32(u[9], k32_m10_p22);
           v[22] = k_madd_epi32(u[10], k32_m10_p22);
           v[23] = k_madd_epi32(u[11], k32_m10_p22);
-          v[24] = k_madd_epi32(u[ 4], k32_m18_p14);
-          v[25] = k_madd_epi32(u[ 5], k32_m18_p14);
-          v[26] = k_madd_epi32(u[ 6], k32_m18_p14);
-          v[27] = k_madd_epi32(u[ 7], k32_m18_p14);
-          v[28] = k_madd_epi32(u[ 0], k32_m02_p30);
-          v[29] = k_madd_epi32(u[ 1], k32_m02_p30);
-          v[30] = k_madd_epi32(u[ 2], k32_m02_p30);
-          v[31] = k_madd_epi32(u[ 3], k32_m02_p30);
+          v[24] = k_madd_epi32(u[4], k32_m18_p14);
+          v[25] = k_madd_epi32(u[5], k32_m18_p14);
+          v[26] = k_madd_epi32(u[6], k32_m18_p14);
+          v[27] = k_madd_epi32(u[7], k32_m18_p14);
+          v[28] = k_madd_epi32(u[0], k32_m02_p30);
+          v[29] = k_madd_epi32(u[1], k32_m02_p30);
+          v[30] = k_madd_epi32(u[2], k32_m02_p30);
+          v[31] = k_madd_epi32(u[3], k32_m02_p30);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -2438,16 +2487,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2455,16 +2504,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2472,16 +2521,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
 
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[0] = _mm_cmplt_epi32(u[0], kZero);
+          v[1] = _mm_cmplt_epi32(u[1], kZero);
+          v[2] = _mm_cmplt_epi32(u[2], kZero);
+          v[3] = _mm_cmplt_epi32(u[3], kZero);
+          v[4] = _mm_cmplt_epi32(u[4], kZero);
+          v[5] = _mm_cmplt_epi32(u[5], kZero);
+          v[6] = _mm_cmplt_epi32(u[6], kZero);
+          v[7] = _mm_cmplt_epi32(u[7], kZero);
+          v[8] = _mm_cmplt_epi32(u[8], kZero);
+          v[9] = _mm_cmplt_epi32(u[9], kZero);
           v[10] = _mm_cmplt_epi32(u[10], kZero);
           v[11] = _mm_cmplt_epi32(u[11], kZero);
           v[12] = _mm_cmplt_epi32(u[12], kZero);
@@ -2489,16 +2538,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_cmplt_epi32(u[14], kZero);
           v[15] = _mm_cmplt_epi32(u[15], kZero);
 
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[0] = _mm_sub_epi32(u[0], v[0]);
+          u[1] = _mm_sub_epi32(u[1], v[1]);
+          u[2] = _mm_sub_epi32(u[2], v[2]);
+          u[3] = _mm_sub_epi32(u[3], v[3]);
+          u[4] = _mm_sub_epi32(u[4], v[4]);
+          u[5] = _mm_sub_epi32(u[5], v[5]);
+          u[6] = _mm_sub_epi32(u[6], v[6]);
+          u[7] = _mm_sub_epi32(u[7], v[7]);
+          u[8] = _mm_sub_epi32(u[8], v[8]);
+          u[9] = _mm_sub_epi32(u[9], v[9]);
           u[10] = _mm_sub_epi32(u[10], v[10]);
           u[11] = _mm_sub_epi32(u[11], v[11]);
           u[12] = _mm_sub_epi32(u[12], v[12]);
@@ -2506,16 +2555,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_sub_epi32(u[14], v[14]);
           u[15] = _mm_sub_epi32(u[15], v[15]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], K32One);
-          v[ 1] = _mm_add_epi32(u[ 1], K32One);
-          v[ 2] = _mm_add_epi32(u[ 2], K32One);
-          v[ 3] = _mm_add_epi32(u[ 3], K32One);
-          v[ 4] = _mm_add_epi32(u[ 4], K32One);
-          v[ 5] = _mm_add_epi32(u[ 5], K32One);
-          v[ 6] = _mm_add_epi32(u[ 6], K32One);
-          v[ 7] = _mm_add_epi32(u[ 7], K32One);
-          v[ 8] = _mm_add_epi32(u[ 8], K32One);
-          v[ 9] = _mm_add_epi32(u[ 9], K32One);
+          v[0] = _mm_add_epi32(u[0], K32One);
+          v[1] = _mm_add_epi32(u[1], K32One);
+          v[2] = _mm_add_epi32(u[2], K32One);
+          v[3] = _mm_add_epi32(u[3], K32One);
+          v[4] = _mm_add_epi32(u[4], K32One);
+          v[5] = _mm_add_epi32(u[5], K32One);
+          v[6] = _mm_add_epi32(u[6], K32One);
+          v[7] = _mm_add_epi32(u[7], K32One);
+          v[8] = _mm_add_epi32(u[8], K32One);
+          v[9] = _mm_add_epi32(u[9], K32One);
           v[10] = _mm_add_epi32(u[10], K32One);
           v[11] = _mm_add_epi32(u[11], K32One);
           v[12] = _mm_add_epi32(u[12], K32One);
@@ -2523,16 +2572,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], K32One);
           v[15] = _mm_add_epi32(u[15], K32One);
 
-          u[ 0] = _mm_srai_epi32(v[ 0], 2);
-          u[ 1] = _mm_srai_epi32(v[ 1], 2);
-          u[ 2] = _mm_srai_epi32(v[ 2], 2);
-          u[ 3] = _mm_srai_epi32(v[ 3], 2);
-          u[ 4] = _mm_srai_epi32(v[ 4], 2);
-          u[ 5] = _mm_srai_epi32(v[ 5], 2);
-          u[ 6] = _mm_srai_epi32(v[ 6], 2);
-          u[ 7] = _mm_srai_epi32(v[ 7], 2);
-          u[ 8] = _mm_srai_epi32(v[ 8], 2);
-          u[ 9] = _mm_srai_epi32(v[ 9], 2);
+          u[0] = _mm_srai_epi32(v[0], 2);
+          u[1] = _mm_srai_epi32(v[1], 2);
+          u[2] = _mm_srai_epi32(v[2], 2);
+          u[3] = _mm_srai_epi32(v[3], 2);
+          u[4] = _mm_srai_epi32(v[4], 2);
+          u[5] = _mm_srai_epi32(v[5], 2);
+          u[6] = _mm_srai_epi32(v[6], 2);
+          u[7] = _mm_srai_epi32(v[7], 2);
+          u[8] = _mm_srai_epi32(v[8], 2);
+          u[9] = _mm_srai_epi32(v[9], 2);
           u[10] = _mm_srai_epi32(v[10], 2);
           u[11] = _mm_srai_epi32(v[11], 2);
           u[12] = _mm_srai_epi32(v[12], 2);
@@ -2540,18 +2589,18 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], 2);
           u[15] = _mm_srai_epi32(v[15], 2);
 
-          out[ 2] = _mm_packs_epi32(u[0], u[1]);
+          out[2] = _mm_packs_epi32(u[0], u[1]);
           out[18] = _mm_packs_epi32(u[2], u[3]);
           out[10] = _mm_packs_epi32(u[4], u[5]);
           out[26] = _mm_packs_epi32(u[6], u[7]);
-          out[ 6] = _mm_packs_epi32(u[8], u[9]);
+          out[6] = _mm_packs_epi32(u[8], u[9]);
           out[22] = _mm_packs_epi32(u[10], u[11]);
           out[14] = _mm_packs_epi32(u[12], u[13]);
           out[30] = _mm_packs_epi32(u[14], u[15]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
-                                             &out[26], &out[6], &out[22],
-                                             &out[14], &out[30]);
+          overflow =
+              check_epi16_overflow_x8(&out[2], &out[18], &out[10], &out[26],
+                                      &out[6], &out[22], &out[14], &out[30]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
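/*
 * [editor's note, not part of the patch] The cmplt / sub / add-one / srai-2
 * runs in the hunks above implement the final down-scaling by 4.
 * _mm_cmplt_epi32(u, kZero) is all-ones (numerically -1) in negative lanes,
 * so u - cmplt(u, kZero) adds 1 exactly where u < 0; with the +K32One bias
 * and the arithmetic shift, each lane computes the scalar expression below.
 */
#include <stdint.h>
static int32_t final_round_shift2(int32_t x) {
  return (x + 1 + (x < 0)) >> 2; /* round-to-nearest for the >> 2 */
}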
@@ -2603,16 +2652,16 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64);
           const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64);
 
-          u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]);
-          u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]);
-          u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]);
-          u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]);
-          u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]);
-          u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]);
-          u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]);
-          u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]);
-          u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]);
-          u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]);
+          u[0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]);
+          u[1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]);
+          u[2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]);
+          u[3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]);
+          u[4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]);
+          u[5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]);
+          u[6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]);
+          u[7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]);
+          u[8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]);
+          u[9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]);
           u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]);
           u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]);
           u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]);
@@ -2620,16 +2669,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]);
           u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p23_p09);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09);
+          v[0] = k_madd_epi32(u[0], k32_p31_p01);
+          v[1] = k_madd_epi32(u[1], k32_p31_p01);
+          v[2] = k_madd_epi32(u[2], k32_p31_p01);
+          v[3] = k_madd_epi32(u[3], k32_p31_p01);
+          v[4] = k_madd_epi32(u[4], k32_p15_p17);
+          v[5] = k_madd_epi32(u[5], k32_p15_p17);
+          v[6] = k_madd_epi32(u[6], k32_p15_p17);
+          v[7] = k_madd_epi32(u[7], k32_p15_p17);
+          v[8] = k_madd_epi32(u[8], k32_p23_p09);
+          v[9] = k_madd_epi32(u[9], k32_p23_p09);
           v[10] = k_madd_epi32(u[10], k32_p23_p09);
           v[11] = k_madd_epi32(u[11], k32_p23_p09);
           v[12] = k_madd_epi32(u[12], k32_p07_p25);
@@ -2640,41 +2689,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m25_p07);
           v[18] = k_madd_epi32(u[14], k32_m25_p07);
           v[19] = k_madd_epi32(u[15], k32_m25_p07);
-          v[20] = k_madd_epi32(u[ 8], k32_m09_p23);
-          v[21] = k_madd_epi32(u[ 9], k32_m09_p23);
+          v[20] = k_madd_epi32(u[8], k32_m09_p23);
+          v[21] = k_madd_epi32(u[9], k32_m09_p23);
           v[22] = k_madd_epi32(u[10], k32_m09_p23);
           v[23] = k_madd_epi32(u[11], k32_m09_p23);
-          v[24] = k_madd_epi32(u[ 4], k32_m17_p15);
-          v[25] = k_madd_epi32(u[ 5], k32_m17_p15);
-          v[26] = k_madd_epi32(u[ 6], k32_m17_p15);
-          v[27] = k_madd_epi32(u[ 7], k32_m17_p15);
-          v[28] = k_madd_epi32(u[ 0], k32_m01_p31);
-          v[29] = k_madd_epi32(u[ 1], k32_m01_p31);
-          v[30] = k_madd_epi32(u[ 2], k32_m01_p31);
-          v[31] = k_madd_epi32(u[ 3], k32_m01_p31);
+          v[24] = k_madd_epi32(u[4], k32_m17_p15);
+          v[25] = k_madd_epi32(u[5], k32_m17_p15);
+          v[26] = k_madd_epi32(u[6], k32_m17_p15);
+          v[27] = k_madd_epi32(u[7], k32_m17_p15);
+          v[28] = k_madd_epi32(u[0], k32_m01_p31);
+          v[29] = k_madd_epi32(u[1], k32_m01_p31);
+          v[30] = k_madd_epi32(u[2], k32_m01_p31);
+          v[31] = k_madd_epi32(u[3], k32_m01_p31);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -2682,16 +2730,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2699,16 +2747,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2716,16 +2764,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
 
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[0] = _mm_cmplt_epi32(u[0], kZero);
+          v[1] = _mm_cmplt_epi32(u[1], kZero);
+          v[2] = _mm_cmplt_epi32(u[2], kZero);
+          v[3] = _mm_cmplt_epi32(u[3], kZero);
+          v[4] = _mm_cmplt_epi32(u[4], kZero);
+          v[5] = _mm_cmplt_epi32(u[5], kZero);
+          v[6] = _mm_cmplt_epi32(u[6], kZero);
+          v[7] = _mm_cmplt_epi32(u[7], kZero);
+          v[8] = _mm_cmplt_epi32(u[8], kZero);
+          v[9] = _mm_cmplt_epi32(u[9], kZero);
           v[10] = _mm_cmplt_epi32(u[10], kZero);
           v[11] = _mm_cmplt_epi32(u[11], kZero);
           v[12] = _mm_cmplt_epi32(u[12], kZero);
@@ -2733,16 +2781,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_cmplt_epi32(u[14], kZero);
           v[15] = _mm_cmplt_epi32(u[15], kZero);
 
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[0] = _mm_sub_epi32(u[0], v[0]);
+          u[1] = _mm_sub_epi32(u[1], v[1]);
+          u[2] = _mm_sub_epi32(u[2], v[2]);
+          u[3] = _mm_sub_epi32(u[3], v[3]);
+          u[4] = _mm_sub_epi32(u[4], v[4]);
+          u[5] = _mm_sub_epi32(u[5], v[5]);
+          u[6] = _mm_sub_epi32(u[6], v[6]);
+          u[7] = _mm_sub_epi32(u[7], v[7]);
+          u[8] = _mm_sub_epi32(u[8], v[8]);
+          u[9] = _mm_sub_epi32(u[9], v[9]);
           u[10] = _mm_sub_epi32(u[10], v[10]);
           u[11] = _mm_sub_epi32(u[11], v[11]);
           u[12] = _mm_sub_epi32(u[12], v[12]);
@@ -2784,18 +2832,18 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], 2);
           u[15] = _mm_srai_epi32(v[15], 2);
 
-          out[ 1] = _mm_packs_epi32(u[0], u[1]);
+          out[1] = _mm_packs_epi32(u[0], u[1]);
           out[17] = _mm_packs_epi32(u[2], u[3]);
-          out[ 9] = _mm_packs_epi32(u[4], u[5]);
+          out[9] = _mm_packs_epi32(u[4], u[5]);
           out[25] = _mm_packs_epi32(u[6], u[7]);
-          out[ 7] = _mm_packs_epi32(u[8], u[9]);
+          out[7] = _mm_packs_epi32(u[8], u[9]);
           out[23] = _mm_packs_epi32(u[10], u[11]);
           out[15] = _mm_packs_epi32(u[12], u[13]);
           out[31] = _mm_packs_epi32(u[14], u[15]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
-                                             &out[25], &out[7], &out[23],
-                                             &out[15], &out[31]);
+          overflow =
+              check_epi16_overflow_x8(&out[1], &out[17], &out[9], &out[25],
+                                      &out[7], &out[23], &out[15], &out[31]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
@@ -2812,16 +2860,16 @@ void FDCT32x32_2D(const int16_t *input,
           const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64);
           const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64);
 
-          u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]);
-          u[ 1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]);
-          u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]);
-          u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]);
-          u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]);
-          u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]);
-          u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]);
-          u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]);
-          u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]);
-          u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]);
+          u[0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]);
+          u[1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]);
+          u[2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]);
+          u[3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]);
+          u[4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]);
+          u[5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]);
+          u[6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]);
+          u[7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]);
+          u[8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]);
+          u[9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]);
           u[10] = _mm_unpacklo_epi32(lstep1[45], lstep1[51]);
           u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]);
           u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]);
@@ -2829,16 +2877,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]);
           u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]);
 
-          v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13);
+          v[0] = k_madd_epi32(u[0], k32_p27_p05);
+          v[1] = k_madd_epi32(u[1], k32_p27_p05);
+          v[2] = k_madd_epi32(u[2], k32_p27_p05);
+          v[3] = k_madd_epi32(u[3], k32_p27_p05);
+          v[4] = k_madd_epi32(u[4], k32_p11_p21);
+          v[5] = k_madd_epi32(u[5], k32_p11_p21);
+          v[6] = k_madd_epi32(u[6], k32_p11_p21);
+          v[7] = k_madd_epi32(u[7], k32_p11_p21);
+          v[8] = k_madd_epi32(u[8], k32_p19_p13);
+          v[9] = k_madd_epi32(u[9], k32_p19_p13);
           v[10] = k_madd_epi32(u[10], k32_p19_p13);
           v[11] = k_madd_epi32(u[11], k32_p19_p13);
           v[12] = k_madd_epi32(u[12], k32_p03_p29);
@@ -2849,41 +2897,40 @@ void FDCT32x32_2D(const int16_t *input,
           v[17] = k_madd_epi32(u[13], k32_m29_p03);
           v[18] = k_madd_epi32(u[14], k32_m29_p03);
           v[19] = k_madd_epi32(u[15], k32_m29_p03);
-          v[20] = k_madd_epi32(u[ 8], k32_m13_p19);
-          v[21] = k_madd_epi32(u[ 9], k32_m13_p19);
+          v[20] = k_madd_epi32(u[8], k32_m13_p19);
+          v[21] = k_madd_epi32(u[9], k32_m13_p19);
           v[22] = k_madd_epi32(u[10], k32_m13_p19);
           v[23] = k_madd_epi32(u[11], k32_m13_p19);
-          v[24] = k_madd_epi32(u[ 4], k32_m21_p11);
-          v[25] = k_madd_epi32(u[ 5], k32_m21_p11);
-          v[26] = k_madd_epi32(u[ 6], k32_m21_p11);
-          v[27] = k_madd_epi32(u[ 7], k32_m21_p11);
-          v[28] = k_madd_epi32(u[ 0], k32_m05_p27);
-          v[29] = k_madd_epi32(u[ 1], k32_m05_p27);
-          v[30] = k_madd_epi32(u[ 2], k32_m05_p27);
-          v[31] = k_madd_epi32(u[ 3], k32_m05_p27);
+          v[24] = k_madd_epi32(u[4], k32_m21_p11);
+          v[25] = k_madd_epi32(u[5], k32_m21_p11);
+          v[26] = k_madd_epi32(u[6], k32_m21_p11);
+          v[27] = k_madd_epi32(u[7], k32_m21_p11);
+          v[28] = k_madd_epi32(u[0], k32_m05_p27);
+          v[29] = k_madd_epi32(u[1], k32_m05_p27);
+          v[30] = k_madd_epi32(u[2], k32_m05_p27);
+          v[31] = k_madd_epi32(u[3], k32_m05_p27);
 
 #if DCT_HIGH_BIT_DEPTH
           overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
+              &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16],
+              &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24],
+              &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
           }
 #endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+          u[8] = k_packs_epi64(v[16], v[17]);
+          u[9] = k_packs_epi64(v[18], v[19]);
           u[10] = k_packs_epi64(v[20], v[21]);
           u[11] = k_packs_epi64(v[22], v[23]);
           u[12] = k_packs_epi64(v[24], v[25]);
@@ -2891,16 +2938,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = k_packs_epi64(v[28], v[29]);
           u[15] = k_packs_epi64(v[30], v[31]);
 
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+          v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+          v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
           v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
           v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
           v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
@@ -2908,16 +2955,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
           v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
 
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+          u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+          u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
           u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
           u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
           u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
@@ -2925,16 +2972,16 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
           u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
 
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[0] = _mm_cmplt_epi32(u[0], kZero);
+          v[1] = _mm_cmplt_epi32(u[1], kZero);
+          v[2] = _mm_cmplt_epi32(u[2], kZero);
+          v[3] = _mm_cmplt_epi32(u[3], kZero);
+          v[4] = _mm_cmplt_epi32(u[4], kZero);
+          v[5] = _mm_cmplt_epi32(u[5], kZero);
+          v[6] = _mm_cmplt_epi32(u[6], kZero);
+          v[7] = _mm_cmplt_epi32(u[7], kZero);
+          v[8] = _mm_cmplt_epi32(u[8], kZero);
+          v[9] = _mm_cmplt_epi32(u[9], kZero);
           v[10] = _mm_cmplt_epi32(u[10], kZero);
           v[11] = _mm_cmplt_epi32(u[11], kZero);
           v[12] = _mm_cmplt_epi32(u[12], kZero);
@@ -2942,16 +2989,16 @@ void FDCT32x32_2D(const int16_t *input,
           v[14] = _mm_cmplt_epi32(u[14], kZero);
           v[15] = _mm_cmplt_epi32(u[15], kZero);
 
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[0] = _mm_sub_epi32(u[0], v[0]);
+          u[1] = _mm_sub_epi32(u[1], v[1]);
+          u[2] = _mm_sub_epi32(u[2], v[2]);
+          u[3] = _mm_sub_epi32(u[3], v[3]);
+          u[4] = _mm_sub_epi32(u[4], v[4]);
+          u[5] = _mm_sub_epi32(u[5], v[5]);
+          u[6] = _mm_sub_epi32(u[6], v[6]);
+          u[7] = _mm_sub_epi32(u[7], v[7]);
+          u[8] = _mm_sub_epi32(u[8], v[8]);
+          u[9] = _mm_sub_epi32(u[9], v[9]);
           u[10] = _mm_sub_epi32(u[10], v[10]);
           u[11] = _mm_sub_epi32(u[11], v[11]);
           u[12] = _mm_sub_epi32(u[12], v[12]);
@@ -2993,18 +3040,18 @@ void FDCT32x32_2D(const int16_t *input,
           u[14] = _mm_srai_epi32(v[14], 2);
           u[15] = _mm_srai_epi32(v[15], 2);
 
-          out[ 5] = _mm_packs_epi32(u[0], u[1]);
+          out[5] = _mm_packs_epi32(u[0], u[1]);
           out[21] = _mm_packs_epi32(u[2], u[3]);
           out[13] = _mm_packs_epi32(u[4], u[5]);
           out[29] = _mm_packs_epi32(u[6], u[7]);
-          out[ 3] = _mm_packs_epi32(u[8], u[9]);
+          out[3] = _mm_packs_epi32(u[8], u[9]);
           out[19] = _mm_packs_epi32(u[10], u[11]);
           out[11] = _mm_packs_epi32(u[12], u[13]);
           out[27] = _mm_packs_epi32(u[14], u[15]);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
-                                             &out[29], &out[3], &out[19],
-                                             &out[11], &out[27]);
+          overflow =
+              check_epi16_overflow_x8(&out[5], &out[21], &out[13], &out[29],
+                                      &out[3], &out[19], &out[11], &out[27]);
           if (overflow) {
             HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
             return;
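
The rd variant's descale in the hunks above uses a comparison-mask trick instead of a branch: `_mm_cmplt_epi32` returns all-ones (-1) in lanes where the value is negative, so subtracting the mask increments exactly those lanes. Combined with the rounding add that precedes the final `>> 2` (elided between hunks), this makes the rounding symmetric about zero. A minimal sketch of the idiom (the helper name is mine, not libvpx's):

```c
#include <emmintrin.h>

/* Lanes where x < 0 compare as all-ones (-1); subtracting the mask
 * therefore adds 1 to negative lanes only.  Together with the later
 * "+1, >> 2" step this computes (x + 1 + (x < 0)) >> 2, i.e. the
 * final rounding treats negative values symmetrically. */
static __m128i add_one_if_negative(__m128i x) {
  const __m128i mask = _mm_cmplt_epi32(x, _mm_setzero_si128());
  return _mm_sub_epi32(x, mask);
}
```
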
diff --git a/vpx_dsp/x86/fwd_txfm_avx2.c b/vpx_dsp/x86/fwd_txfm_avx2.c
index 6d9da6aa89e62009c14218dc49346bdbaf7d105e..21f11f0c3e5ff05e27ac8d48c74cf1bae6ef2c7f 100644
--- a/vpx_dsp/x86/fwd_txfm_avx2.c
+++ b/vpx_dsp/x86/fwd_txfm_avx2.c
@@ -13,11 +13,11 @@
 #define FDCT32x32_2D_AVX2 vpx_fdct32x32_rd_avx2
 #define FDCT32x32_HIGH_PRECISION 0
 #include "vpx_dsp/x86/fwd_dct32x32_impl_avx2.h"
-#undef  FDCT32x32_2D_AVX2
-#undef  FDCT32x32_HIGH_PRECISION
+#undef FDCT32x32_2D_AVX2
+#undef FDCT32x32_HIGH_PRECISION
 
 #define FDCT32x32_2D_AVX2 vpx_fdct32x32_avx2
 #define FDCT32x32_HIGH_PRECISION 1
-#include "vpx_dsp/x86/fwd_dct32x32_impl_avx2.h" // NOLINT
-#undef  FDCT32x32_2D_AVX2
-#undef  FDCT32x32_HIGH_PRECISION
+#include "vpx_dsp/x86/fwd_dct32x32_impl_avx2.h"  // NOLINT
+#undef FDCT32x32_2D_AVX2
+#undef FDCT32x32_HIGH_PRECISION
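
This file is essentially only the instantiation trick: C has no templates, so the AVX2 (and, below, SSE2) translation units pull one implementation header in twice, renaming the generated function and flipping the precision flag between inclusions. A sketch of the pattern with hypothetical names (`fdct_impl.h`, `FDCT_NAME`, and the instance names stand in for the real header and macros):

```c
/* fdct_impl.h (hypothetical) would contain, in essence:
 *   void FDCT_NAME(const int16_t *in, tran_low_t *out, int stride) {
 *     ... body branching on FDCT_HIGH_PRECISION ...
 *   }
 */
#define FDCT_NAME fdct32x32_rd   /* hypothetical low-precision instance */
#define FDCT_HIGH_PRECISION 0
#include "fdct_impl.h"
#undef FDCT_NAME
#undef FDCT_HIGH_PRECISION

#define FDCT_NAME fdct32x32      /* hypothetical high-precision instance */
#define FDCT_HIGH_PRECISION 1
#include "fdct_impl.h"           /* same body, re-expanded under new names */
#undef FDCT_NAME
#undef FDCT_HIGH_PRECISION
```

The #undef pairs after each inclusion are what allow the header to be included again with fresh definitions.
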
diff --git a/vpx_dsp/x86/fwd_txfm_impl_sse2.h b/vpx_dsp/x86/fwd_txfm_impl_sse2.h
index 69889e2e98cdf6d10788db303f3266fbf10a3718..d27246d8c364dbf18e021edc5efe6e6e371b8f8b 100644
--- a/vpx_dsp/x86/fwd_txfm_impl_sse2.h
+++ b/vpx_dsp/x86/fwd_txfm_impl_sse2.h
@@ -43,44 +43,36 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
   // These are the coefficients used for the multiplies.
   // In the comments, pN means cos(N pi /64) and mN is -cos(N pi /64),
   // where cospi_N_64 = cos(N pi /64)
-  const __m128i k__cospi_A = octa_set_epi16(cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64,
-                                            cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_B = octa_set_epi16(cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64);
-  const __m128i k__cospi_C = octa_set_epi16(cospi_8_64, cospi_24_64,
-                                            cospi_8_64, cospi_24_64,
-                                            cospi_24_64, -cospi_8_64,
-                                            cospi_24_64, -cospi_8_64);
-  const __m128i k__cospi_D = octa_set_epi16(cospi_24_64, -cospi_8_64,
-                                            cospi_24_64, -cospi_8_64,
-                                            cospi_8_64, cospi_24_64,
-                                            cospi_8_64, cospi_24_64);
-  const __m128i k__cospi_E = octa_set_epi16(cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64,
-                                            cospi_16_64, cospi_16_64);
-  const __m128i k__cospi_F = octa_set_epi16(cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64,
-                                            cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_G = octa_set_epi16(cospi_8_64, cospi_24_64,
-                                            cospi_8_64, cospi_24_64,
-                                            -cospi_8_64, -cospi_24_64,
-                                            -cospi_8_64, -cospi_24_64);
-  const __m128i k__cospi_H = octa_set_epi16(cospi_24_64, -cospi_8_64,
-                                            cospi_24_64, -cospi_8_64,
-                                            -cospi_24_64, cospi_8_64,
-                                            -cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_A =
+      octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
+                     cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_B =
+      octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
+                     cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);
+  const __m128i k__cospi_C =
+      octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
+                     cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64);
+  const __m128i k__cospi_D =
+      octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
+                     cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_E =
+      octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
+                     cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);
+  const __m128i k__cospi_F =
+      octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
+                     cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_G =
+      octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
+                     -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64);
+  const __m128i k__cospi_H =
+      octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
+                     -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64);
 
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   // This second rounding constant saves doing some extra adds at the end
-  const __m128i k__DCT_CONST_ROUNDING2 = _mm_set1_epi32(DCT_CONST_ROUNDING
-                                               +(DCT_CONST_ROUNDING << 1));
-  const int DCT_CONST_BITS2 =  DCT_CONST_BITS + 2;
+  const __m128i k__DCT_CONST_ROUNDING2 =
+      _mm_set1_epi32(DCT_CONST_ROUNDING + (DCT_CONST_ROUNDING << 1));
+  const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2;
   const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
   const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
   __m128i in0, in1;
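
The A-H constants above interleave coefficient pairs because the 4x4 path multiplies with `_mm_madd_epi16`, which multiplies adjacent 16-bit lanes and sums each pair into one 32-bit lane: an (a, b) input pair against a (c0, c1) coefficient pair yields a*c0 + b*c1, one butterfly tap per dword. A minimal sketch of the multiply-round-shift step built on that layout (the helper name is mine; cf. mult_round_shift() later in this series):

```c
#include <emmintrin.h>

/* One butterfly tap: adjacent 16-bit (input, input) x (coeff, coeff)
 * pairs are multiplied and pairwise-summed into 32-bit lanes, then
 * rounded and shifted back down by DCT_CONST_BITS. */
static __m128i butterfly_tap(__m128i pairs, __m128i coeffs,
                             __m128i rounding, const int shift) {
  const __m128i prod = _mm_madd_epi16(pairs, coeffs);
  return _mm_srai_epi32(_mm_add_epi32(prod, rounding), shift);
}
```
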
@@ -90,14 +82,14 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
 #endif
 
   // Load inputs.
-  in0  = _mm_loadl_epi64((const __m128i *)(input +  0 * stride));
-  in1  = _mm_loadl_epi64((const __m128i *)(input +  1 * stride));
-  in1  = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *)
-                                                 (input +  2 * stride)));
-  in0  = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *)
-                                                 (input +  3 * stride)));
+  in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
   // in0 = [i0 i1 i2 i3 iC iD iE iF]
   // in1 = [i4 i5 i6 i7 i8 i9 iA iB]
+  in1 = _mm_unpacklo_epi64(
+      in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride)));
+  in0 = _mm_unpacklo_epi64(
+      in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride)));
 #if DCT_HIGH_BIT_DEPTH
   // Check inputs small enough to use optimised code
   cmp0 = _mm_xor_si128(_mm_cmpgt_epi16(in0, _mm_set1_epi16(0x3ff)),
@@ -192,10 +184,10 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
     // vertical DCTs finished. Now we do the horizontal DCTs.
     // Stage 3: Add/subtract
 
-    const __m128i t0 = ADD_EPI16(in0, in1);
-    const __m128i t1 = SUB_EPI16(in0, in1);
     // t0 = [c0 c1 c8 c9  c4  c5  cC  cD]
     // t1 = [c3 c2 cB cA -c7 -c6 -cF -cE]
+    const __m128i t0 = ADD_EPI16(in0, in1);
+    const __m128i t1 = SUB_EPI16(in0, in1);
 #if DCT_HIGH_BIT_DEPTH
     overflow = check_epi16_overflow_x2(&t0, &t1);
     if (overflow) {
@@ -263,7 +255,6 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
   storeu_output(&in1, output + 2 * 4);
 }
 
-
 void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
   int pass;
   // Constants
@@ -283,14 +274,14 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
   int overflow;
 #endif
   // Load input
-  __m128i in0  = _mm_load_si128((const __m128i *)(input + 0 * stride));
-  __m128i in1  = _mm_load_si128((const __m128i *)(input + 1 * stride));
-  __m128i in2  = _mm_load_si128((const __m128i *)(input + 2 * stride));
-  __m128i in3  = _mm_load_si128((const __m128i *)(input + 3 * stride));
-  __m128i in4  = _mm_load_si128((const __m128i *)(input + 4 * stride));
-  __m128i in5  = _mm_load_si128((const __m128i *)(input + 5 * stride));
-  __m128i in6  = _mm_load_si128((const __m128i *)(input + 6 * stride));
-  __m128i in7  = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
   // Pre-condition input (shift by two)
   in0 = _mm_slli_epi16(in0, 2);
   in1 = _mm_slli_epi16(in1, 2);
@@ -319,8 +310,8 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
     const __m128i q7 = SUB_EPI16(in0, in7);
 #if DCT_HIGH_BIT_DEPTH
     if (pass == 1) {
-      overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3,
-                                         &q4, &q5, &q6, &q7);
+      overflow =
+          check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
       if (overflow) {
         vpx_highbd_fdct8x8_c(input, output, stride);
         return;
@@ -630,22 +621,22 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
       __m128i res08, res09, res10, res11, res12, res13, res14, res15;
       // Load and pre-condition input.
       if (0 == pass) {
-        in00  = _mm_load_si128((const __m128i *)(in +  0 * stride));
-        in01  = _mm_load_si128((const __m128i *)(in +  1 * stride));
-        in02  = _mm_load_si128((const __m128i *)(in +  2 * stride));
-        in03  = _mm_load_si128((const __m128i *)(in +  3 * stride));
-        in04  = _mm_load_si128((const __m128i *)(in +  4 * stride));
-        in05  = _mm_load_si128((const __m128i *)(in +  5 * stride));
-        in06  = _mm_load_si128((const __m128i *)(in +  6 * stride));
-        in07  = _mm_load_si128((const __m128i *)(in +  7 * stride));
-        in08  = _mm_load_si128((const __m128i *)(in +  8 * stride));
-        in09  = _mm_load_si128((const __m128i *)(in +  9 * stride));
-        in10  = _mm_load_si128((const __m128i *)(in + 10 * stride));
-        in11  = _mm_load_si128((const __m128i *)(in + 11 * stride));
-        in12  = _mm_load_si128((const __m128i *)(in + 12 * stride));
-        in13  = _mm_load_si128((const __m128i *)(in + 13 * stride));
-        in14  = _mm_load_si128((const __m128i *)(in + 14 * stride));
-        in15  = _mm_load_si128((const __m128i *)(in + 15 * stride));
+        in00 = _mm_load_si128((const __m128i *)(in + 0 * stride));
+        in01 = _mm_load_si128((const __m128i *)(in + 1 * stride));
+        in02 = _mm_load_si128((const __m128i *)(in + 2 * stride));
+        in03 = _mm_load_si128((const __m128i *)(in + 3 * stride));
+        in04 = _mm_load_si128((const __m128i *)(in + 4 * stride));
+        in05 = _mm_load_si128((const __m128i *)(in + 5 * stride));
+        in06 = _mm_load_si128((const __m128i *)(in + 6 * stride));
+        in07 = _mm_load_si128((const __m128i *)(in + 7 * stride));
+        in08 = _mm_load_si128((const __m128i *)(in + 8 * stride));
+        in09 = _mm_load_si128((const __m128i *)(in + 9 * stride));
+        in10 = _mm_load_si128((const __m128i *)(in + 10 * stride));
+        in11 = _mm_load_si128((const __m128i *)(in + 11 * stride));
+        in12 = _mm_load_si128((const __m128i *)(in + 12 * stride));
+        in13 = _mm_load_si128((const __m128i *)(in + 13 * stride));
+        in14 = _mm_load_si128((const __m128i *)(in + 14 * stride));
+        in15 = _mm_load_si128((const __m128i *)(in + 15 * stride));
         // x = x << 2
         in00 = _mm_slli_epi16(in00, 2);
         in01 = _mm_slli_epi16(in01, 2);
@@ -664,22 +655,22 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
         in14 = _mm_slli_epi16(in14, 2);
         in15 = _mm_slli_epi16(in15, 2);
       } else {
-        in00  = _mm_load_si128((const __m128i *)(in +  0 * 16));
-        in01  = _mm_load_si128((const __m128i *)(in +  1 * 16));
-        in02  = _mm_load_si128((const __m128i *)(in +  2 * 16));
-        in03  = _mm_load_si128((const __m128i *)(in +  3 * 16));
-        in04  = _mm_load_si128((const __m128i *)(in +  4 * 16));
-        in05  = _mm_load_si128((const __m128i *)(in +  5 * 16));
-        in06  = _mm_load_si128((const __m128i *)(in +  6 * 16));
-        in07  = _mm_load_si128((const __m128i *)(in +  7 * 16));
-        in08  = _mm_load_si128((const __m128i *)(in +  8 * 16));
-        in09  = _mm_load_si128((const __m128i *)(in +  9 * 16));
-        in10  = _mm_load_si128((const __m128i *)(in + 10 * 16));
-        in11  = _mm_load_si128((const __m128i *)(in + 11 * 16));
-        in12  = _mm_load_si128((const __m128i *)(in + 12 * 16));
-        in13  = _mm_load_si128((const __m128i *)(in + 13 * 16));
-        in14  = _mm_load_si128((const __m128i *)(in + 14 * 16));
-        in15  = _mm_load_si128((const __m128i *)(in + 15 * 16));
+        in00 = _mm_load_si128((const __m128i *)(in + 0 * 16));
+        in01 = _mm_load_si128((const __m128i *)(in + 1 * 16));
+        in02 = _mm_load_si128((const __m128i *)(in + 2 * 16));
+        in03 = _mm_load_si128((const __m128i *)(in + 3 * 16));
+        in04 = _mm_load_si128((const __m128i *)(in + 4 * 16));
+        in05 = _mm_load_si128((const __m128i *)(in + 5 * 16));
+        in06 = _mm_load_si128((const __m128i *)(in + 6 * 16));
+        in07 = _mm_load_si128((const __m128i *)(in + 7 * 16));
+        in08 = _mm_load_si128((const __m128i *)(in + 8 * 16));
+        in09 = _mm_load_si128((const __m128i *)(in + 9 * 16));
+        in10 = _mm_load_si128((const __m128i *)(in + 10 * 16));
+        in11 = _mm_load_si128((const __m128i *)(in + 11 * 16));
+        in12 = _mm_load_si128((const __m128i *)(in + 12 * 16));
+        in13 = _mm_load_si128((const __m128i *)(in + 13 * 16));
+        in14 = _mm_load_si128((const __m128i *)(in + 14 * 16));
+        in15 = _mm_load_si128((const __m128i *)(in + 15 * 16));
         // x = (x + 1) >> 2
         in00 = _mm_add_epi16(in00, kOne);
         in01 = _mm_add_epi16(in01, kOne);
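
Pass 0 (the branch above this one) pre-scales the input by 4 with `x << 2` to gain precision headroom; the `(x + 1) >> 2` sequence here rounds that factor back out before the row transform. As a one-line sketch, assuming `kOne` is `_mm_set1_epi16(1)`:

```c
#include <emmintrin.h>

/* Round out the x4 pre-scaling applied in pass 0: (x + 1) >> 2. */
static __m128i undo_prescale(__m128i x, __m128i kOne) {
  return _mm_srai_epi16(_mm_add_epi16(x, kOne), 2);
}
```
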
@@ -745,10 +736,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
         step1_6 = SUB_EPI16(in01, in14);
         step1_7 = SUB_EPI16(in00, in15);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step1_0, &step1_1,
-                                           &step1_2, &step1_3,
-                                           &step1_4, &step1_5,
-                                           &step1_6, &step1_7);
+        overflow =
+            check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
+                                    &step1_4, &step1_5, &step1_6, &step1_7);
         if (overflow) {
           vpx_highbd_fdct16x16_c(input, output, stride);
           return;
@@ -767,8 +757,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
         const __m128i q6 = SUB_EPI16(input1, input6);
         const __m128i q7 = SUB_EPI16(input0, input7);
 #if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3,
-                                           &q4, &q5, &q6, &q7);
+        overflow =
+            check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
         if (overflow) {
           vpx_highbd_fdct16x16_c(input, output, stride);
           return;
@@ -818,12 +808,12 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           // into 32 bits.
           const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
           const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
-          const __m128i r0 = mult_round_shift(&d0, &d1, &k__cospi_p16_m16,
-                                              &k__DCT_CONST_ROUNDING,
-                                              DCT_CONST_BITS);
-          const __m128i r1 = mult_round_shift(&d0, &d1, &k__cospi_p16_p16,
-                                              &k__DCT_CONST_ROUNDING,
-                                              DCT_CONST_BITS);
+          const __m128i r0 =
+              mult_round_shift(&d0, &d1, &k__cospi_p16_m16,
+                               &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+          const __m128i r1 =
+              mult_round_shift(&d0, &d1, &k__cospi_p16_p16,
+                               &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
 #if DCT_HIGH_BIT_DEPTH
           overflow = check_epi16_overflow_x2(&r0, &r1);
           if (overflow) {
@@ -860,8 +850,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
               res06 = mult_round_shift(&t2, &t3, &k__cospi_m20_p12,
                                        &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
 #if DCT_HIGH_BIT_DEPTH
-              overflow = check_epi16_overflow_x4(&res02, &res14,
-                                                 &res10, &res06);
+              overflow =
+                  check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
               if (overflow) {
                 vpx_highbd_fdct16x16_c(input, output, stride);
                 return;
@@ -888,8 +878,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           step2_4 = mult_round_shift(&t2, &t3, &k__cospi_p16_p16,
                                      &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5,
-                                             &step2_4);
+          overflow =
+              check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
           if (overflow) {
             vpx_highbd_fdct16x16_c(input, output, stride);
             return;
@@ -907,10 +897,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           step3_6 = ADD_EPI16(step1_6, step2_5);
           step3_7 = ADD_EPI16(step1_7, step2_4);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&step3_0, &step3_1,
-                                             &step3_2, &step3_3,
-                                             &step3_4, &step3_5,
-                                             &step3_6, &step3_7);
+          overflow =
+              check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
+                                      &step3_4, &step3_5, &step3_6, &step3_7);
           if (overflow) {
             vpx_highbd_fdct16x16_c(input, output, stride);
             return;
@@ -932,8 +921,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           step2_5 = mult_round_shift(&t2, &t3, &k__cospi_p08_m24,
                                      &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6,
-                                             &step2_5);
+          overflow =
+              check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
           if (overflow) {
             vpx_highbd_fdct16x16_c(input, output, stride);
             return;
@@ -951,10 +940,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
           step1_6 = SUB_EPI16(step3_7, step2_6);
           step1_7 = ADD_EPI16(step3_7, step2_6);
 #if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&step1_0, &step1_1,
-                                             &step1_2, &step1_3,
-                                             &step1_4, &step1_5,
-                                             &step1_6, &step1_7);
+          overflow =
+              check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
+                                      &step1_4, &step1_5, &step1_6, &step1_7);
           if (overflow) {
             vpx_highbd_fdct16x16_c(input, output, stride);
             return;
@@ -1006,16 +994,14 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
         }
       }
       // Transpose the results, do it as two 8x8 transposes.
-      transpose_and_output8x8(&res00, &res01, &res02, &res03,
-                              &res04, &res05, &res06, &res07,
-                              pass, out0, out1);
-      transpose_and_output8x8(&res08, &res09, &res10, &res11,
-                              &res12, &res13, &res14, &res15,
-                              pass, out0 + 8, out1 + 8);
+      transpose_and_output8x8(&res00, &res01, &res02, &res03, &res04, &res05,
+                              &res06, &res07, pass, out0, out1);
+      transpose_and_output8x8(&res08, &res09, &res10, &res11, &res12, &res13,
+                              &res14, &res15, pass, out0 + 8, out1 + 8);
       if (pass == 0) {
-        out0 += 8*16;
+        out0 += 8 * 16;
       } else {
-        out1 += 8*16;
+        out1 += 8 * 16;
       }
     }
     // Setup in/out for next pass.
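
Throughout these kernels the cospi_*_64 coefficients are cos(N*pi/64) in Q14 fixed point (DCT_CONST_BITS is 14 in libvpx), so every coefficient multiply is followed by the add-rounding-then-shift step seen in the hunks above. The scalar equivalent, for reference:

```c
#include <stdint.h>

/* Fixed-point descale: round a Q14 product back to working precision.
 * DCT_CONST_ROUNDING is 1 << (DCT_CONST_BITS - 1). */
static int32_t round_shift_q14(int64_t x) {
  const int bits = 14;                       /* DCT_CONST_BITS */
  const int64_t rounding = 1 << (bits - 1);  /* DCT_CONST_ROUNDING */
  return (int32_t)((x + rounding) >> bits);
}
```
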
diff --git a/vpx_dsp/x86/fwd_txfm_sse2.c b/vpx_dsp/x86/fwd_txfm_sse2.c
index 8a01bb7694f6460151b4187ccd3f959096916d51..60da47e6a183b29236f0112d4f18d1ea4278054a 100644
--- a/vpx_dsp/x86/fwd_txfm_sse2.c
+++ b/vpx_dsp/x86/fwd_txfm_sse2.c
@@ -18,12 +18,12 @@ void vpx_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0, in1;
   __m128i tmp;
   const __m128i zero = _mm_setzero_si128();
-  in0  = _mm_loadl_epi64((const __m128i *)(input +  0 * stride));
-  in1  = _mm_loadl_epi64((const __m128i *)(input +  1 * stride));
-  in1  = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *)
-         (input +  2 * stride)));
-  in0  = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *)
-         (input +  3 * stride)));
+  in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+  in1 = _mm_unpacklo_epi64(
+      in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride)));
+  in0 = _mm_unpacklo_epi64(
+      in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride)));
 
   tmp = _mm_add_epi16(in0, in1);
   in0 = _mm_unpacklo_epi16(zero, tmp);
@@ -44,19 +44,19 @@ void vpx_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
 }
 
 void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
-  __m128i in0  = _mm_load_si128((const __m128i *)(input + 0 * stride));
-  __m128i in1  = _mm_load_si128((const __m128i *)(input + 1 * stride));
-  __m128i in2  = _mm_load_si128((const __m128i *)(input + 2 * stride));
-  __m128i in3  = _mm_load_si128((const __m128i *)(input + 3 * stride));
+  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
   __m128i u0, u1, sum;
 
   u0 = _mm_add_epi16(in0, in1);
   u1 = _mm_add_epi16(in2, in3);
 
-  in0  = _mm_load_si128((const __m128i *)(input + 4 * stride));
-  in1  = _mm_load_si128((const __m128i *)(input + 5 * stride));
-  in2  = _mm_load_si128((const __m128i *)(input + 6 * stride));
-  in3  = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  in0 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+  in1 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+  in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+  in3 = _mm_load_si128((const __m128i *)(input + 7 * stride));
 
   sum = _mm_add_epi16(u0, u1);
 
@@ -64,7 +64,7 @@ void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   in2 = _mm_add_epi16(in2, in3);
   sum = _mm_add_epi16(sum, in0);
 
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   sum = _mm_add_epi16(sum, in2);
 
   in0 = _mm_unpacklo_epi16(u0, sum);
@@ -92,49 +92,49 @@ void vpx_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
 
   for (i = 0; i < 2; ++i) {
     input += 8 * i;
-    in0  = _mm_load_si128((const __m128i *)(input +  0 * stride));
-    in1  = _mm_load_si128((const __m128i *)(input +  1 * stride));
-    in2  = _mm_load_si128((const __m128i *)(input +  2 * stride));
-    in3  = _mm_load_si128((const __m128i *)(input +  3 * stride));
+    in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+    in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+    in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+    in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
 
     u0 = _mm_add_epi16(in0, in1);
     u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  4 * stride));
-    in1  = _mm_load_si128((const __m128i *)(input +  5 * stride));
-    in2  = _mm_load_si128((const __m128i *)(input +  6 * stride));
-    in3  = _mm_load_si128((const __m128i *)(input +  7 * stride));
+    in0 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+    in1 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+    in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+    in3 = _mm_load_si128((const __m128i *)(input + 7 * stride));
 
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  8 * stride));
-    in1  = _mm_load_si128((const __m128i *)(input +  9 * stride));
-    in2  = _mm_load_si128((const __m128i *)(input + 10 * stride));
-    in3  = _mm_load_si128((const __m128i *)(input + 11 * stride));
+    in0 = _mm_load_si128((const __m128i *)(input + 8 * stride));
+    in1 = _mm_load_si128((const __m128i *)(input + 9 * stride));
+    in2 = _mm_load_si128((const __m128i *)(input + 10 * stride));
+    in3 = _mm_load_si128((const __m128i *)(input + 11 * stride));
 
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input + 12 * stride));
-    in1  = _mm_load_si128((const __m128i *)(input + 13 * stride));
-    in2  = _mm_load_si128((const __m128i *)(input + 14 * stride));
-    in3  = _mm_load_si128((const __m128i *)(input + 15 * stride));
+    in0 = _mm_load_si128((const __m128i *)(input + 12 * stride));
+    in1 = _mm_load_si128((const __m128i *)(input + 13 * stride));
+    in2 = _mm_load_si128((const __m128i *)(input + 14 * stride));
+    in3 = _mm_load_si128((const __m128i *)(input + 15 * stride));
 
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
     sum = _mm_add_epi16(sum, u1);
   }
 
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   in0 = _mm_unpacklo_epi16(u0, sum);
   in1 = _mm_unpackhi_epi16(u0, sum);
   in0 = _mm_srai_epi32(in0, 16);
@@ -160,53 +160,53 @@ void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
   int i;
 
   for (i = 0; i < 8; ++i) {
-    in0  = _mm_load_si128((const __m128i *)(input +  0));
-    in1  = _mm_load_si128((const __m128i *)(input +  8));
-    in2  = _mm_load_si128((const __m128i *)(input + 16));
-    in3  = _mm_load_si128((const __m128i *)(input + 24));
+    in0 = _mm_load_si128((const __m128i *)(input + 0));
+    in1 = _mm_load_si128((const __m128i *)(input + 8));
+    in2 = _mm_load_si128((const __m128i *)(input + 16));
+    in3 = _mm_load_si128((const __m128i *)(input + 24));
 
     input += stride;
     u0 = _mm_add_epi16(in0, in1);
     u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  0));
-    in1  = _mm_load_si128((const __m128i *)(input +  8));
-    in2  = _mm_load_si128((const __m128i *)(input + 16));
-    in3  = _mm_load_si128((const __m128i *)(input + 24));
+    in0 = _mm_load_si128((const __m128i *)(input + 0));
+    in1 = _mm_load_si128((const __m128i *)(input + 8));
+    in2 = _mm_load_si128((const __m128i *)(input + 16));
+    in3 = _mm_load_si128((const __m128i *)(input + 24));
 
     input += stride;
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  0));
-    in1  = _mm_load_si128((const __m128i *)(input +  8));
-    in2  = _mm_load_si128((const __m128i *)(input + 16));
-    in3  = _mm_load_si128((const __m128i *)(input + 24));
+    in0 = _mm_load_si128((const __m128i *)(input + 0));
+    in1 = _mm_load_si128((const __m128i *)(input + 8));
+    in2 = _mm_load_si128((const __m128i *)(input + 16));
+    in3 = _mm_load_si128((const __m128i *)(input + 24));
 
     input += stride;
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
-    in0  = _mm_load_si128((const __m128i *)(input +  0));
-    in1  = _mm_load_si128((const __m128i *)(input +  8));
-    in2  = _mm_load_si128((const __m128i *)(input + 16));
-    in3  = _mm_load_si128((const __m128i *)(input + 24));
+    in0 = _mm_load_si128((const __m128i *)(input + 0));
+    in1 = _mm_load_si128((const __m128i *)(input + 8));
+    in2 = _mm_load_si128((const __m128i *)(input + 16));
+    in3 = _mm_load_si128((const __m128i *)(input + 24));
 
     input += stride;
     sum = _mm_add_epi16(sum, u1);
-    u0  = _mm_add_epi16(in0, in1);
-    u1  = _mm_add_epi16(in2, in3);
+    u0 = _mm_add_epi16(in0, in1);
+    u1 = _mm_add_epi16(in2, in3);
     sum = _mm_add_epi16(sum, u0);
 
     sum = _mm_add_epi16(sum, u1);
   }
 
-  u0  = _mm_setzero_si128();
+  u0 = _mm_setzero_si128();
   in0 = _mm_unpacklo_epi16(u0, sum);
   in1 = _mm_unpackhi_epi16(u0, sum);
   in0 = _mm_srai_epi32(in0, 16);
@@ -229,43 +229,43 @@ void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
 #define FDCT8x8_2D vpx_fdct8x8_sse2
 #define FDCT16x16_2D vpx_fdct16x16_sse2
 #include "vpx_dsp/x86/fwd_txfm_impl_sse2.h"
-#undef  FDCT4x4_2D
-#undef  FDCT8x8_2D
-#undef  FDCT16x16_2D
+#undef FDCT4x4_2D
+#undef FDCT8x8_2D
+#undef FDCT16x16_2D
 
 #define FDCT32x32_2D vpx_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
 #include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h"
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
 
 #define FDCT32x32_2D vpx_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
 #include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h"  // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
-#undef  DCT_HIGH_BIT_DEPTH
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
+#undef DCT_HIGH_BIT_DEPTH
 
 #if CONFIG_VPX_HIGHBITDEPTH
 #define DCT_HIGH_BIT_DEPTH 1
 #define FDCT4x4_2D vpx_highbd_fdct4x4_sse2
 #define FDCT8x8_2D vpx_highbd_fdct8x8_sse2
 #define FDCT16x16_2D vpx_highbd_fdct16x16_sse2
-#include "vpx_dsp/x86/fwd_txfm_impl_sse2.h" // NOLINT
-#undef  FDCT4x4_2D
-#undef  FDCT8x8_2D
-#undef  FDCT16x16_2D
+#include "vpx_dsp/x86/fwd_txfm_impl_sse2.h"  // NOLINT
+#undef FDCT4x4_2D
+#undef FDCT8x8_2D
+#undef FDCT16x16_2D
 
 #define FDCT32x32_2D vpx_highbd_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
-#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
+#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h"  // NOLINT
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
 
 #define FDCT32x32_2D vpx_highbd_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
-#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
-#undef  DCT_HIGH_BIT_DEPTH
+#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h"  // NOLINT
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
+#undef DCT_HIGH_BIT_DEPTH
 #endif  // CONFIG_VPX_HIGHBITDEPTH
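
The *_1 kernels in this file compute only the DC coefficient, so they reduce each block to a single sum. The recurring unpack-with-zero / `_mm_srai_epi32(..., 16)` tail is a pre-SSE4.1 sign extension: interleaving with zero parks each 16-bit lane in the top half of a 32-bit lane, and the arithmetic shift then replicates the sign bit. A minimal sketch (helper name is mine):

```c
#include <emmintrin.h>

/* Sign-extend eight int16 lanes into two registers of int32 lanes
 * without _mm_cvtepi16_epi32 (SSE4.1): each 16-bit value lands in the
 * high half of a dword, so an arithmetic >> 16 restores it signed. */
static void widen_epi16(__m128i v, __m128i *lo, __m128i *hi) {
  const __m128i zero = _mm_setzero_si128();
  *lo = _mm_srai_epi32(_mm_unpacklo_epi16(zero, v), 16);
  *hi = _mm_srai_epi32(_mm_unpackhi_epi16(zero, v), 16);
}
```
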
diff --git a/vpx_dsp/x86/fwd_txfm_sse2.h b/vpx_dsp/x86/fwd_txfm_sse2.h
index cb77bc5cf8c770d460eab5e0ce5d2407b8337a0e..0e4401ff53ede41f86a53640222e5e5a8b907811 100644
--- a/vpx_dsp/x86/fwd_txfm_sse2.h
+++ b/vpx_dsp/x86/fwd_txfm_sse2.h
@@ -63,99 +63,57 @@ static INLINE int check_epi16_overflow_x4(const __m128i *preg0,
   return _mm_movemask_epi8(cmp0);
 }
 
-static INLINE int check_epi16_overflow_x8(const __m128i *preg0,
-                                          const __m128i *preg1,
-                                          const __m128i *preg2,
-                                          const __m128i *preg3,
-                                          const __m128i *preg4,
-                                          const __m128i *preg5,
-                                          const __m128i *preg6,
-                                          const __m128i *preg7) {
+static INLINE int check_epi16_overflow_x8(
+    const __m128i *preg0, const __m128i *preg1, const __m128i *preg2,
+    const __m128i *preg3, const __m128i *preg4, const __m128i *preg5,
+    const __m128i *preg6, const __m128i *preg7) {
   int res0, res1;
   res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3);
   res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7);
   return res0 + res1;
 }
 
-static INLINE int check_epi16_overflow_x12(const __m128i *preg0,
-                                           const __m128i *preg1,
-                                           const __m128i *preg2,
-                                           const __m128i *preg3,
-                                           const __m128i *preg4,
-                                           const __m128i *preg5,
-                                           const __m128i *preg6,
-                                           const __m128i *preg7,
-                                           const __m128i *preg8,
-                                           const __m128i *preg9,
-                                           const __m128i *preg10,
-                                           const __m128i *preg11) {
+static INLINE int check_epi16_overflow_x12(
+    const __m128i *preg0, const __m128i *preg1, const __m128i *preg2,
+    const __m128i *preg3, const __m128i *preg4, const __m128i *preg5,
+    const __m128i *preg6, const __m128i *preg7, const __m128i *preg8,
+    const __m128i *preg9, const __m128i *preg10, const __m128i *preg11) {
   int res0, res1;
   res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3);
   res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7);
-  if (!res0)
-    res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11);
+  if (!res0) res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11);
   return res0 + res1;
 }
 
-static INLINE int check_epi16_overflow_x16(const __m128i *preg0,
-                                           const __m128i *preg1,
-                                           const __m128i *preg2,
-                                           const __m128i *preg3,
-                                           const __m128i *preg4,
-                                           const __m128i *preg5,
-                                           const __m128i *preg6,
-                                           const __m128i *preg7,
-                                           const __m128i *preg8,
-                                           const __m128i *preg9,
-                                           const __m128i *preg10,
-                                           const __m128i *preg11,
-                                           const __m128i *preg12,
-                                           const __m128i *preg13,
-                                           const __m128i *preg14,
-                                           const __m128i *preg15) {
+static INLINE int check_epi16_overflow_x16(
+    const __m128i *preg0, const __m128i *preg1, const __m128i *preg2,
+    const __m128i *preg3, const __m128i *preg4, const __m128i *preg5,
+    const __m128i *preg6, const __m128i *preg7, const __m128i *preg8,
+    const __m128i *preg9, const __m128i *preg10, const __m128i *preg11,
+    const __m128i *preg12, const __m128i *preg13, const __m128i *preg14,
+    const __m128i *preg15) {
   int res0, res1;
   res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3);
   res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7);
   if (!res0) {
     res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11);
-    if (!res1)
-      res1 = check_epi16_overflow_x4(preg12, preg13, preg14, preg15);
+    if (!res1) res1 = check_epi16_overflow_x4(preg12, preg13, preg14, preg15);
   }
   return res0 + res1;
 }
 
-static INLINE int check_epi16_overflow_x32(const __m128i *preg0,
-                                           const __m128i *preg1,
-                                           const __m128i *preg2,
-                                           const __m128i *preg3,
-                                           const __m128i *preg4,
-                                           const __m128i *preg5,
-                                           const __m128i *preg6,
-                                           const __m128i *preg7,
-                                           const __m128i *preg8,
-                                           const __m128i *preg9,
-                                           const __m128i *preg10,
-                                           const __m128i *preg11,
-                                           const __m128i *preg12,
-                                           const __m128i *preg13,
-                                           const __m128i *preg14,
-                                           const __m128i *preg15,
-                                           const __m128i *preg16,
-                                           const __m128i *preg17,
-                                           const __m128i *preg18,
-                                           const __m128i *preg19,
-                                           const __m128i *preg20,
-                                           const __m128i *preg21,
-                                           const __m128i *preg22,
-                                           const __m128i *preg23,
-                                           const __m128i *preg24,
-                                           const __m128i *preg25,
-                                           const __m128i *preg26,
-                                           const __m128i *preg27,
-                                           const __m128i *preg28,
-                                           const __m128i *preg29,
-                                           const __m128i *preg30,
-                                           const __m128i *preg31) {
+static INLINE int check_epi16_overflow_x32(
+    const __m128i *preg0, const __m128i *preg1, const __m128i *preg2,
+    const __m128i *preg3, const __m128i *preg4, const __m128i *preg5,
+    const __m128i *preg6, const __m128i *preg7, const __m128i *preg8,
+    const __m128i *preg9, const __m128i *preg10, const __m128i *preg11,
+    const __m128i *preg12, const __m128i *preg13, const __m128i *preg14,
+    const __m128i *preg15, const __m128i *preg16, const __m128i *preg17,
+    const __m128i *preg18, const __m128i *preg19, const __m128i *preg20,
+    const __m128i *preg21, const __m128i *preg22, const __m128i *preg23,
+    const __m128i *preg24, const __m128i *preg25, const __m128i *preg26,
+    const __m128i *preg27, const __m128i *preg28, const __m128i *preg29,
+    const __m128i *preg30, const __m128i *preg31) {
   int res0, res1;
   res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3);
   res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7);
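
These checks work because, in the DCT_HIGH_BIT_DEPTH build, the ADD_EPI16/SUB_EPI16 macros expand to saturating adds and subtracts: a lane that clipped sits exactly at INT16_MAX or INT16_MIN, so testing for those sentinel values detects overflow after the fact. A single-register sketch of the test that check_epi16_overflow_x4 applies four registers at a time (helper name is mine):

```c
#include <emmintrin.h>

/* Nonzero iff some lane saturated, i.e. equals INT16_MAX or INT16_MIN
 * after a saturating add/sub. */
static int any_lane_saturated(__m128i v) {
  const __m128i max16 = _mm_set1_epi16(0x7fff);
  const __m128i min16 = _mm_set1_epi16((short)0x8000);
  const __m128i hit =
      _mm_or_si128(_mm_cmpeq_epi16(v, max16), _mm_cmpeq_epi16(v, min16));
  return _mm_movemask_epi8(hit);
}
```
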
@@ -190,36 +148,31 @@ static INLINE int k_check_epi32_overflow_4(const __m128i *preg0,
   __m128i reg1_shifted = _mm_slli_epi64(*preg1, 1);
   __m128i reg2_shifted = _mm_slli_epi64(*preg2, 1);
   __m128i reg3_shifted = _mm_slli_epi64(*preg3, 1);
-  __m128i reg0_top_dwords = _mm_shuffle_epi32(
-      reg0_shifted, _MM_SHUFFLE(0, 0, 3, 1));
-  __m128i reg1_top_dwords = _mm_shuffle_epi32(
-      reg1_shifted, _MM_SHUFFLE(0, 0, 3, 1));
-  __m128i reg2_top_dwords = _mm_shuffle_epi32(
-      reg2_shifted, _MM_SHUFFLE(0, 0, 3, 1));
-  __m128i reg3_top_dwords = _mm_shuffle_epi32(
-      reg3_shifted, _MM_SHUFFLE(0, 0, 3, 1));
+  __m128i reg0_top_dwords =
+      _mm_shuffle_epi32(reg0_shifted, _MM_SHUFFLE(0, 0, 3, 1));
+  __m128i reg1_top_dwords =
+      _mm_shuffle_epi32(reg1_shifted, _MM_SHUFFLE(0, 0, 3, 1));
+  __m128i reg2_top_dwords =
+      _mm_shuffle_epi32(reg2_shifted, _MM_SHUFFLE(0, 0, 3, 1));
+  __m128i reg3_top_dwords =
+      _mm_shuffle_epi32(reg3_shifted, _MM_SHUFFLE(0, 0, 3, 1));
   __m128i top_dwords_01 = _mm_unpacklo_epi64(reg0_top_dwords, reg1_top_dwords);
   __m128i top_dwords_23 = _mm_unpacklo_epi64(reg2_top_dwords, reg3_top_dwords);
   __m128i valid_positve_01 = _mm_cmpeq_epi32(top_dwords_01, *zero);
   __m128i valid_positve_23 = _mm_cmpeq_epi32(top_dwords_23, *zero);
   __m128i valid_negative_01 = _mm_cmpeq_epi32(top_dwords_01, minus_one);
   __m128i valid_negative_23 = _mm_cmpeq_epi32(top_dwords_23, minus_one);
-  int overflow_01 = _mm_movemask_epi8(
-      _mm_cmpeq_epi32(valid_positve_01, valid_negative_01));
-  int overflow_23 = _mm_movemask_epi8(
-      _mm_cmpeq_epi32(valid_positve_23, valid_negative_23));
+  int overflow_01 =
+      _mm_movemask_epi8(_mm_cmpeq_epi32(valid_positve_01, valid_negative_01));
+  int overflow_23 =
+      _mm_movemask_epi8(_mm_cmpeq_epi32(valid_positve_23, valid_negative_23));
   return (overflow_01 + overflow_23);
 }
 
-static INLINE int k_check_epi32_overflow_8(const __m128i *preg0,
-                                           const __m128i *preg1,
-                                           const __m128i *preg2,
-                                           const __m128i *preg3,
-                                           const __m128i *preg4,
-                                           const __m128i *preg5,
-                                           const __m128i *preg6,
-                                           const __m128i *preg7,
-                                           const __m128i *zero) {
+static INLINE int k_check_epi32_overflow_8(
+    const __m128i *preg0, const __m128i *preg1, const __m128i *preg2,
+    const __m128i *preg3, const __m128i *preg4, const __m128i *preg5,
+    const __m128i *preg6, const __m128i *preg7, const __m128i *zero) {
   int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero);
   if (!overflow) {
     overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero);
@@ -227,91 +180,59 @@ static INLINE int k_check_epi32_overflow_8(const __m128i *preg0,
   return overflow;
 }
 
-static INLINE int k_check_epi32_overflow_16(const __m128i *preg0,
-                                            const __m128i *preg1,
-                                            const __m128i *preg2,
-                                            const __m128i *preg3,
-                                            const __m128i *preg4,
-                                            const __m128i *preg5,
-                                            const __m128i *preg6,
-                                            const __m128i *preg7,
-                                            const __m128i *preg8,
-                                            const __m128i *preg9,
-                                            const __m128i *preg10,
-                                            const __m128i *preg11,
-                                            const __m128i *preg12,
-                                            const __m128i *preg13,
-                                            const __m128i *preg14,
-                                            const __m128i *preg15,
-                                            const __m128i *zero) {
+static INLINE int k_check_epi32_overflow_16(
+    const __m128i *preg0, const __m128i *preg1, const __m128i *preg2,
+    const __m128i *preg3, const __m128i *preg4, const __m128i *preg5,
+    const __m128i *preg6, const __m128i *preg7, const __m128i *preg8,
+    const __m128i *preg9, const __m128i *preg10, const __m128i *preg11,
+    const __m128i *preg12, const __m128i *preg13, const __m128i *preg14,
+    const __m128i *preg15, const __m128i *zero) {
   int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero);
   if (!overflow) {
     overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero);
     if (!overflow) {
-      overflow = k_check_epi32_overflow_4(preg8, preg9, preg10, preg11,
-                                          zero);
+      overflow = k_check_epi32_overflow_4(preg8, preg9, preg10, preg11, zero);
       if (!overflow) {
-        overflow = k_check_epi32_overflow_4(preg12, preg13, preg14, preg15,
-                                            zero);
+        overflow =
+            k_check_epi32_overflow_4(preg12, preg13, preg14, preg15, zero);
       }
     }
   }
   return overflow;
 }
 
-static INLINE int k_check_epi32_overflow_32(const __m128i *preg0,
-                                            const __m128i *preg1,
-                                            const __m128i *preg2,
-                                            const __m128i *preg3,
-                                            const __m128i *preg4,
-                                            const __m128i *preg5,
-                                            const __m128i *preg6,
-                                            const __m128i *preg7,
-                                            const __m128i *preg8,
-                                            const __m128i *preg9,
-                                            const __m128i *preg10,
-                                            const __m128i *preg11,
-                                            const __m128i *preg12,
-                                            const __m128i *preg13,
-                                            const __m128i *preg14,
-                                            const __m128i *preg15,
-                                            const __m128i *preg16,
-                                            const __m128i *preg17,
-                                            const __m128i *preg18,
-                                            const __m128i *preg19,
-                                            const __m128i *preg20,
-                                            const __m128i *preg21,
-                                            const __m128i *preg22,
-                                            const __m128i *preg23,
-                                            const __m128i *preg24,
-                                            const __m128i *preg25,
-                                            const __m128i *preg26,
-                                            const __m128i *preg27,
-                                            const __m128i *preg28,
-                                            const __m128i *preg29,
-                                            const __m128i *preg30,
-                                            const __m128i *preg31,
-                                            const __m128i *zero) {
+static INLINE int k_check_epi32_overflow_32(
+    const __m128i *preg0, const __m128i *preg1, const __m128i *preg2,
+    const __m128i *preg3, const __m128i *preg4, const __m128i *preg5,
+    const __m128i *preg6, const __m128i *preg7, const __m128i *preg8,
+    const __m128i *preg9, const __m128i *preg10, const __m128i *preg11,
+    const __m128i *preg12, const __m128i *preg13, const __m128i *preg14,
+    const __m128i *preg15, const __m128i *preg16, const __m128i *preg17,
+    const __m128i *preg18, const __m128i *preg19, const __m128i *preg20,
+    const __m128i *preg21, const __m128i *preg22, const __m128i *preg23,
+    const __m128i *preg24, const __m128i *preg25, const __m128i *preg26,
+    const __m128i *preg27, const __m128i *preg28, const __m128i *preg29,
+    const __m128i *preg30, const __m128i *preg31, const __m128i *zero) {
   int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero);
   if (!overflow) {
     overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero);
     if (!overflow) {
       overflow = k_check_epi32_overflow_4(preg8, preg9, preg10, preg11, zero);
       if (!overflow) {
-        overflow = k_check_epi32_overflow_4(preg12, preg13, preg14, preg15,
-                                            zero);
+        overflow =
+            k_check_epi32_overflow_4(preg12, preg13, preg14, preg15, zero);
         if (!overflow) {
-          overflow = k_check_epi32_overflow_4(preg16, preg17, preg18, preg19,
-                                              zero);
+          overflow =
+              k_check_epi32_overflow_4(preg16, preg17, preg18, preg19, zero);
           if (!overflow) {
-            overflow = k_check_epi32_overflow_4(preg20, preg21,
-                                                preg22, preg23, zero);
+            overflow =
+                k_check_epi32_overflow_4(preg20, preg21, preg22, preg23, zero);
             if (!overflow) {
-              overflow = k_check_epi32_overflow_4(preg24, preg25,
-                                                  preg26, preg27, zero);
+              overflow = k_check_epi32_overflow_4(preg24, preg25, preg26,
+                                                  preg27, zero);
               if (!overflow) {
-                overflow = k_check_epi32_overflow_4(preg28, preg29,
-                                                    preg30, preg31, zero);
+                overflow = k_check_epi32_overflow_4(preg28, preg29, preg30,
+                                                    preg31, zero);
               }
             }
           }
@@ -322,7 +243,7 @@ static INLINE int k_check_epi32_overflow_32(const __m128i *preg0,
   return overflow;
 }
 
-static INLINE void store_output(const __m128i *poutput, tran_low_t* dst_ptr) {
+static INLINE void store_output(const __m128i *poutput, tran_low_t *dst_ptr) {
 #if CONFIG_VPX_HIGHBITDEPTH
   const __m128i zero = _mm_setzero_si128();
   const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
@@ -335,7 +256,7 @@ static INLINE void store_output(const __m128i *poutput, tran_low_t* dst_ptr) {
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 }
 
-static INLINE void storeu_output(const __m128i *poutput, tran_low_t* dst_ptr) {
+static INLINE void storeu_output(const __m128i *poutput, tran_low_t *dst_ptr) {
 #if CONFIG_VPX_HIGHBITDEPTH
   const __m128i zero = _mm_setzero_si128();
   const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
@@ -348,9 +269,7 @@ static INLINE void storeu_output(const __m128i *poutput, tran_low_t* dst_ptr) {
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 }
 
-
-static INLINE __m128i mult_round_shift(const __m128i *pin0,
-                                       const __m128i *pin1,
+static INLINE __m128i mult_round_shift(const __m128i *pin0, const __m128i *pin1,
                                        const __m128i *pmultiplier,
                                        const __m128i *prounding,
                                        const int shift) {
@@ -364,12 +283,10 @@ static INLINE __m128i mult_round_shift(const __m128i *pin0,
 }
 
 static INLINE void transpose_and_output8x8(
-    const __m128i *pin00, const __m128i *pin01,
-    const __m128i *pin02, const __m128i *pin03,
-    const __m128i *pin04, const __m128i *pin05,
-    const __m128i *pin06, const __m128i *pin07,
-    const int pass, int16_t* out0_ptr,
-    tran_low_t* out1_ptr) {
+    const __m128i *pin00, const __m128i *pin01, const __m128i *pin02,
+    const __m128i *pin03, const __m128i *pin04, const __m128i *pin05,
+    const __m128i *pin06, const __m128i *pin07, const int pass,
+    int16_t *out0_ptr, tran_low_t *out1_ptr) {
   // 00 01 02 03 04 05 06 07
   // 10 11 12 13 14 15 16 17
   // 20 21 22 23 24 25 26 27
@@ -427,14 +344,14 @@ static INLINE void transpose_and_output8x8(
   // 06 16 26 36 46 56 66 76
   // 07 17 27 37 47 57 67 77
   if (pass == 0) {
-    _mm_storeu_si128((__m128i*)(out0_ptr + 0 * 16), tr2_0);
-    _mm_storeu_si128((__m128i*)(out0_ptr + 1 * 16), tr2_1);
-    _mm_storeu_si128((__m128i*)(out0_ptr + 2 * 16), tr2_2);
-    _mm_storeu_si128((__m128i*)(out0_ptr + 3 * 16), tr2_3);
-    _mm_storeu_si128((__m128i*)(out0_ptr + 4 * 16), tr2_4);
-    _mm_storeu_si128((__m128i*)(out0_ptr + 5 * 16), tr2_5);
-    _mm_storeu_si128((__m128i*)(out0_ptr + 6 * 16), tr2_6);
-    _mm_storeu_si128((__m128i*)(out0_ptr + 7 * 16), tr2_7);
+    _mm_storeu_si128((__m128i *)(out0_ptr + 0 * 16), tr2_0);
+    _mm_storeu_si128((__m128i *)(out0_ptr + 1 * 16), tr2_1);
+    _mm_storeu_si128((__m128i *)(out0_ptr + 2 * 16), tr2_2);
+    _mm_storeu_si128((__m128i *)(out0_ptr + 3 * 16), tr2_3);
+    _mm_storeu_si128((__m128i *)(out0_ptr + 4 * 16), tr2_4);
+    _mm_storeu_si128((__m128i *)(out0_ptr + 5 * 16), tr2_5);
+    _mm_storeu_si128((__m128i *)(out0_ptr + 6 * 16), tr2_6);
+    _mm_storeu_si128((__m128i *)(out0_ptr + 7 * 16), tr2_7);
   } else {
     storeu_output(&tr2_0, (out1_ptr + 0 * 16));
     storeu_output(&tr2_1, (out1_ptr + 1 * 16));
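
store_output()/storeu_output() above widen on the way out when CONFIG_VPX_HIGHBITDEPTH is set: the `_mm_cmplt_epi16` sign mask supplies the upper 16 bits of each widened lane, turning eight int16 coefficients into eight int32 (tran_low_t) values with two stores. A minimal sketch of the widening store (unaligned variant; helper name is mine):

```c
#include <emmintrin.h>
#include <stdint.h>

/* Widen and store: negative lanes compare as all-ones, providing the
 * sign-extension bits for the upper half of each 32-bit slot. */
static void store8_sign_extended(__m128i v, int32_t *dst) {
  const __m128i sign = _mm_cmplt_epi16(v, _mm_setzero_si128());
  _mm_storeu_si128((__m128i *)(dst + 0), _mm_unpacklo_epi16(v, sign));
  _mm_storeu_si128((__m128i *)(dst + 4), _mm_unpackhi_epi16(v, sign));
}
```
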
diff --git a/vpx_dsp/x86/halfpix_variance_sse2.c b/vpx_dsp/x86/halfpix_variance_sse2.c
index 5782155bf8a6bd698844d6997317dffa048b72f6..2a2bfebad2ca91d1af88ba028daf26f0901a11a1 100644
--- a/vpx_dsp/x86/halfpix_variance_sse2.c
+++ b/vpx_dsp/x86/halfpix_variance_sse2.c
@@ -15,10 +15,8 @@
 void vpx_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref,
                                             int ref_stride,
                                             const unsigned char *src,
-                                            int src_stride,
-                                            unsigned int height,
-                                            int *sum,
-                                            unsigned int *sumsquared);
+                                            int src_stride, unsigned int height,
+                                            int *sum, unsigned int *sumsquared);
 void vpx_half_horiz_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
                                        const unsigned char *src, int src_stride,
                                        unsigned int height, int *sum,
@@ -31,8 +29,7 @@ void vpx_half_vert_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
 uint32_t vpx_variance_halfpixvar16x16_h_sse2(const unsigned char *src,
                                              int src_stride,
                                              const unsigned char *dst,
-                                             int dst_stride,
-                                             uint32_t *sse) {
+                                             int dst_stride, uint32_t *sse) {
   int xsum0;
   unsigned int xxsum0;
 
@@ -46,23 +43,20 @@ uint32_t vpx_variance_halfpixvar16x16_h_sse2(const unsigned char *src,
 uint32_t vpx_variance_halfpixvar16x16_v_sse2(const unsigned char *src,
                                              int src_stride,
                                              const unsigned char *dst,
-                                             int dst_stride,
-                                             uint32_t *sse) {
+                                             int dst_stride, uint32_t *sse) {
   int xsum0;
   unsigned int xxsum0;
-  vpx_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
-                                   &xsum0, &xxsum0);
+  vpx_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, &xsum0,
+                                   &xxsum0);
 
   *sse = xxsum0;
   return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8));
 }
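
These wrappers all end the same way: the kernel accumulates the sum and the
sum of squares over the 16x16 block (after half-pel filtering, declared above
and elided here), and the wrapper finishes with
variance = SSE - sum * sum / 256, i.e. the >> 8. A scalar reference for that
final step, with a helper name of our own choosing:

#include <stdint.h>

/* Scalar reference for the 16x16 variance formula used above:
 * variance = SSE - (sum * sum) >> 8. Half-pel filtering is omitted. */
static uint32_t variance16x16_ref(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  uint32_t *sse) {
  int sum = 0;
  uint32_t sq = 0;
  int i, j;
  for (i = 0; i < 16; ++i) {
    for (j = 0; j < 16; ++j) {
      const int d = src[i * src_stride + j] - ref[i * ref_stride + j];
      sum += d;
      sq += (uint32_t)(d * d);
    }
  }
  *sse = sq;
  return sq - (((uint32_t)sum * sum) >> 8);
}
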
 
-
 uint32_t vpx_variance_halfpixvar16x16_hv_sse2(const unsigned char *src,
                                               int src_stride,
                                               const unsigned char *dst,
-                                              int dst_stride,
-                                              uint32_t *sse) {
+                                              int dst_stride, uint32_t *sse) {
   int xsum0;
   unsigned int xxsum0;
 
diff --git a/vpx_dsp/x86/highbd_loopfilter_sse2.c b/vpx_dsp/x86/highbd_loopfilter_sse2.c
index c4fd5e1a02b41e7024ff127fc2a3503c6f82c818..98fed851883074f53dd73924467ef5cb4ffc2690 100644
--- a/vpx_dsp/x86/highbd_loopfilter_sse2.c
+++ b/vpx_dsp/x86/highbd_loopfilter_sse2.c
@@ -25,16 +25,13 @@ static INLINE __m128i signed_char_clamp_bd_sse2(__m128i value, int bd) {
 
   if (bd == 8) {
     t80 = _mm_set1_epi16(0x80);
-    max = _mm_subs_epi16(
-              _mm_subs_epi16(_mm_slli_epi16(one, 8), one), t80);
+    max = _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, 8), one), t80);
   } else if (bd == 10) {
     t80 = _mm_set1_epi16(0x200);
-    max = _mm_subs_epi16(
-              _mm_subs_epi16(_mm_slli_epi16(one, 10), one), t80);
+    max = _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, 10), one), t80);
   } else {  // bd == 12
     t80 = _mm_set1_epi16(0x800);
-    max = _mm_subs_epi16(
-              _mm_subs_epi16(_mm_slli_epi16(one, 12), one), t80);
+    max = _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, 12), one), t80);
   }
 
   min = _mm_subs_epi16(zero, t80);
@@ -51,8 +48,7 @@ static INLINE __m128i signed_char_clamp_bd_sse2(__m128i value, int bd) {
 
 // TODO(debargha, peter): Break up large functions into smaller ones
 // in this file.
-static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
-                                                   int p,
+static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, int p,
                                                    const uint8_t *_blimit,
                                                    const uint8_t *_limit,
                                                    const uint8_t *_thresh,
@@ -83,16 +79,16 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
     blimit = _mm_slli_epi16(
         _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
     limit = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
     thresh = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
   } else {  // bd == 12
     blimit = _mm_slli_epi16(
         _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
     limit = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
     thresh = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
   }
 
   q4 = _mm_load_si128((__m128i *)(s + 4 * p));
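
The bd == 10 and bd == 12 branches above repeat one operation three times:
zero-extend the 8-bit blimit/limit/thresh vector to 16-bit lanes, then shift
left by bd - 8. A sketch of that step factored into a helper (the factoring
and the name are ours; the diff keeps the three calls inline):

#include <emmintrin.h>
#include <stdint.h>

/* Widen a loopfilter threshold from 8- to 16-bit lanes and rescale it to
 * the working bit depth: shift is 0 for bd 8, 2 for bd 10, 4 for bd 12. */
static __m128i scale_threshold(const uint8_t *threshold8, int shift) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i t = _mm_load_si128((const __m128i *)threshold8);
  return _mm_slli_epi16(_mm_unpacklo_epi8(t, zero), shift);
}
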
@@ -120,25 +116,22 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
   hev = _mm_subs_epu16(flat, thresh);
   hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
 
-  abs_p0q0 =_mm_adds_epu16(abs_p0q0, abs_p0q0);  // abs(p0 - q0) * 2
-  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);  // abs(p1 - q1) / 2
+  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);  // abs(p0 - q0) * 2
+  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);         // abs(p1 - q1) / 2
   mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
   mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
   mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p1, p0),
-                                    _mm_subs_epu16(p0, p1)),
-                       _mm_or_si128(_mm_subs_epu16(q1, q0),
-                                    _mm_subs_epu16(q0, q1)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)),
+      _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1)));
   mask = _mm_max_epi16(work, mask);
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
-                                    _mm_subs_epu16(p1, p2)),
-                       _mm_or_si128(_mm_subs_epu16(q2, q1),
-                                    _mm_subs_epu16(q1, q2)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p2, p1), _mm_subs_epu16(p1, p2)),
+      _mm_or_si128(_mm_subs_epu16(q2, q1), _mm_subs_epu16(q1, q2)));
   mask = _mm_max_epi16(work, mask);
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p2),
-                                    _mm_subs_epu16(p2, p3)),
-                       _mm_or_si128(_mm_subs_epu16(q3, q2),
-                                    _mm_subs_epu16(q2, q3)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p3, p2), _mm_subs_epu16(p2, p3)),
+      _mm_or_si128(_mm_subs_epu16(q3, q2), _mm_subs_epu16(q2, q3)));
   mask = _mm_max_epi16(work, mask);
 
   mask = _mm_subs_epu16(mask, limit);
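
The mask computation above leans on one idiom throughout: SSE2 has no
unsigned 16-bit absolute difference, so |a - b| is assembled from two
saturating subtractions, exactly one of which is nonzero. A minimal sketch:

#include <emmintrin.h>

/* |a - b| per uint16 lane: _mm_subs_epu16 saturates at zero, so one of
 * the two subtractions is 0 and OR-ing them leaves the true difference. */
static __m128i abs_diff_u16(__m128i a, __m128i b) {
  return _mm_or_si128(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a));
}
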
@@ -162,8 +155,8 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
   ps0 = _mm_subs_epi16(p0, t80);
   qs0 = _mm_subs_epi16(q0, t80);
 
-  filt = _mm_and_si128(
-      signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd), hev);
+  filt = _mm_and_si128(signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd),
+                       hev);
   work_a = _mm_subs_epi16(qs0, ps0);
   filt = _mm_adds_epi16(filt, work_a);
   filt = _mm_adds_epi16(filt, work_a);
@@ -177,33 +170,27 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
   filter2 = _mm_srai_epi16(filter2, 0x3);
 
   qs0 = _mm_adds_epi16(
-      signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd),
-      t80);
+      signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd), t80);
   ps0 = _mm_adds_epi16(
-      signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd),
-      t80);
+      signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd), t80);
   filt = _mm_adds_epi16(filter1, t1);
   filt = _mm_srai_epi16(filt, 1);
   filt = _mm_andnot_si128(hev, filt);
-  qs1 = _mm_adds_epi16(
-      signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd),
-      t80);
-  ps1 = _mm_adds_epi16(
-      signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd),
-      t80);
+  qs1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd),
+                       t80);
+  ps1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd),
+                       t80);
 
   // end highbd_filter4
   // loopfilter done
 
   // highbd_flat_mask4
-  flat = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p0),
-                                    _mm_subs_epu16(p0, p2)),
-                       _mm_or_si128(_mm_subs_epu16(p3, p0),
-                                    _mm_subs_epu16(p0, p3)));
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(q2, q0),
-                                    _mm_subs_epu16(q0, q2)),
-                       _mm_or_si128(_mm_subs_epu16(q3, q0),
-                                    _mm_subs_epu16(q0, q3)));
+  flat = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p2, p0), _mm_subs_epu16(p0, p2)),
+      _mm_or_si128(_mm_subs_epu16(p3, p0), _mm_subs_epu16(p0, p3)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(q2, q0), _mm_subs_epu16(q0, q2)),
+      _mm_or_si128(_mm_subs_epu16(q3, q0), _mm_subs_epu16(q0, q3)));
   flat = _mm_max_epi16(work, flat);
   work = _mm_max_epi16(abs_p1p0, abs_q1q0);
   flat = _mm_max_epi16(work, flat);
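
The highbd_filter4 block that ends here is easier to audit against a scalar
restatement. This paraphrase follows the steps visible in the hunks above,
with our own names; hev and mask are 0/1 flags instead of lane masks, the
t80 bias is dropped, and clamp16 stands in for signed_char_clamp_bd_sse2:

#include <stdint.h>

static int16_t clamp16(int v, int lo, int hi) {
  return (int16_t)(v < lo ? lo : (v > hi ? hi : v));
}

/* Scalar paraphrase of filter4 for one pixel column. */
static void filter4_ref(int16_t *p1, int16_t *p0, int16_t *q0, int16_t *q1,
                        int hev, int mask, int lo, int hi) {
  int filt = hev ? clamp16(*p1 - *q1, lo, hi) : 0;  /* filt &= hev */
  filt += 3 * (*q0 - *p0);  /* the repeated adds of work_a above */
  filt = mask ? clamp16(filt, lo, hi) : 0;
  {
    const int filter1 = clamp16(filt + 4, lo, hi) >> 3;
    const int filter2 = clamp16(filt + 3, lo, hi) >> 3;
    *q0 = clamp16(*q0 - filter1, lo, hi);
    *p0 = clamp16(*p0 + filter2, lo, hi);
    filt = (filter1 + 1) >> 1;  /* rounded half of Filter1 */
    if (!hev) {                 /* filt = andnot(hev, filt) */
      *q1 = clamp16(*q1 - filt, lo, hi);
      *p1 = clamp16(*p1 + filt, lo, hi);
    }
  }
}
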
@@ -231,27 +218,23 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
 
   // highbd_flat_mask5 (arguments passed in are p0, q0, p4-p7, q4-q7
   // but referred to as p0-p4 & q0-q4 in fn)
-  flat2 = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p4, p0),
-                                     _mm_subs_epu16(p0, p4)),
-                        _mm_or_si128(_mm_subs_epu16(q4, q0),
-                                     _mm_subs_epu16(q0, q4)));
-
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p5, p0),
-                                    _mm_subs_epu16(p0, p5)),
-                       _mm_or_si128(_mm_subs_epu16(q5, q0),
-                                    _mm_subs_epu16(q0, q5)));
+  flat2 = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p4, p0), _mm_subs_epu16(p0, p4)),
+      _mm_or_si128(_mm_subs_epu16(q4, q0), _mm_subs_epu16(q0, q4)));
+
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p5, p0), _mm_subs_epu16(p0, p5)),
+      _mm_or_si128(_mm_subs_epu16(q5, q0), _mm_subs_epu16(q0, q5)));
   flat2 = _mm_max_epi16(work, flat2);
 
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p6, p0),
-                                    _mm_subs_epu16(p0, p6)),
-                       _mm_or_si128(_mm_subs_epu16(q6, q0),
-                                    _mm_subs_epu16(q0, q6)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p6, p0), _mm_subs_epu16(p0, p6)),
+      _mm_or_si128(_mm_subs_epu16(q6, q0), _mm_subs_epu16(q0, q6)));
   flat2 = _mm_max_epi16(work, flat2);
 
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p7, p0),
-                                    _mm_subs_epu16(p0, p7)),
-                       _mm_or_si128(_mm_subs_epu16(q7, q0),
-                                    _mm_subs_epu16(q0, q7)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p7, p0), _mm_subs_epu16(p0, p7)),
+      _mm_or_si128(_mm_subs_epu16(q7, q0), _mm_subs_epu16(q0, q7)));
   flat2 = _mm_max_epi16(work, flat2);
 
   if (bd == 8)
@@ -270,29 +253,26 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
   eight = _mm_set1_epi16(8);
   four = _mm_set1_epi16(4);
 
-  pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6, p5),
-                                _mm_add_epi16(p4, p3));
-  pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6, q5),
-                                _mm_add_epi16(q4, q3));
+  pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6, p5), _mm_add_epi16(p4, p3));
+  pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6, q5), _mm_add_epi16(q4, q3));
 
   pixetFilter_p2p1p0 = _mm_add_epi16(p0, _mm_add_epi16(p2, p1));
   pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
 
   pixetFilter_q2q1q0 = _mm_add_epi16(q0, _mm_add_epi16(q2, q1));
   pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
-  pixelFilter_p = _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p,
-                                                      pixelFilter_q));
-  pixetFilter_p2p1p0 =   _mm_add_epi16(four,
-                                       _mm_add_epi16(pixetFilter_p2p1p0,
-                                                     pixetFilter_q2q1q0));
-  flat2_p0 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                          _mm_add_epi16(p7, p0)), 4);
-  flat2_q0 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                          _mm_add_epi16(q7, q0)), 4);
-  flat_p0 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
-                                         _mm_add_epi16(p3, p0)), 3);
-  flat_q0 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
-                                         _mm_add_epi16(q3, q0)), 3);
+  pixelFilter_p =
+      _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, pixelFilter_q));
+  pixetFilter_p2p1p0 = _mm_add_epi16(
+      four, _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0));
+  flat2_p0 =
+      _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7, p0)), 4);
+  flat2_q0 =
+      _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7, q0)), 4);
+  flat_p0 = _mm_srli_epi16(
+      _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(p3, p0)), 3);
+  flat_q0 = _mm_srli_epi16(
+      _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(q3, q0)), 3);
 
   sum_p7 = _mm_add_epi16(p7, p7);
   sum_q7 = _mm_add_epi16(q7, q7);
@@ -308,10 +288,10 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
 
   pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2);
   pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2);
-  flat_p1 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
-                                         _mm_add_epi16(sum_p3, p1)), 3);
-  flat_q1 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
-                                         _mm_add_epi16(sum_q3, q1)), 3);
+  flat_p1 = _mm_srli_epi16(
+      _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p1)), 3);
+  flat_q1 = _mm_srli_epi16(
+      _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q1)), 3);
 
   sum_p7 = _mm_add_epi16(sum_p7, p7);
   sum_q7 = _mm_add_epi16(sum_q7, q7);
@@ -320,53 +300,53 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
 
   pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5);
   pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5);
-  flat2_p2 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                          _mm_add_epi16(sum_p7, p2)), 4);
-  flat2_q2 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                                          _mm_add_epi16(sum_q7, q2)), 4);
+  flat2_p2 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2)), 4);
+  flat2_q2 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2)), 4);
 
   pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1);
   pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1);
-  flat_p2 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
-                                         _mm_add_epi16(sum_p3, p2)), 3);
-  flat_q2 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
-                                         _mm_add_epi16(sum_q3, q2)), 3);
+  flat_p2 = _mm_srli_epi16(
+      _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p2)), 3);
+  flat_q2 = _mm_srli_epi16(
+      _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q2)), 3);
 
   sum_p7 = _mm_add_epi16(sum_p7, p7);
   sum_q7 = _mm_add_epi16(sum_q7, q7);
   pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4);
   pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4);
-  flat2_p3 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                          _mm_add_epi16(sum_p7, p3)), 4);
-  flat2_q3 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                                          _mm_add_epi16(sum_q7, q3)), 4);
+  flat2_p3 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3)), 4);
+  flat2_q3 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3)), 4);
 
   sum_p7 = _mm_add_epi16(sum_p7, p7);
   sum_q7 = _mm_add_epi16(sum_q7, q7);
   pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3);
   pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3);
-  flat2_p4 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                          _mm_add_epi16(sum_p7, p4)), 4);
-  flat2_q4 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                                          _mm_add_epi16(sum_q7, q4)), 4);
+  flat2_p4 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4)), 4);
+  flat2_q4 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4)), 4);
 
   sum_p7 = _mm_add_epi16(sum_p7, p7);
   sum_q7 = _mm_add_epi16(sum_q7, q7);
   pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2);
   pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2);
-  flat2_p5 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                          _mm_add_epi16(sum_p7, p5)), 4);
-  flat2_q5 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                                          _mm_add_epi16(sum_q7, q5)), 4);
+  flat2_p5 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5)), 4);
+  flat2_q5 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5)), 4);
 
   sum_p7 = _mm_add_epi16(sum_p7, p7);
   sum_q7 = _mm_add_epi16(sum_q7, q7);
   pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1);
   pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1);
-  flat2_p6 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                          _mm_add_epi16(sum_p7, p6)), 4);
-  flat2_q6 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                                          _mm_add_epi16(sum_q7, q6)), 4);
+  flat2_p6 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6)), 4);
+  flat2_q6 = _mm_srli_epi16(
+      _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6)), 4);
 
   //  wide flat
   //  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -496,8 +476,7 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
   _mm_store_si128((__m128i *)(s - 0 * p), q0);
 }
 
-static void highbd_mb_lpf_horizontal_edge_w_sse2_16(uint16_t *s,
-                                                    int p,
+static void highbd_mb_lpf_horizontal_edge_w_sse2_16(uint16_t *s, int p,
                                                     const uint8_t *_blimit,
                                                     const uint8_t *_limit,
                                                     const uint8_t *_thresh,
@@ -511,8 +490,8 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_16(uint16_t *s,
 void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
                                        const uint8_t *_blimit,
                                        const uint8_t *_limit,
-                                       const uint8_t *_thresh,
-                                       int count, int bd) {
+                                       const uint8_t *_thresh, int count,
+                                       int bd) {
   if (count == 1)
     highbd_mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh, bd);
   else
@@ -522,8 +501,8 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
 void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
                                       const uint8_t *_blimit,
                                       const uint8_t *_limit,
-                                      const uint8_t *_thresh,
-                                      int count, int bd) {
+                                      const uint8_t *_thresh, int count,
+                                      int bd) {
   DECLARE_ALIGNED(16, uint16_t, flat_op2[16]);
   DECLARE_ALIGNED(16, uint16_t, flat_op1[16]);
   DECLARE_ALIGNED(16, uint16_t, flat_op0[16]);
@@ -565,19 +544,19 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
     t80 = _mm_set1_epi16(0x80);
   } else if (bd == 10) {
     blimit = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
     limit = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
     thresh = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
     t80 = _mm_set1_epi16(0x200);
   } else {  // bd == 12
     blimit = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
     limit = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
     thresh = _mm_slli_epi16(
-          _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
+        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
     t80 = _mm_set1_epi16(0x800);
   }
 
@@ -587,20 +566,16 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
   qs1 = _mm_subs_epi16(q1, t80);
 
   // filter_mask and hev_mask
-  abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0),
-                          _mm_subs_epu16(p0, p1));
-  abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0),
-                          _mm_subs_epu16(q0, q1));
-
-  abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0),
-                          _mm_subs_epu16(q0, p0));
-  abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1),
-                          _mm_subs_epu16(q1, p1));
+  abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1));
+  abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1));
+
+  abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0));
+  abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1));
   flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
   hev = _mm_subs_epu16(flat, thresh);
   hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
 
-  abs_p0q0 =_mm_adds_epu16(abs_p0q0, abs_p0q0);
+  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
   abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
   mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
   mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
@@ -612,28 +587,24 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
   mask = _mm_max_epi16(abs_q1q0, mask);
   // mask |= (abs(q1 - q0) > limit) * -1;
 
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
-                                    _mm_subs_epu16(p1, p2)),
-                       _mm_or_si128(_mm_subs_epu16(q2, q1),
-                                    _mm_subs_epu16(q1, q2)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p2, p1), _mm_subs_epu16(p1, p2)),
+      _mm_or_si128(_mm_subs_epu16(q2, q1), _mm_subs_epu16(q1, q2)));
   mask = _mm_max_epi16(work, mask);
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p2),
-                                    _mm_subs_epu16(p2, p3)),
-                       _mm_or_si128(_mm_subs_epu16(q3, q2),
-                                    _mm_subs_epu16(q2, q3)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p3, p2), _mm_subs_epu16(p2, p3)),
+      _mm_or_si128(_mm_subs_epu16(q3, q2), _mm_subs_epu16(q2, q3)));
   mask = _mm_max_epi16(work, mask);
   mask = _mm_subs_epu16(mask, limit);
   mask = _mm_cmpeq_epi16(mask, zero);
 
   // flat_mask4
-  flat = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p0),
-                                    _mm_subs_epu16(p0, p2)),
-                       _mm_or_si128(_mm_subs_epu16(q2, q0),
-                                    _mm_subs_epu16(q0, q2)));
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p0),
-                                    _mm_subs_epu16(p0, p3)),
-                       _mm_or_si128(_mm_subs_epu16(q3, q0),
-                                    _mm_subs_epu16(q0, q3)));
+  flat = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p2, p0), _mm_subs_epu16(p0, p2)),
+      _mm_or_si128(_mm_subs_epu16(q2, q0), _mm_subs_epu16(q0, q2)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p3, p0), _mm_subs_epu16(p0, p3)),
+      _mm_or_si128(_mm_subs_epu16(q3, q0), _mm_subs_epu16(q0, q3)));
   flat = _mm_max_epi16(work, flat);
   flat = _mm_max_epi16(abs_p1p0, flat);
   flat = _mm_max_epi16(abs_q1q0, flat);
@@ -756,24 +727,20 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
   _mm_store_si128((__m128i *)(s + 2 * p), q2);
 }
 
-void vpx_highbd_lpf_horizontal_8_dual_sse2(uint16_t *s, int p,
-                                           const uint8_t *_blimit0,
-                                           const uint8_t *_limit0,
-                                           const uint8_t *_thresh0,
-                                           const uint8_t *_blimit1,
-                                           const uint8_t *_limit1,
-                                           const uint8_t *_thresh1,
-                                           int bd) {
+void vpx_highbd_lpf_horizontal_8_dual_sse2(
+    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
+    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
+    const uint8_t *_thresh1, int bd) {
   vpx_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd);
-  vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1,
-                                   1, bd);
+  vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1,
+                                   bd);
 }
 
 void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
                                       const uint8_t *_blimit,
                                       const uint8_t *_limit,
-                                      const uint8_t *_thresh,
-                                      int count, int bd) {
+                                      const uint8_t *_thresh, int count,
+                                      int bd) {
   const __m128i zero = _mm_set1_epi16(0);
   __m128i blimit, limit, thresh;
   __m128i mask, hev, flat;
@@ -785,16 +752,16 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
   __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
   __m128i q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
   __m128i q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
-  const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0),
-                                        _mm_subs_epu16(p0, p1));
-  const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0),
-                                        _mm_subs_epu16(q0, q1));
+  const __m128i abs_p1p0 =
+      _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1));
+  const __m128i abs_q1q0 =
+      _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1));
   const __m128i ffff = _mm_cmpeq_epi16(abs_p1p0, abs_p1p0);
   const __m128i one = _mm_set1_epi16(1);
-  __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0),
-                                  _mm_subs_epu16(q0, p0));
-  __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1),
-                                  _mm_subs_epu16(q1, p1));
+  __m128i abs_p0q0 =
+      _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0));
+  __m128i abs_p1q1 =
+      _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1));
   __m128i work;
   const __m128i t4 = _mm_set1_epi16(4);
   const __m128i t3 = _mm_set1_epi16(3);
@@ -860,7 +827,7 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
   hev = _mm_subs_epu16(flat, thresh);
   hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
 
-  abs_p0q0 =_mm_adds_epu16(abs_p0q0, abs_p0q0);
+  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
   abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
   mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
   mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
@@ -870,15 +837,13 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
   mask = _mm_max_epi16(flat, mask);
   // mask |= (abs(p1 - p0) > limit) * -1;
   // mask |= (abs(q1 - q0) > limit) * -1;
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
-                                    _mm_subs_epu16(p1, p2)),
-                       _mm_or_si128(_mm_subs_epu16(p3, p2),
-                                    _mm_subs_epu16(p2, p3)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(p2, p1), _mm_subs_epu16(p1, p2)),
+      _mm_or_si128(_mm_subs_epu16(p3, p2), _mm_subs_epu16(p2, p3)));
   mask = _mm_max_epi16(work, mask);
-  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(q2, q1),
-                                    _mm_subs_epu16(q1, q2)),
-                       _mm_or_si128(_mm_subs_epu16(q3, q2),
-                                    _mm_subs_epu16(q2, q3)));
+  work = _mm_max_epi16(
+      _mm_or_si128(_mm_subs_epu16(q2, q1), _mm_subs_epu16(q1, q2)),
+      _mm_or_si128(_mm_subs_epu16(q3, q2), _mm_subs_epu16(q2, q3)));
   mask = _mm_max_epi16(work, mask);
   mask = _mm_subs_epu16(mask, limit);
   mask = _mm_cmpeq_epi16(mask, zero);
@@ -900,8 +865,8 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
   // Filter1 >> 3
   work_a = _mm_cmpgt_epi16(zero, filter1);  // get the values that are <0
   filter1 = _mm_srli_epi16(filter1, 3);
-  work_a = _mm_and_si128(work_a, tffe0);  // sign bits for the values < 0
-  filter1 = _mm_and_si128(filter1, t1f);  // clamp the range
+  work_a = _mm_and_si128(work_a, tffe0);    // sign bits for the values < 0
+  filter1 = _mm_and_si128(filter1, t1f);    // clamp the range
   filter1 = _mm_or_si128(filter1, work_a);  // reinsert the sign bits
 
   // Filter2 >> 3
@@ -923,12 +888,12 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
 
   q0 = _mm_adds_epi16(
       signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd), t80);
-  q1 = _mm_adds_epi16(
-      signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd), t80);
+  q1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd),
+                      t80);
   p0 = _mm_adds_epi16(
       signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd), t80);
-  p1 = _mm_adds_epi16(
-      signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd), t80);
+  p1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd),
+                      t80);
 
   _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
   _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
@@ -936,36 +901,39 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
   _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
 }
 
-void vpx_highbd_lpf_horizontal_4_dual_sse2(uint16_t *s, int p,
-                                           const uint8_t *_blimit0,
-                                           const uint8_t *_limit0,
-                                           const uint8_t *_thresh0,
-                                           const uint8_t *_blimit1,
-                                           const uint8_t *_limit1,
-                                           const uint8_t *_thresh1,
-                                           int bd) {
+void vpx_highbd_lpf_horizontal_4_dual_sse2(
+    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
+    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
+    const uint8_t *_thresh1, int bd) {
   vpx_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd);
   vpx_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1,
                                    bd);
 }
 
-static INLINE void highbd_transpose(uint16_t *src[], int in_p,
-                                    uint16_t *dst[], int out_p,
-                                    int num_8x8_to_transpose) {
+static INLINE void highbd_transpose(uint16_t *src[], int in_p, uint16_t *dst[],
+                                    int out_p, int num_8x8_to_transpose) {
   int idx8x8 = 0;
   __m128i p0, p1, p2, p3, p4, p5, p6, p7, x0, x1, x2, x3, x4, x5, x6, x7;
   do {
     uint16_t *in = src[idx8x8];
     uint16_t *out = dst[idx8x8];
 
-    p0 = _mm_loadu_si128((__m128i *)(in + 0*in_p));  // 00 01 02 03 04 05 06 07
-    p1 = _mm_loadu_si128((__m128i *)(in + 1*in_p));  // 10 11 12 13 14 15 16 17
-    p2 = _mm_loadu_si128((__m128i *)(in + 2*in_p));  // 20 21 22 23 24 25 26 27
-    p3 = _mm_loadu_si128((__m128i *)(in + 3*in_p));  // 30 31 32 33 34 35 36 37
-    p4 = _mm_loadu_si128((__m128i *)(in + 4*in_p));  // 40 41 42 43 44 45 46 47
-    p5 = _mm_loadu_si128((__m128i *)(in + 5*in_p));  // 50 51 52 53 54 55 56 57
-    p6 = _mm_loadu_si128((__m128i *)(in + 6*in_p));  // 60 61 62 63 64 65 66 67
-    p7 = _mm_loadu_si128((__m128i *)(in + 7*in_p));  // 70 71 72 73 74 75 76 77
+    p0 =
+        _mm_loadu_si128((__m128i *)(in + 0 * in_p));  // 00 01 02 03 04 05 06 07
+    p1 =
+        _mm_loadu_si128((__m128i *)(in + 1 * in_p));  // 10 11 12 13 14 15 16 17
+    p2 =
+        _mm_loadu_si128((__m128i *)(in + 2 * in_p));  // 20 21 22 23 24 25 26 27
+    p3 =
+        _mm_loadu_si128((__m128i *)(in + 3 * in_p));  // 30 31 32 33 34 35 36 37
+    p4 =
+        _mm_loadu_si128((__m128i *)(in + 4 * in_p));  // 40 41 42 43 44 45 46 47
+    p5 =
+        _mm_loadu_si128((__m128i *)(in + 5 * in_p));  // 50 51 52 53 54 55 56 57
+    p6 =
+        _mm_loadu_si128((__m128i *)(in + 6 * in_p));  // 60 61 62 63 64 65 66 67
+    p7 =
+        _mm_loadu_si128((__m128i *)(in + 7 * in_p));  // 70 71 72 73 74 75 76 77
     // 00 10 01 11 02 12 03 13
     x0 = _mm_unpacklo_epi16(p0, p1);
     // 20 30 21 31 22 32 23 33
@@ -983,9 +951,9 @@ static INLINE void highbd_transpose(uint16_t *src[], int in_p,
     // 01 11 21 31 41 51 61 71
     x7 = _mm_unpackhi_epi64(x4, x5);
 
-    _mm_storeu_si128((__m128i *)(out + 0*out_p), x6);
+    _mm_storeu_si128((__m128i *)(out + 0 * out_p), x6);
     // 00 10 20 30 40 50 60 70
-    _mm_storeu_si128((__m128i *)(out + 1*out_p), x7);
+    _mm_storeu_si128((__m128i *)(out + 1 * out_p), x7);
     // 01 11 21 31 41 51 61 71
 
     // 02 12 22 32 03 13 23 33
@@ -997,9 +965,9 @@ static INLINE void highbd_transpose(uint16_t *src[], int in_p,
     // 03 13 23 33 43 53 63 73
     x7 = _mm_unpackhi_epi64(x4, x5);
 
-    _mm_storeu_si128((__m128i *)(out + 2*out_p), x6);
+    _mm_storeu_si128((__m128i *)(out + 2 * out_p), x6);
     // 02 12 22 32 42 52 62 72
-    _mm_storeu_si128((__m128i *)(out + 3*out_p), x7);
+    _mm_storeu_si128((__m128i *)(out + 3 * out_p), x7);
     // 03 13 23 33 43 53 63 73
 
     // 04 14 05 15 06 16 07 17
@@ -1019,9 +987,9 @@ static INLINE void highbd_transpose(uint16_t *src[], int in_p,
     // 05 15 25 35 45 55 65 75
     x7 = _mm_unpackhi_epi64(x4, x5);
 
-    _mm_storeu_si128((__m128i *)(out + 4*out_p), x6);
+    _mm_storeu_si128((__m128i *)(out + 4 * out_p), x6);
     // 04 14 24 34 44 54 64 74
-    _mm_storeu_si128((__m128i *)(out + 5*out_p), x7);
+    _mm_storeu_si128((__m128i *)(out + 5 * out_p), x7);
     // 05 15 25 35 45 55 65 75
 
     // 06 16 26 36 07 17 27 37
@@ -1033,15 +1001,15 @@ static INLINE void highbd_transpose(uint16_t *src[], int in_p,
     // 07 17 27 37 47 57 67 77
     x7 = _mm_unpackhi_epi64(x4, x5);
 
-    _mm_storeu_si128((__m128i *)(out + 6*out_p), x6);
+    _mm_storeu_si128((__m128i *)(out + 6 * out_p), x6);
     // 06 16 26 36 46 56 66 76
-    _mm_storeu_si128((__m128i *)(out + 7*out_p), x7);
+    _mm_storeu_si128((__m128i *)(out + 7 * out_p), x7);
     // 07 17 27 37 47 57 67 77
   } while (++idx8x8 < num_8x8_to_transpose);
 }
 
-static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1,
-                                        int in_p, uint16_t *out, int out_p) {
+static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1, int in_p,
+                                        uint16_t *out, int out_p) {
   uint16_t *src0[1];
   uint16_t *src1[1];
   uint16_t *dest0[1];
@@ -1054,10 +1022,8 @@ static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1,
   highbd_transpose(src1, in_p, dest1, out_p, 1);
 }
 
-void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p,
-                                    const uint8_t *blimit,
-                                    const uint8_t *limit,
-                                    const uint8_t *thresh,
+void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
+                                    const uint8_t *limit, const uint8_t *thresh,
                                     int count, int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
   uint16_t *src[1];
@@ -1081,14 +1047,10 @@ void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p,
   highbd_transpose(src, 8, dst, p, 1);
 }
 
-void vpx_highbd_lpf_vertical_4_dual_sse2(uint16_t *s, int p,
-                                         const uint8_t *blimit0,
-                                         const uint8_t *limit0,
-                                         const uint8_t *thresh0,
-                                         const uint8_t *blimit1,
-                                         const uint8_t *limit1,
-                                         const uint8_t *thresh1,
-                                         int bd) {
+void vpx_highbd_lpf_vertical_4_dual_sse2(
+    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+    const uint8_t *thresh1, int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
   uint16_t *src[2];
   uint16_t *dst[2];
@@ -1108,10 +1070,8 @@ void vpx_highbd_lpf_vertical_4_dual_sse2(uint16_t *s, int p,
   highbd_transpose(src, 16, dst, p, 2);
 }
 
-void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p,
-                                    const uint8_t *blimit,
-                                    const uint8_t *limit,
-                                    const uint8_t *thresh,
+void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
+                                    const uint8_t *limit, const uint8_t *thresh,
                                     int count, int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
   uint16_t *src[1];
@@ -1135,14 +1095,10 @@ void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p,
   highbd_transpose(src, 8, dst, p, 1);
 }
 
-void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t *s, int p,
-                                         const uint8_t *blimit0,
-                                         const uint8_t *limit0,
-                                         const uint8_t *thresh0,
-                                         const uint8_t *blimit1,
-                                         const uint8_t *limit1,
-                                         const uint8_t *thresh1,
-                                         int bd) {
+void vpx_highbd_lpf_vertical_8_dual_sse2(
+    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+    const uint8_t *thresh1, int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
   uint16_t *src[2];
   uint16_t *dst[2];
@@ -1163,11 +1119,9 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t *s, int p,
   highbd_transpose(src, 16, dst, p, 2);
 }
 
-void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p,
-                                     const uint8_t *blimit,
+void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                      const uint8_t *limit,
-                                     const uint8_t *thresh,
-                                     int bd) {
+                                     const uint8_t *thresh, int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 16]);
   uint16_t *src[2];
   uint16_t *dst[2];
@@ -1192,12 +1146,10 @@ void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p,
   highbd_transpose(src, 8, dst, p, 2);
 }
 
-void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s,
-                                          int p,
+void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p,
                                           const uint8_t *blimit,
                                           const uint8_t *limit,
-                                          const uint8_t *thresh,
-                                          int bd) {
+                                          const uint8_t *thresh, int bd) {
   DECLARE_ALIGNED(16, uint16_t, t_dst[256]);
 
   //  Transpose 16x16
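
Before the next file: the unpack ladders in highbd_transpose() above are a
plain 8x8 transpose, as the 00 10 20 ... comment trail traces, and the
vertical filters use it to turn columns into rows, run the horizontal filter
on the scratch buffer, and transpose back. A scalar equivalent of one 8x8
block, useful for checking the lane comments:

#include <stdint.h>

/* Scalar equivalent of one 8x8 block of highbd_transpose(): element
 * (r, c) of the input becomes element (c, r) of the output. */
static void transpose8x8_ref(const uint16_t *in, int in_p, uint16_t *out,
                             int out_p) {
  int r, c;
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) out[c * out_p + r] = in[r * in_p + c];
}
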
diff --git a/vpx_dsp/x86/highbd_quantize_intrin_sse2.c b/vpx_dsp/x86/highbd_quantize_intrin_sse2.c
index 42be1ea1b109cb8c06d1e620e5e0e5ac034dd216..6fb8891e1ca45f92e623df909028b4ad61463bc6 100644
--- a/vpx_dsp/x86/highbd_quantize_intrin_sse2.c
+++ b/vpx_dsp/x86/highbd_quantize_intrin_sse2.c
@@ -15,26 +15,19 @@
 #include "vpx_ports/mem.h"
 
 #if CONFIG_VPX_HIGHBITDEPTH
-void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr,
-                                intptr_t count,
-                                int skip_block,
-                                const int16_t *zbin_ptr,
+void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
+                                int skip_block, const int16_t *zbin_ptr,
                                 const int16_t *round_ptr,
                                 const int16_t *quant_ptr,
                                 const int16_t *quant_shift_ptr,
-                                tran_low_t *qcoeff_ptr,
-                                tran_low_t *dqcoeff_ptr,
-                                const int16_t *dequant_ptr,
-                                uint16_t *eob_ptr,
-                                const int16_t *scan,
-                                const int16_t *iscan) {
+                                tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                                const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                                const int16_t *scan, const int16_t *iscan) {
   int i, j, non_zero_regs = (int)count / 4, eob_i = -1;
   __m128i zbins[2];
   __m128i nzbins[2];
 
-  zbins[0] = _mm_set_epi32((int)zbin_ptr[1],
-                           (int)zbin_ptr[1],
-                           (int)zbin_ptr[1],
+  zbins[0] = _mm_set_epi32((int)zbin_ptr[1], (int)zbin_ptr[1], (int)zbin_ptr[1],
                            (int)zbin_ptr[0]);
   zbins[1] = _mm_set1_epi32((int)zbin_ptr[1]);
 
@@ -73,14 +66,13 @@ void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr,
 
       coeffs = _mm_load_si128((const __m128i *)(coeff_ptr + i * 4));
       coeffs_sign = _mm_srai_epi32(coeffs, 31);
-      coeffs = _mm_sub_epi32(
-            _mm_xor_si128(coeffs, coeffs_sign), coeffs_sign);
+      coeffs = _mm_sub_epi32(_mm_xor_si128(coeffs, coeffs_sign), coeffs_sign);
       tmp1 = _mm_cmpgt_epi32(coeffs, zbins[i != 0]);
       tmp2 = _mm_cmpeq_epi32(coeffs, zbins[i != 0]);
       tmp1 = _mm_or_si128(tmp1, tmp2);
       test = _mm_movemask_epi8(tmp1);
-      _mm_storeu_si128((__m128i*)abs_coeff, coeffs);
-      _mm_storeu_si128((__m128i*)coeff_sign, coeffs_sign);
+      _mm_storeu_si128((__m128i *)abs_coeff, coeffs);
+      _mm_storeu_si128((__m128i *)coeff_sign, coeffs_sign);
 
       for (j = 0; j < 4; j++) {
         if (test & (1 << (4 * j))) {
@@ -91,8 +83,7 @@ void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr,
               (uint32_t)((tmp2 * quant_shift_ptr[k != 0]) >> 16);
           qcoeff_ptr[k] = (int)(abs_qcoeff ^ coeff_sign[j]) - coeff_sign[j];
           dqcoeff_ptr[k] = qcoeff_ptr[k] * dequant_ptr[k != 0];
-          if (abs_qcoeff)
-            eob_i = iscan[k] > eob_i ? iscan[k] : eob_i;
+          if (abs_qcoeff) eob_i = iscan[k] > eob_i ? iscan[k] : eob_i;
         }
       }
     }
@@ -100,20 +91,12 @@ void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr,
   *eob_ptr = eob_i + 1;
 }
 
-
-void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr,
-                                      intptr_t n_coeffs,
-                                      int skip_block,
-                                      const int16_t *zbin_ptr,
-                                      const int16_t *round_ptr,
-                                      const int16_t *quant_ptr,
-                                      const int16_t *quant_shift_ptr,
-                                      tran_low_t *qcoeff_ptr,
-                                      tran_low_t *dqcoeff_ptr,
-                                      const int16_t *dequant_ptr,
-                                      uint16_t *eob_ptr,
-                                      const int16_t *scan,
-                                      const int16_t *iscan) {
+void vpx_highbd_quantize_b_32x32_sse2(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+    const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan) {
   __m128i zbins[2];
   __m128i nzbins[2];
   int idx = 0;
@@ -122,10 +105,7 @@ void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr,
   const int zbin0_tmp = ROUND_POWER_OF_TWO(zbin_ptr[0], 1);
   const int zbin1_tmp = ROUND_POWER_OF_TWO(zbin_ptr[1], 1);
   (void)scan;
-  zbins[0] = _mm_set_epi32(zbin1_tmp,
-                           zbin1_tmp,
-                           zbin1_tmp,
-                           zbin0_tmp);
+  zbins[0] = _mm_set_epi32(zbin1_tmp, zbin1_tmp, zbin1_tmp, zbin0_tmp);
   zbins[1] = _mm_set1_epi32(zbin1_tmp);
 
   nzbins[0] = _mm_setzero_si128();
@@ -146,14 +126,10 @@ void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr,
       cmp2 = _mm_cmpgt_epi32(coeffs, nzbins[i != 0]);
       cmp1 = _mm_and_si128(cmp1, cmp2);
       test = _mm_movemask_epi8(cmp1);
-      if (!(test & 0xf))
-        idx_arr[idx++] = i * 4;
-      if (!(test & 0xf0))
-        idx_arr[idx++] = i * 4 + 1;
-      if (!(test & 0xf00))
-        idx_arr[idx++] = i * 4 + 2;
-      if (!(test & 0xf000))
-        idx_arr[idx++] = i * 4 + 3;
+      if (!(test & 0xf)) idx_arr[idx++] = i * 4;
+      if (!(test & 0xf0)) idx_arr[idx++] = i * 4 + 1;
+      if (!(test & 0xf00)) idx_arr[idx++] = i * 4 + 2;
+      if (!(test & 0xf000)) idx_arr[idx++] = i * 4 + 3;
     }
 
     // Quantization pass: only process the coefficients selected in
@@ -163,15 +139,14 @@ void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr,
       const int coeff = coeff_ptr[rc];
       const int coeff_sign = (coeff >> 31);
       const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
-      const int64_t tmp1 = abs_coeff
-                         + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
+      const int64_t tmp1 =
+          abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
       const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1;
       const uint32_t abs_qcoeff =
           (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 15);
       qcoeff_ptr[rc] = (int)(abs_qcoeff ^ coeff_sign) - coeff_sign;
       dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
-      if (abs_qcoeff)
-        eob = iscan[idx_arr[i]] > eob ? iscan[idx_arr[i]] : eob;
+      if (abs_qcoeff) eob = iscan[idx_arr[i]] > eob ? iscan[idx_arr[i]] : eob;
     }
   }
   *eob_ptr = eob + 1;
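
To close out this file, the 32x32 quantizer's per-coefficient arithmetic,
restated as a standalone scalar helper (the helper name is ours, and
ROUND_POWER_OF_TWO is reproduced here as we understand the vpx_ports macro):

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

/* One coefficient of the 32x32 path above: zbin and round are halved
 * relative to the smaller block sizes, the two multiplies compute
 * ((tmp1 * quant) >> 16 + tmp1) * quant_shift >> 15 with
 * tmp1 = |c| + round/2, and dequantization divides by 2 to undo the
 * halving. */
static int32_t quantize_b_32x32_ref(int32_t coeff, int16_t round,
                                    int16_t quant, int16_t quant_shift,
                                    int16_t dequant, int32_t *dqcoeff) {
  const int coeff_sign = coeff >> 31;                       /* 0 or -1 */
  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;  /* |coeff| */
  const int64_t tmp1 = abs_coeff + ROUND_POWER_OF_TWO(round, 1);
  const int64_t tmp2 = ((tmp1 * quant) >> 16) + tmp1;
  const uint32_t abs_qcoeff = (uint32_t)((tmp2 * quant_shift) >> 15);
  const int32_t qcoeff = (int)(abs_qcoeff ^ coeff_sign) - coeff_sign;
  *dqcoeff = qcoeff * dequant / 2;
  return qcoeff;
}
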
diff --git a/vpx_dsp/x86/highbd_variance_sse2.c b/vpx_dsp/x86/highbd_variance_sse2.c
index 81ec5dbdb9c371e25ff913e051826eec47b83822..8db997c7b99c224dc30a43ea0a68dd97eeaed46e 100644
--- a/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/vpx_dsp/x86/highbd_variance_sse2.c
@@ -11,9 +11,9 @@
 
 #include "vpx_ports/mem.h"
 
-typedef uint32_t (*high_variance_fn_t) (const uint16_t *src, int src_stride,
-                                        const uint16_t *ref, int ref_stride,
-                                        uint32_t *sse, int *sum);
+typedef uint32_t (*high_variance_fn_t)(const uint16_t *src, int src_stride,
+                                       const uint16_t *ref, int ref_stride,
+                                       uint32_t *sse, int *sum);
 
 uint32_t vpx_highbd_calc8x8var_sse2(const uint16_t *src, int src_stride,
                                     const uint16_t *ref, int ref_stride,
@@ -24,8 +24,8 @@ uint32_t vpx_highbd_calc16x16var_sse2(const uint16_t *src, int src_stride,
                                       uint32_t *sse, int *sum);
 
 static void highbd_8_variance_sse2(const uint16_t *src, int src_stride,
-                                   const uint16_t *ref, int ref_stride,
-                                   int w, int h, uint32_t *sse, int *sum,
+                                   const uint16_t *ref, int ref_stride, int w,
+                                   int h, uint32_t *sse, int *sum,
                                    high_variance_fn_t var_fn, int block_size) {
   int i, j;
 
@@ -36,8 +36,8 @@ static void highbd_8_variance_sse2(const uint16_t *src, int src_stride,
     for (j = 0; j < w; j += block_size) {
       unsigned int sse0;
       int sum0;
-      var_fn(src + src_stride * i + j, src_stride,
-             ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
+      var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j,
+             ref_stride, &sse0, &sum0);
       *sse += sse0;
       *sum += sum0;
     }
@@ -45,8 +45,8 @@ static void highbd_8_variance_sse2(const uint16_t *src, int src_stride,
 }
 
 static void highbd_10_variance_sse2(const uint16_t *src, int src_stride,
-                                    const uint16_t *ref, int ref_stride,
-                                    int w, int h, uint32_t *sse, int *sum,
+                                    const uint16_t *ref, int ref_stride, int w,
+                                    int h, uint32_t *sse, int *sum,
                                     high_variance_fn_t var_fn, int block_size) {
   int i, j;
   uint64_t sse_long = 0;
@@ -56,8 +56,8 @@ static void highbd_10_variance_sse2(const uint16_t *src, int src_stride,
     for (j = 0; j < w; j += block_size) {
       unsigned int sse0;
       int sum0;
-      var_fn(src + src_stride * i + j, src_stride,
-             ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
+      var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j,
+             ref_stride, &sse0, &sum0);
       sse_long += sse0;
       sum_long += sum0;
     }
@@ -67,8 +67,8 @@ static void highbd_10_variance_sse2(const uint16_t *src, int src_stride,
 }
 
 static void highbd_12_variance_sse2(const uint16_t *src, int src_stride,
-                                    const uint16_t *ref, int ref_stride,
-                                    int w, int h, uint32_t *sse, int *sum,
+                                    const uint16_t *ref, int ref_stride, int w,
+                                    int h, uint32_t *sse, int *sum,
                                     high_variance_fn_t var_fn, int block_size) {
   int i, j;
   uint64_t sse_long = 0;
@@ -78,8 +78,8 @@ static void highbd_12_variance_sse2(const uint16_t *src, int src_stride,
     for (j = 0; j < w; j += block_size) {
       unsigned int sse0;
       int sum0;
-      var_fn(src + src_stride * i + j, src_stride,
-             ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
+      var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j,
+             ref_stride, &sse0, &sum0);
       sse_long += sse0;
       sum_long += sum0;
     }
@@ -88,102 +88,105 @@ static void highbd_12_variance_sse2(const uint16_t *src, int src_stride,
   *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 8);
 }
 
+#define HIGH_GET_VAR(S)                                                       \
+  void vpx_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
+                                         const uint8_t *ref8, int ref_stride, \
+                                         uint32_t *sse, int *sum) {           \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                \
+    uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                                \
+    vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+                                       sum);                                  \
+  }                                                                           \
+                                                                              \
+  void vpx_highbd_10_get##S##x##S##var_sse2(                                  \
+      const uint8_t *src8, int src_stride, const uint8_t *ref8,               \
+      int ref_stride, uint32_t *sse, int *sum) {                              \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                \
+    uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                                \
+    vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+                                       sum);                                  \
+    *sum = ROUND_POWER_OF_TWO(*sum, 2);                                       \
+    *sse = ROUND_POWER_OF_TWO(*sse, 4);                                       \
+  }                                                                           \
+                                                                              \
+  void vpx_highbd_12_get##S##x##S##var_sse2(                                  \
+      const uint8_t *src8, int src_stride, const uint8_t *ref8,               \
+      int ref_stride, uint32_t *sse, int *sum) {                              \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                \
+    uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                                \
+    vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \
+                                       sum);                                  \
+    *sum = ROUND_POWER_OF_TWO(*sum, 4);                                       \
+    *sse = ROUND_POWER_OF_TWO(*sse, 8);                                       \
+  }
 
-#define HIGH_GET_VAR(S) \
-void vpx_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
-                                       const uint8_t *ref8, int ref_stride, \
-                                       uint32_t *sse, int *sum) { \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
-  vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, \
-                                     sse, sum); \
-} \
-\
-void vpx_highbd_10_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
-                                          const uint8_t *ref8, int ref_stride, \
-                                          uint32_t *sse, int *sum) { \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
-  vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, \
-                                     sse, sum); \
-  *sum = ROUND_POWER_OF_TWO(*sum, 2); \
-  *sse = ROUND_POWER_OF_TWO(*sse, 4); \
-} \
-\
-void vpx_highbd_12_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
-                                          const uint8_t *ref8, int ref_stride, \
-                                          uint32_t *sse, int *sum) { \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
-  vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, \
-                                     sse, sum); \
-  *sum = ROUND_POWER_OF_TWO(*sum, 4); \
-  *sse = ROUND_POWER_OF_TWO(*sse, 8); \
-}
-
-HIGH_GET_VAR(16);
-HIGH_GET_VAR(8);
+/* clang-format off */
+HIGH_GET_VAR(16)
+HIGH_GET_VAR(8)
+/* clang-format on */
 
 #undef HIGH_GET_VAR
 
-#define VAR_FN(w, h, block_size, shift) \
-uint32_t vpx_highbd_8_variance##w##x##h##_sse2( \
-    const uint8_t *src8, int src_stride, \
-    const uint8_t *ref8, int ref_stride, uint32_t *sse) { \
-  int sum; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
-  highbd_8_variance_sse2(src, src_stride, ref, ref_stride, w, h, sse, &sum, \
-                         vpx_highbd_calc##block_size##x##block_size##var_sse2, \
-                         block_size); \
-  return *sse - (((int64_t)sum * sum) >> shift); \
-} \
-\
-uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \
-    const uint8_t *src8, int src_stride, \
-    const uint8_t *ref8, int ref_stride, uint32_t *sse) { \
-  int sum; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
-  highbd_10_variance_sse2( \
-      src, src_stride, ref, ref_stride, w, h, sse, &sum, \
-      vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
-  return *sse - (((int64_t)sum * sum) >> shift); \
-} \
-\
-uint32_t vpx_highbd_12_variance##w##x##h##_sse2( \
-    const uint8_t *src8, int src_stride, \
-    const uint8_t *ref8, int ref_stride, uint32_t *sse) { \
-  int sum; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
-  highbd_12_variance_sse2( \
-      src, src_stride, ref, ref_stride, w, h, sse, &sum, \
-      vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
-  return *sse - (((int64_t)sum * sum) >> shift); \
-}
+#define VAR_FN(w, h, block_size, shift)                                    \
+  uint32_t vpx_highbd_8_variance##w##x##h##_sse2(                          \
+      const uint8_t *src8, int src_stride, const uint8_t *ref8,            \
+      int ref_stride, uint32_t *sse) {                                     \
+    int sum;                                                               \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                             \
+    uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                             \
+    highbd_8_variance_sse2(                                                \
+        src, src_stride, ref, ref_stride, w, h, sse, &sum,                 \
+        vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+    return *sse - (((int64_t)sum * sum) >> shift);                         \
+  }                                                                        \
+                                                                           \
+  uint32_t vpx_highbd_10_variance##w##x##h##_sse2(                         \
+      const uint8_t *src8, int src_stride, const uint8_t *ref8,            \
+      int ref_stride, uint32_t *sse) {                                     \
+    int sum;                                                               \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                             \
+    uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                             \
+    highbd_10_variance_sse2(                                               \
+        src, src_stride, ref, ref_stride, w, h, sse, &sum,                 \
+        vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+    return *sse - (((int64_t)sum * sum) >> shift);                         \
+  }                                                                        \
+                                                                           \
+  uint32_t vpx_highbd_12_variance##w##x##h##_sse2(                         \
+      const uint8_t *src8, int src_stride, const uint8_t *ref8,            \
+      int ref_stride, uint32_t *sse) {                                     \
+    int sum;                                                               \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                             \
+    uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);                             \
+    highbd_12_variance_sse2(                                               \
+        src, src_stride, ref, ref_stride, w, h, sse, &sum,                 \
+        vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
+    return *sse - (((int64_t)sum * sum) >> shift);                         \
+  }
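+// Each VAR_FN body evaluates variance = sse - sum^2 / (w * h); the shift
+// argument is log2(w * h), and the (int64_t) cast keeps sum * sum from
+// overflowing 32 bits on the larger block sizes.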
 
-VAR_FN(64, 64, 16, 12);
-VAR_FN(64, 32, 16, 11);
-VAR_FN(32, 64, 16, 11);
-VAR_FN(32, 32, 16, 10);
-VAR_FN(32, 16, 16, 9);
-VAR_FN(16, 32, 16, 9);
-VAR_FN(16, 16, 16, 8);
-VAR_FN(16, 8, 8, 7);
-VAR_FN(8, 16, 8, 7);
-VAR_FN(8, 8, 8, 6);
+/* clang-format off */
+VAR_FN(64, 64, 16, 12)
+VAR_FN(64, 32, 16, 11)
+VAR_FN(32, 64, 16, 11)
+VAR_FN(32, 32, 16, 10)
+VAR_FN(32, 16, 16, 9)
+VAR_FN(16, 32, 16, 9)
+VAR_FN(16, 16, 16, 8)
+VAR_FN(16, 8, 8, 7)
+VAR_FN(8, 16, 8, 7)
+VAR_FN(8, 8, 8, 6)
+/* clang-format on */
 
 #undef VAR_FN
 
 unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t *src8, int src_stride,
-                                      const uint8_t *ref8, int ref_stride,
-                                      unsigned int *sse) {
+                                        const uint8_t *ref8, int ref_stride,
+                                        unsigned int *sse) {
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-  highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 16, 16,
-                         sse, &sum, vpx_highbd_calc16x16var_sse2, 16);
+  highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
+                         vpx_highbd_calc16x16var_sse2, 16);
   return *sse;
 }
 
@@ -193,8 +196,8 @@ unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t *src8, int src_stride,
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-  highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 16, 16,
-                          sse, &sum, vpx_highbd_calc16x16var_sse2, 16);
+  highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
+                          vpx_highbd_calc16x16var_sse2, 16);
   return *sse;
 }
 
@@ -204,19 +207,19 @@ unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t *src8, int src_stride,
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-  highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 16, 16,
-                          sse, &sum, vpx_highbd_calc16x16var_sse2, 16);
+  highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
+                          vpx_highbd_calc16x16var_sse2, 16);
   return *sse;
 }
 
 unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t *src8, int src_stride,
-                                    const uint8_t *ref8, int ref_stride,
-                                    unsigned int *sse) {
+                                      const uint8_t *ref8, int ref_stride,
+                                      unsigned int *sse) {
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-  highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 8, 8,
-                         sse, &sum, vpx_highbd_calc8x8var_sse2, 8);
+  highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
+                         vpx_highbd_calc8x8var_sse2, 8);
   return *sse;
 }
 
@@ -226,8 +229,8 @@ unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t *src8, int src_stride,
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-  highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 8, 8,
-                          sse, &sum, vpx_highbd_calc8x8var_sse2, 8);
+  highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
+                          vpx_highbd_calc8x8var_sse2, 8);
   return *sse;
 }
 
@@ -237,8 +240,8 @@ unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride,
   int sum;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-  highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 8, 8,
-                          sse, &sum, vpx_highbd_calc8x8var_sse2, 8);
+  highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum,
+                          vpx_highbd_calc8x8var_sse2, 8);
   return *sse;
 }
 
@@ -246,17 +249,13 @@ unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride,
 // The 2 unused parameters are place holders for PIC enabled build.
 // These definitions are for functions defined in
 // highbd_subpel_variance_impl_sse2.asm
-#define DECL(w, opt) \
-  int vpx_highbd_sub_pixel_variance##w##xh_##opt(const uint16_t *src, \
-                                                 ptrdiff_t src_stride, \
-                                                 int x_offset, int y_offset, \
-                                                 const uint16_t *dst, \
-                                                 ptrdiff_t dst_stride, \
-                                                 int height, \
-                                                 unsigned int *sse, \
-                                                 void *unused0, void *unused);
+#define DECL(w, opt)                                                         \
+  int vpx_highbd_sub_pixel_variance##w##xh_##opt(                            \
+      const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
+      const uint16_t *dst, ptrdiff_t dst_stride, int height,                 \
+      unsigned int *sse, void *unused0, void *unused);
 #define DECLS(opt1, opt2) \
-  DECL(8, opt1); \
+  DECL(8, opt1);          \
   DECL(16, opt1)
 
 DECLS(sse2, sse);
@@ -265,152 +264,134 @@ DECLS(sse2, sse);
 #undef DECLS
 #undef DECL
 
-#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
-uint32_t vpx_highbd_8_sub_pixel_variance##w##x##h##_##opt(const uint8_t *src8, \
-                                                          int src_stride, \
-                                                          int x_offset, \
-                                                          int y_offset, \
-                                                          const uint8_t *dst8, \
-                                                          int dst_stride, \
-                                                          uint32_t *sse_ptr) { \
-  uint32_t sse; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
-  int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src, src_stride, \
-                                                       x_offset, y_offset, \
-                                                       dst, dst_stride, h, \
-                                                       &sse, NULL, NULL); \
-  if (w > wf) { \
-    unsigned int sse2; \
-    int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 16, \
-                                                          src_stride, \
-                                                          x_offset, y_offset, \
-                                                          dst + 16, \
-                                                          dst_stride, \
-                                                          h, &sse2, \
-                                                          NULL, NULL); \
-    se += se2; \
-    sse += sse2; \
-    if (w > wf * 2) { \
-      se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \
-                                                        x_offset, y_offset, \
-                                                        dst + 32, dst_stride, \
-                                                        h, &sse2, NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-      se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
-          src + 48, src_stride, x_offset, y_offset, \
-          dst + 48, dst_stride, h, &sse2, NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-    } \
-  } \
-  *sse_ptr = sse; \
-  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
-} \
-\
-uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt( \
-    const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
-    const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
-  uint32_t sse; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
-  int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src, src_stride, \
-                                                       x_offset, y_offset, \
-                                                       dst, dst_stride, \
-                                                       h, &sse, NULL, NULL); \
-  if (w > wf) { \
-    uint32_t sse2; \
-    int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 16, \
-                                                          src_stride, \
-                                                          x_offset, y_offset, \
-                                                          dst + 16, \
-                                                          dst_stride, \
-                                                          h, &sse2, \
-                                                          NULL, NULL); \
-    se += se2; \
-    sse += sse2; \
-    if (w > wf * 2) { \
-      se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \
-                                                        x_offset, y_offset, \
-                                                        dst + 32, dst_stride, \
-                                                        h, &sse2, NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-      se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 48, src_stride, \
-                                                        x_offset, y_offset, \
-                                                        dst + 48, dst_stride, \
-                                                        h, &sse2, NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-    } \
-  } \
-  se = ROUND_POWER_OF_TWO(se, 2); \
-  sse = ROUND_POWER_OF_TWO(sse, 4); \
-  *sse_ptr = sse; \
-  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
-} \
-\
-uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt( \
-    const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
-    const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
-  int start_row; \
-  uint32_t sse; \
-  int se = 0; \
-  uint64_t long_sse = 0; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
-  for (start_row = 0; start_row < h; start_row +=16) { \
-    uint32_t sse2; \
-    int height = h - start_row < 16 ? h - start_row : 16; \
-    int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
-        src + (start_row * src_stride), src_stride, \
-        x_offset, y_offset, dst + (start_row * dst_stride), \
-        dst_stride, height, &sse2, NULL, NULL); \
-    se += se2; \
-    long_sse += sse2; \
-    if (w > wf) { \
-      se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
-          src + 16 + (start_row * src_stride), src_stride, \
-          x_offset, y_offset, dst + 16 + (start_row * dst_stride), \
-          dst_stride, height, &sse2, NULL, NULL); \
-      se += se2; \
-      long_sse += sse2; \
-      if (w > wf * 2) { \
-        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
-            src + 32 + (start_row * src_stride), src_stride, \
-            x_offset, y_offset, dst + 32 + (start_row * dst_stride), \
-            dst_stride, height, &sse2, NULL, NULL); \
-        se += se2; \
-        long_sse += sse2; \
-        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
-            src + 48 + (start_row * src_stride), src_stride, \
-            x_offset, y_offset, dst + 48 + (start_row * dst_stride), \
-            dst_stride, height, &sse2, NULL, NULL); \
-        se += se2; \
-        long_sse += sse2; \
-      }\
-    } \
-  } \
-  se = ROUND_POWER_OF_TWO(se, 4); \
-  sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \
-  *sse_ptr = sse; \
-  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
-}
-
-#define FNS(opt1, opt2) \
-FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
-FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
-FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
-FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
-FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
-FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
-FN(16, 16, 16, 4, 4, opt1, (int64_t)); \
-FN(16, 8, 16, 4, 3, opt1, (int64_t)); \
-FN(8, 16, 8, 3, 4, opt1, (int64_t)); \
-FN(8, 8, 8, 3, 3, opt1, (int64_t)); \
-FN(8, 4, 8, 3, 2, opt1, (int64_t));
+#define FN(w, h, wf, wlog2, hlog2, opt, cast)                                  \
+  uint32_t vpx_highbd_8_sub_pixel_variance##w##x##h##_##opt(                   \
+      const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
+      const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) {                \
+    uint32_t sse;                                                              \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
+    uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
+    int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                      \
+        src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL,   \
+        NULL);                                                                 \
+    if (w > wf) {                                                              \
+      unsigned int sse2;                                                       \
+      int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+          src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h,   \
+          &sse2, NULL, NULL);                                                  \
+      se += se2;                                                               \
+      sse += sse2;                                                             \
+      if (w > wf * 2) {                                                        \
+        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+            src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
+            &sse2, NULL, NULL);                                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+            src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
+            &sse2, NULL, NULL);                                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+      }                                                                        \
+    }                                                                          \
+    *sse_ptr = sse;                                                            \
+    return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
+  }                                                                            \
+                                                                               \
+  uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt(                  \
+      const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
+      const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) {                \
+    uint32_t sse;                                                              \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
+    uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
+    int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                      \
+        src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL,   \
+        NULL);                                                                 \
+    if (w > wf) {                                                              \
+      uint32_t sse2;                                                           \
+      int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+          src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h,   \
+          &sse2, NULL, NULL);                                                  \
+      se += se2;                                                               \
+      sse += sse2;                                                             \
+      if (w > wf * 2) {                                                        \
+        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+            src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
+            &sse2, NULL, NULL);                                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+            src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
+            &sse2, NULL, NULL);                                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+      }                                                                        \
+    }                                                                          \
+    se = ROUND_POWER_OF_TWO(se, 2);                                            \
+    sse = ROUND_POWER_OF_TWO(sse, 4);                                          \
+    *sse_ptr = sse;                                                            \
+    return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
+  }                                                                            \
+                                                                               \
+  uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt(                  \
+      const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
+      const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) {                \
+    int start_row;                                                             \
+    uint32_t sse;                                                              \
+    int se = 0;                                                                \
+    uint64_t long_sse = 0;                                                     \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
+    uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
+    for (start_row = 0; start_row < h; start_row += 16) {                      \
+      uint32_t sse2;                                                           \
+      int height = h - start_row < 16 ? h - start_row : 16;                    \
+      int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+          src + (start_row * src_stride), src_stride, x_offset, y_offset,      \
+          dst + (start_row * dst_stride), dst_stride, height, &sse2, NULL,     \
+          NULL);                                                               \
+      se += se2;                                                               \
+      long_sse += sse2;                                                        \
+      if (w > wf) {                                                            \
+        se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                     \
+            src + 16 + (start_row * src_stride), src_stride, x_offset,         \
+            y_offset, dst + 16 + (start_row * dst_stride), dst_stride, height, \
+            &sse2, NULL, NULL);                                                \
+        se += se2;                                                             \
+        long_sse += sse2;                                                      \
+        if (w > wf * 2) {                                                      \
+          se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+              src + 32 + (start_row * src_stride), src_stride, x_offset,       \
+              y_offset, dst + 32 + (start_row * dst_stride), dst_stride,       \
+              height, &sse2, NULL, NULL);                                      \
+          se += se2;                                                           \
+          long_sse += sse2;                                                    \
+          se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(                   \
+              src + 48 + (start_row * src_stride), src_stride, x_offset,       \
+              y_offset, dst + 48 + (start_row * dst_stride), dst_stride,       \
+              height, &sse2, NULL, NULL);                                      \
+          se += se2;                                                           \
+          long_sse += sse2;                                                    \
+        }                                                                      \
+      }                                                                        \
+    }                                                                          \
+    se = ROUND_POWER_OF_TWO(se, 4);                                            \
+    sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8);                           \
+    *sse_ptr = sse;                                                            \
+    return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
+  }
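+// Blocks wider than the wf-pixel kernel are handled above with extra calls at
+// column offsets 16/32/48. The 12-bit path additionally walks the block in
+// 16-row strips and accumulates into the 64-bit long_sse, since a single
+// 32-bit sse could overflow at that bit depth on the tall block sizes.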
 
+#define FNS(opt1, opt2)                  \
+  FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
+  FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
+  FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
+  FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
+  FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
+  FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
+  FN(16, 16, 16, 4, 4, opt1, (int64_t)); \
+  FN(16, 8, 16, 4, 3, opt1, (int64_t));  \
+  FN(8, 16, 8, 3, 4, opt1, (int64_t));   \
+  FN(8, 8, 8, 3, 3, opt1, (int64_t));    \
+  FN(8, 4, 8, 3, 2, opt1, (int64_t));
 
 FNS(sse2, sse);
 
@@ -418,173 +399,154 @@ FNS(sse2, sse);
 #undef FN
 
 // The 2 unused parameters are place holders for PIC enabled build.
-#define DECL(w, opt) \
-int vpx_highbd_sub_pixel_avg_variance##w##xh_##opt(const uint16_t *src, \
-                                                   ptrdiff_t src_stride, \
-                                                   int x_offset, int y_offset, \
-                                                   const uint16_t *dst, \
-                                                   ptrdiff_t dst_stride, \
-                                                   const uint16_t *sec, \
-                                                   ptrdiff_t sec_stride, \
-                                                   int height, \
-                                                   unsigned int *sse, \
-                                                   void *unused0, void *unused);
+#define DECL(w, opt)                                                         \
+  int vpx_highbd_sub_pixel_avg_variance##w##xh_##opt(                        \
+      const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
+      const uint16_t *dst, ptrdiff_t dst_stride, const uint16_t *sec,        \
+      ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0,    \
+      void *unused);
 #define DECLS(opt1) \
-DECL(16, opt1) \
-DECL(8, opt1)
+  DECL(16, opt1)    \
+  DECL(8, opt1)
 
 DECLS(sse2);
 #undef DECL
 #undef DECLS
 
-#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
-uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt( \
-    const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
-    const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
-    const uint8_t *sec8) { \
-  uint32_t sse; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
-  uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \
-  int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-               src, src_stride, x_offset, \
-               y_offset, dst, dst_stride, sec, w, h, &sse, NULL, NULL); \
-  if (w > wf) { \
-    uint32_t sse2; \
-    int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                  src + 16, src_stride, x_offset, y_offset, \
-                  dst + 16, dst_stride, sec + 16, w, h, &sse2, NULL, NULL); \
-    se += se2; \
-    sse += sse2; \
-    if (w > wf * 2) { \
-      se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                src + 32, src_stride, x_offset, y_offset, \
-                dst + 32, dst_stride, sec + 32, w, h, &sse2, NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-      se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                src + 48, src_stride, x_offset, y_offset, \
-                dst + 48, dst_stride, sec + 48, w, h, &sse2, NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-    } \
-  } \
-  *sse_ptr = sse; \
-  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
-} \
-\
-uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \
-    const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
-    const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
-    const uint8_t *sec8) { \
-  uint32_t sse; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
-  uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \
-  int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                                            src, src_stride, x_offset, \
-                                            y_offset, dst, dst_stride, \
-                                            sec, w, h, &sse, NULL, NULL); \
-  if (w > wf) { \
-    uint32_t sse2; \
-    int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                                            src + 16, src_stride, \
-                                            x_offset, y_offset, \
-                                            dst + 16, dst_stride, \
-                                            sec + 16, w, h, &sse2, \
-                                            NULL, NULL); \
-    se += se2; \
-    sse += sse2; \
-    if (w > wf * 2) { \
-      se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                                            src + 32, src_stride, \
-                                            x_offset, y_offset, \
-                                            dst + 32, dst_stride, \
-                                            sec + 32, w, h, &sse2, \
-                                            NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-      se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                                            src + 48, src_stride, \
-                                            x_offset, y_offset, \
-                                            dst + 48, dst_stride, \
-                                            sec + 48, w, h, &sse2, \
-                                            NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-    } \
-  } \
-  se = ROUND_POWER_OF_TWO(se, 2); \
-  sse = ROUND_POWER_OF_TWO(sse, 4); \
-  *sse_ptr = sse; \
-  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
-} \
-\
-uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \
-    const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
-    const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
-    const uint8_t *sec8) { \
-  int start_row; \
-  uint32_t sse; \
-  int se = 0; \
-  uint64_t long_sse = 0; \
-  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
-  uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \
-  for (start_row = 0; start_row < h; start_row +=16) { \
-    uint32_t sse2; \
-    int height = h - start_row < 16 ? h - start_row : 16; \
-    int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                src + (start_row * src_stride), src_stride, x_offset, \
-                y_offset, dst + (start_row * dst_stride), dst_stride, \
-                sec + (start_row * w), w, height, &sse2, NULL, NULL); \
-    se += se2; \
-    long_sse += sse2; \
-    if (w > wf) { \
-      se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                src + 16 + (start_row * src_stride), src_stride, \
-                x_offset, y_offset, \
-                dst + 16 + (start_row * dst_stride), dst_stride, \
-                sec + 16 + (start_row * w), w, height, &sse2, NULL, NULL); \
-      se += se2; \
-      long_sse += sse2; \
-      if (w > wf * 2) { \
-        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                src + 32 + (start_row * src_stride), src_stride, \
-                x_offset, y_offset, \
-                dst + 32 + (start_row * dst_stride), dst_stride, \
-                sec + 32 + (start_row * w), w, height, &sse2, NULL, NULL); \
-        se += se2; \
-        long_sse += sse2; \
-        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
-                src + 48 + (start_row * src_stride), src_stride, \
-                x_offset, y_offset, \
-                dst + 48 + (start_row * dst_stride), dst_stride, \
-                sec + 48 + (start_row * w), w, height, &sse2, NULL, NULL); \
-        se += se2; \
-        long_sse += sse2; \
-      } \
-    } \
-  } \
-  se = ROUND_POWER_OF_TWO(se, 4); \
-  sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \
-  *sse_ptr = sse; \
-  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
-}
-
+#define FN(w, h, wf, wlog2, hlog2, opt, cast)                                  \
+  uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt(               \
+      const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
+      const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr,                  \
+      const uint8_t *sec8) {                                                   \
+    uint32_t sse;                                                              \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
+    uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
+    uint16_t *sec = CONVERT_TO_SHORTPTR(sec8);                                 \
+    int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                  \
+        src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
+        NULL, NULL);                                                           \
+    if (w > wf) {                                                              \
+      uint32_t sse2;                                                           \
+      int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+          src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride,      \
+          sec + 16, w, h, &sse2, NULL, NULL);                                  \
+      se += se2;                                                               \
+      sse += sse2;                                                             \
+      if (w > wf * 2) {                                                        \
+        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+            src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride,    \
+            sec + 32, w, h, &sse2, NULL, NULL);                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+            src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride,    \
+            sec + 48, w, h, &sse2, NULL, NULL);                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+      }                                                                        \
+    }                                                                          \
+    *sse_ptr = sse;                                                            \
+    return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
+  }                                                                            \
+                                                                               \
+  uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt(              \
+      const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
+      const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr,                  \
+      const uint8_t *sec8) {                                                   \
+    uint32_t sse;                                                              \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
+    uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
+    uint16_t *sec = CONVERT_TO_SHORTPTR(sec8);                                 \
+    int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                  \
+        src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
+        NULL, NULL);                                                           \
+    if (w > wf) {                                                              \
+      uint32_t sse2;                                                           \
+      int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+          src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride,      \
+          sec + 16, w, h, &sse2, NULL, NULL);                                  \
+      se += se2;                                                               \
+      sse += sse2;                                                             \
+      if (w > wf * 2) {                                                        \
+        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+            src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride,    \
+            sec + 32, w, h, &sse2, NULL, NULL);                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+            src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride,    \
+            sec + 48, w, h, &sse2, NULL, NULL);                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+      }                                                                        \
+    }                                                                          \
+    se = ROUND_POWER_OF_TWO(se, 2);                                            \
+    sse = ROUND_POWER_OF_TWO(sse, 4);                                          \
+    *sse_ptr = sse;                                                            \
+    return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
+  }                                                                            \
+                                                                               \
+  uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt(              \
+      const uint8_t *src8, int src_stride, int x_offset, int y_offset,         \
+      const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr,                  \
+      const uint8_t *sec8) {                                                   \
+    int start_row;                                                             \
+    uint32_t sse;                                                              \
+    int se = 0;                                                                \
+    uint64_t long_sse = 0;                                                     \
+    uint16_t *src = CONVERT_TO_SHORTPTR(src8);                                 \
+    uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);                                 \
+    uint16_t *sec = CONVERT_TO_SHORTPTR(sec8);                                 \
+    for (start_row = 0; start_row < h; start_row += 16) {                      \
+      uint32_t sse2;                                                           \
+      int height = h - start_row < 16 ? h - start_row : 16;                    \
+      int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+          src + (start_row * src_stride), src_stride, x_offset, y_offset,      \
+          dst + (start_row * dst_stride), dst_stride, sec + (start_row * w),   \
+          w, height, &sse2, NULL, NULL);                                       \
+      se += se2;                                                               \
+      long_sse += sse2;                                                        \
+      if (w > wf) {                                                            \
+        se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(                 \
+            src + 16 + (start_row * src_stride), src_stride, x_offset,         \
+            y_offset, dst + 16 + (start_row * dst_stride), dst_stride,         \
+            sec + 16 + (start_row * w), w, height, &sse2, NULL, NULL);         \
+        se += se2;                                                             \
+        long_sse += sse2;                                                      \
+        if (w > wf * 2) {                                                      \
+          se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+              src + 32 + (start_row * src_stride), src_stride, x_offset,       \
+              y_offset, dst + 32 + (start_row * dst_stride), dst_stride,       \
+              sec + 32 + (start_row * w), w, height, &sse2, NULL, NULL);       \
+          se += se2;                                                           \
+          long_sse += sse2;                                                    \
+          se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt(               \
+              src + 48 + (start_row * src_stride), src_stride, x_offset,       \
+              y_offset, dst + 48 + (start_row * dst_stride), dst_stride,       \
+              sec + 48 + (start_row * w), w, height, &sse2, NULL, NULL);       \
+          se += se2;                                                           \
+          long_sse += sse2;                                                    \
+        }                                                                      \
+      }                                                                        \
+    }                                                                          \
+    se = ROUND_POWER_OF_TWO(se, 4);                                            \
+    sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8);                           \
+    *sse_ptr = sse;                                                            \
+    return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
+  }
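+// Same splitting scheme as the plain sub-pixel variance functions above; the
+// second prediction buffer sec (whose stride is w) is advanced by the same
+// column and row offsets so each strip is averaged against its matching
+// region.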
 
-#define FNS(opt1) \
-FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
-FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
-FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
-FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
-FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
-FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
-FN(16, 16, 16, 4, 4, opt1, (int64_t)); \
-FN(16, 8, 16, 4, 3, opt1, (int64_t)); \
-FN(8, 16, 8, 4, 3, opt1, (int64_t)); \
-FN(8, 8, 8, 3, 3, opt1, (int64_t)); \
-FN(8, 4, 8, 3, 2, opt1, (int64_t));
+#define FNS(opt1)                        \
+  FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
+  FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
+  FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
+  FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
+  FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
+  FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
+  FN(16, 16, 16, 4, 4, opt1, (int64_t)); \
+  FN(16, 8, 16, 4, 3, opt1, (int64_t));  \
+  FN(8, 16, 8, 3, 4, opt1, (int64_t));   \
+  FN(8, 8, 8, 3, 3, opt1, (int64_t));    \
+  FN(8, 4, 8, 3, 2, opt1, (int64_t));
 
 FNS(sse2);
 
diff --git a/vpx_dsp/x86/inv_txfm_sse2.c b/vpx_dsp/x86/inv_txfm_sse2.c
index 7c006400f437a71b376409eec2b694f3da0ef89a..929482b738172f205610c06772167e38e812cd76 100644
--- a/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/vpx_dsp/x86/inv_txfm_sse2.c
@@ -12,14 +12,14 @@
 #include "vpx_dsp/x86/inv_txfm_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"
 
-#define RECON_AND_STORE4X4(dest, in_x) \
-{                                                     \
-  __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \
-  d0 = _mm_unpacklo_epi8(d0, zero); \
-  d0 = _mm_add_epi16(in_x, d0); \
-  d0 = _mm_packus_epi16(d0, d0); \
-  *(int *)(dest) = _mm_cvtsi128_si32(d0); \
-}
+#define RECON_AND_STORE4X4(dest, in_x)                    \
+  {                                                       \
+    __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \
+    d0 = _mm_unpacklo_epi8(d0, zero);                     \
+    d0 = _mm_add_epi16(in_x, d0);                         \
+    d0 = _mm_packus_epi16(d0, d0);                        \
+    *(int *)(dest) = _mm_cvtsi128_si32(d0);               \
+  }
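+// RECON_AND_STORE4X4 reconstructs one 4-pixel row: load 4 bytes, zero-extend
+// to 16 bits via the in-scope zero register, add the residual row in in_x,
+// then pack with unsigned saturation and store the low 4 bytes back to dest.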
 
 void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
@@ -263,192 +263,189 @@ void iadst4_sse2(__m128i *in) {
   in[1] = _mm_packs_epi32(u[2], u[3]);
 }
 
-#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, \
-                      out0, out1, out2, out3, out4, out5, out6, out7) \
-  {                                                     \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
-    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \
-    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \
-    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \
-    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \
-    const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5); \
-    const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7); \
-                                                        \
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
-    const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); \
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
-    const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
-    const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
-    const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \
-                                                            \
-    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
-    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
-    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
-    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
-    out4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \
-    out5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \
-    out6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \
-    out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \
+#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                      out2, out3, out4, out5, out6, out7)                 \
+  {                                                                       \
+    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);                   \
+    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);                   \
+    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1);                   \
+    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3);                   \
+    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5);                   \
+    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7);                   \
+    const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5);                   \
+    const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7);                   \
+                                                                          \
+    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);               \
+    const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);               \
+    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);               \
+    const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);               \
+    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);               \
+    const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);               \
+    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);               \
+    const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);               \
+                                                                          \
+    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                              \
+    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                              \
+    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                              \
+    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                              \
+    out4 = _mm_unpacklo_epi64(tr1_1, tr1_5);                              \
+    out5 = _mm_unpackhi_epi64(tr1_1, tr1_5);                              \
+    out6 = _mm_unpacklo_epi64(tr1_3, tr1_7);                              \
+    out7 = _mm_unpackhi_epi64(tr1_3, tr1_7);                              \
   }
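+// TRANSPOSE_8X8 is the usual three-stage SSE2 transpose: rows are interleaved
+// at 16-bit, then 32-bit, then 64-bit granularity, leaving each output
+// register holding one column of the original 8x8 block.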
 
-#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, \
-                         out0, out1, out2, out3) \
-  {                                              \
-    const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1); \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0); \
-    const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3); \
-    const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2); \
-    \
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
-    \
-    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
-    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
-    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
-    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, out0, out1, out2, out3) \
+  {                                                                      \
+    const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1);                \
+    const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0);                \
+    const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3);                \
+    const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2);                \
+                                                                         \
+    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);              \
+    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);              \
+    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);              \
+    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);              \
+                                                                         \
+    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                             \
+    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                             \
+    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                             \
+    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                             \
   }
 
 #define TRANSPOSE_8X8_10(in0, in1, in2, in3, out0, out1) \
-  {                                            \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
-    out0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
-    out1 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+  {                                                      \
+    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);  \
+    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);  \
+    out0 = _mm_unpacklo_epi32(tr0_0, tr0_1);             \
+    out1 = _mm_unpackhi_epi32(tr0_0, tr0_1);             \
   }
 
 // Define Macro for multiplying elements by constants and adding them together.
-#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, \
-                               cst0, cst1, cst2, cst3, res0, res1, res2, res3) \
-  {   \
-      tmp0 = _mm_madd_epi16(lo_0, cst0); \
-      tmp1 = _mm_madd_epi16(hi_0, cst0); \
-      tmp2 = _mm_madd_epi16(lo_0, cst1); \
-      tmp3 = _mm_madd_epi16(hi_0, cst1); \
-      tmp4 = _mm_madd_epi16(lo_1, cst2); \
-      tmp5 = _mm_madd_epi16(hi_1, cst2); \
-      tmp6 = _mm_madd_epi16(lo_1, cst3); \
-      tmp7 = _mm_madd_epi16(hi_1, cst3); \
-      \
-      tmp0 = _mm_add_epi32(tmp0, rounding); \
-      tmp1 = _mm_add_epi32(tmp1, rounding); \
-      tmp2 = _mm_add_epi32(tmp2, rounding); \
-      tmp3 = _mm_add_epi32(tmp3, rounding); \
-      tmp4 = _mm_add_epi32(tmp4, rounding); \
-      tmp5 = _mm_add_epi32(tmp5, rounding); \
-      tmp6 = _mm_add_epi32(tmp6, rounding); \
-      tmp7 = _mm_add_epi32(tmp7, rounding); \
-      \
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-      tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
-      tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
-      tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
-      tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
-      \
-      res0 = _mm_packs_epi32(tmp0, tmp1); \
-      res1 = _mm_packs_epi32(tmp2, tmp3); \
-      res2 = _mm_packs_epi32(tmp4, tmp5); \
-      res3 = _mm_packs_epi32(tmp6, tmp7); \
+#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, cst0, cst1, cst2, cst3, \
+                               res0, res1, res2, res3)                         \
+  {                                                                            \
+    tmp0 = _mm_madd_epi16(lo_0, cst0);                                         \
+    tmp1 = _mm_madd_epi16(hi_0, cst0);                                         \
+    tmp2 = _mm_madd_epi16(lo_0, cst1);                                         \
+    tmp3 = _mm_madd_epi16(hi_0, cst1);                                         \
+    tmp4 = _mm_madd_epi16(lo_1, cst2);                                         \
+    tmp5 = _mm_madd_epi16(hi_1, cst2);                                         \
+    tmp6 = _mm_madd_epi16(lo_1, cst3);                                         \
+    tmp7 = _mm_madd_epi16(hi_1, cst3);                                         \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+    tmp4 = _mm_add_epi32(tmp4, rounding);                                      \
+    tmp5 = _mm_add_epi32(tmp5, rounding);                                      \
+    tmp6 = _mm_add_epi32(tmp6, rounding);                                      \
+    tmp7 = _mm_add_epi32(tmp7, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);                               \
+    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);                               \
+    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);                               \
+    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);                               \
+                                                                               \
+    res0 = _mm_packs_epi32(tmp0, tmp1);                                        \
+    res1 = _mm_packs_epi32(tmp2, tmp3);                                        \
+    res2 = _mm_packs_epi32(tmp4, tmp5);                                        \
+    res3 = _mm_packs_epi32(tmp6, tmp7);                                        \
   }
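
The block above is the workhorse butterfly of every transform in this file: _mm_madd_epi16 multiplies eight interleaved 16-bit pairs and sums adjacent products into four 32-bit lanes, so with inputs interleaved as (x, y) and a constant register holding the pair (c0, c1), each lane computes x*c0 + y*c1. The rounding add and the arithmetic shift then rescale the fixed-point product, and _mm_packs_epi32 narrows back to 16 bits with saturation. A minimal scalar model of one lane, assuming DCT_CONST_BITS is 14 as in vpx_dsp/txfm_common.h:

#include <stdint.h>

#define DCT_CONST_BITS 14 /* as in txfm_common.h */
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

/* One 32-bit lane of the madd/round/shift/pack pipeline above. */
static int16_t butterfly_lane(int16_t x, int16_t y, int16_t c0, int16_t c1) {
  int32_t t = (int32_t)x * c0 + (int32_t)y * c1;  /* _mm_madd_epi16 */
  t = (t + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; /* round, then srai */
  if (t > INT16_MAX) t = INT16_MAX;               /* _mm_packs_epi32 */
  if (t < INT16_MIN) t = INT16_MIN;               /* saturates */
  return (int16_t)t;
}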
 
 #define MULTIPLICATION_AND_ADD_2(lo_0, hi_0, cst0, cst1, res0, res1) \
-  {   \
-      tmp0 = _mm_madd_epi16(lo_0, cst0); \
-      tmp1 = _mm_madd_epi16(hi_0, cst0); \
-      tmp2 = _mm_madd_epi16(lo_0, cst1); \
-      tmp3 = _mm_madd_epi16(hi_0, cst1); \
-      \
-      tmp0 = _mm_add_epi32(tmp0, rounding); \
-      tmp1 = _mm_add_epi32(tmp1, rounding); \
-      tmp2 = _mm_add_epi32(tmp2, rounding); \
-      tmp3 = _mm_add_epi32(tmp3, rounding); \
-      \
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-      \
-      res0 = _mm_packs_epi32(tmp0, tmp1); \
-      res1 = _mm_packs_epi32(tmp2, tmp3); \
+  {                                                                  \
+    tmp0 = _mm_madd_epi16(lo_0, cst0);                               \
+    tmp1 = _mm_madd_epi16(hi_0, cst0);                               \
+    tmp2 = _mm_madd_epi16(lo_0, cst1);                               \
+    tmp3 = _mm_madd_epi16(hi_0, cst1);                               \
+                                                                     \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                            \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                            \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                            \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                            \
+                                                                     \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                     \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                     \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                     \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                     \
+                                                                     \
+    res0 = _mm_packs_epi32(tmp0, tmp1);                              \
+    res1 = _mm_packs_epi32(tmp2, tmp3);                              \
   }
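
MULTIPLICATION_AND_ADD_2 is the half-size variant for stages that rotate a single pair. A self-contained sketch of how one stage-1 rotation of the 8x8 path maps onto it; pair16() stands in for this file's pair_set_epi16(), and the constants are round(2^14 * cos(k*pi/64)) from vpx_dsp/txfm_common.h:

#include <emmintrin.h>
#include <stdint.h>

/* Broadcast the 16-bit pair (a, b) into every 32-bit lane. */
static __m128i pair16(int16_t a, int16_t b) {
  return _mm_set1_epi32((uint16_t)a | ((uint32_t)(uint16_t)b << 16));
}

/* With lo_17/hi_17 unpacked from (in1, in7) as (x, y) pairs, passing these
 * two constants as cst0/cst1 yields
 *   res0 = x * cospi_28_64 - y * cospi_4_64
 *   res1 = x * cospi_4_64  + y * cospi_28_64
 * i.e. one fixed-point rotation of the stage-1 butterfly. */
static __m128i stg1_0_sketch(void) { return pair16(3196, -16069); }
static __m128i stg1_1_sketch(void) { return pair16(16069, 3196); }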
 
-#define IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, \
-              out0, out1, out2, out3, out4, out5, out6, out7)  \
-  { \
-  /* Stage1 */      \
-  { \
-    const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7); \
-    const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7); \
-    const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5); \
-    const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5); \
-    \
-    MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, \
-                          stg1_1, stg1_2, stg1_3, stp1_4,      \
-                          stp1_7, stp1_5, stp1_6)              \
-  } \
-    \
-  /* Stage2 */ \
-  { \
-    const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4); \
-    const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4); \
-    const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6); \
-    const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6); \
-    \
-    MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, \
-                           stg2_1, stg2_2, stg2_3, stp2_0,     \
-                           stp2_1, stp2_2, stp2_3)             \
-    \
-    stp2_4 = _mm_adds_epi16(stp1_4, stp1_5); \
-    stp2_5 = _mm_subs_epi16(stp1_4, stp1_5); \
-    stp2_6 = _mm_subs_epi16(stp1_7, stp1_6); \
-    stp2_7 = _mm_adds_epi16(stp1_7, stp1_6); \
-  } \
-    \
-  /* Stage3 */ \
-  { \
-    const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-    const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-    \
-    stp1_0 = _mm_adds_epi16(stp2_0, stp2_3); \
-    stp1_1 = _mm_adds_epi16(stp2_1, stp2_2); \
-    stp1_2 = _mm_subs_epi16(stp2_1, stp2_2); \
-    stp1_3 = _mm_subs_epi16(stp2_0, stp2_3); \
-    \
-    tmp0 = _mm_madd_epi16(lo_56, stg2_1); \
-    tmp1 = _mm_madd_epi16(hi_56, stg2_1); \
-    tmp2 = _mm_madd_epi16(lo_56, stg2_0); \
-    tmp3 = _mm_madd_epi16(hi_56, stg2_0); \
-    \
-    tmp0 = _mm_add_epi32(tmp0, rounding); \
-    tmp1 = _mm_add_epi32(tmp1, rounding); \
-    tmp2 = _mm_add_epi32(tmp2, rounding); \
-    tmp3 = _mm_add_epi32(tmp3, rounding); \
-    \
-    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-    \
-    stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-    stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-  } \
-  \
-  /* Stage4  */ \
-  out0 = _mm_adds_epi16(stp1_0, stp2_7); \
-  out1 = _mm_adds_epi16(stp1_1, stp1_6); \
-  out2 = _mm_adds_epi16(stp1_2, stp1_5); \
-  out3 = _mm_adds_epi16(stp1_3, stp2_4); \
-  out4 = _mm_subs_epi16(stp1_3, stp2_4); \
-  out5 = _mm_subs_epi16(stp1_2, stp1_5); \
-  out6 = _mm_subs_epi16(stp1_1, stp1_6); \
-  out7 = _mm_subs_epi16(stp1_0, stp2_7); \
+#define IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3, \
+              out4, out5, out6, out7)                                         \
+  {                                                                           \
+    /* Stage1 */                                                              \
+    {                                                                         \
+      const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7);                     \
+      const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7);                     \
+      const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5);                     \
+      const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5);                     \
+                                                                              \
+      MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, stg1_1,      \
+                             stg1_2, stg1_3, stp1_4, stp1_7, stp1_5, stp1_6)  \
+    }                                                                         \
+                                                                              \
+    /* Stage2 */                                                              \
+    {                                                                         \
+      const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4);                     \
+      const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4);                     \
+      const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6);                     \
+      const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6);                     \
+                                                                              \
+      MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, stg2_1,      \
+                             stg2_2, stg2_3, stp2_0, stp2_1, stp2_2, stp2_3)  \
+                                                                              \
+      stp2_4 = _mm_adds_epi16(stp1_4, stp1_5);                                \
+      stp2_5 = _mm_subs_epi16(stp1_4, stp1_5);                                \
+      stp2_6 = _mm_subs_epi16(stp1_7, stp1_6);                                \
+      stp2_7 = _mm_adds_epi16(stp1_7, stp1_6);                                \
+    }                                                                         \
+                                                                              \
+    /* Stage3 */                                                              \
+    {                                                                         \
+      const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5);               \
+      const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5);               \
+                                                                              \
+      stp1_0 = _mm_adds_epi16(stp2_0, stp2_3);                                \
+      stp1_1 = _mm_adds_epi16(stp2_1, stp2_2);                                \
+      stp1_2 = _mm_subs_epi16(stp2_1, stp2_2);                                \
+      stp1_3 = _mm_subs_epi16(stp2_0, stp2_3);                                \
+                                                                              \
+      tmp0 = _mm_madd_epi16(lo_56, stg2_1);                                   \
+      tmp1 = _mm_madd_epi16(hi_56, stg2_1);                                   \
+      tmp2 = _mm_madd_epi16(lo_56, stg2_0);                                   \
+      tmp3 = _mm_madd_epi16(hi_56, stg2_0);                                   \
+                                                                              \
+      tmp0 = _mm_add_epi32(tmp0, rounding);                                   \
+      tmp1 = _mm_add_epi32(tmp1, rounding);                                   \
+      tmp2 = _mm_add_epi32(tmp2, rounding);                                   \
+      tmp3 = _mm_add_epi32(tmp3, rounding);                                   \
+                                                                              \
+      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                            \
+      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                            \
+      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                            \
+      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                            \
+                                                                              \
+      stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                   \
+      stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                   \
+    }                                                                         \
+                                                                              \
+    /* Stage4  */                                                             \
+    out0 = _mm_adds_epi16(stp1_0, stp2_7);                                    \
+    out1 = _mm_adds_epi16(stp1_1, stp1_6);                                    \
+    out2 = _mm_adds_epi16(stp1_2, stp1_5);                                    \
+    out3 = _mm_adds_epi16(stp1_3, stp2_4);                                    \
+    out4 = _mm_subs_epi16(stp1_3, stp2_4);                                    \
+    out5 = _mm_subs_epi16(stp1_2, stp1_5);                                    \
+    out6 = _mm_subs_epi16(stp1_1, stp1_6);                                    \
+    out7 = _mm_subs_epi16(stp1_0, stp2_7);                                    \
   }
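
IDCT8 is the standard four-stage, eight-point inverse DCT flow graph: stages 1 and 2 rotate the odd and even coefficient pairs, stage 3 folds the even half and rotates (stp2_6, stp2_5) inline (in the 8x8 callers stg2_0/stg2_1 hold cospi_16_64 pairs, so this is the (x + y, x - y) * cospi_16_64 step), and stage 4 combines the halves with saturating adds and subtracts. A scalar model of stage 4, taking even = {stp1_0..stp1_3} and odd = {stp2_7, stp1_6, stp1_5, stp2_4}:

#include <stdint.h>

/* Clamping stands in for _mm_adds_epi16 / _mm_subs_epi16 saturation. */
static int16_t sat16(int32_t v) {
  return (int16_t)(v > INT16_MAX ? INT16_MAX : v < INT16_MIN ? INT16_MIN : v);
}

/* Outputs 0..3 are sums; outputs 7..4 are the mirrored differences. */
static void idct8_stage4(const int16_t even[4], const int16_t odd[4],
                         int16_t out[8]) {
  int i;
  for (i = 0; i < 4; ++i) {
    out[i] = sat16(even[i] + odd[i]);
    out[7 - i] = sat16(even[i] - odd[i]);
  }
}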
 
 void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
@@ -484,12 +481,12 @@ void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
   // 2-D
   for (i = 0; i < 2; i++) {
     // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
-    TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7,
-                  in0, in1, in2, in3, in4, in5, in6, in7);
+    TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                  in4, in5, in6, in7);
 
     // 4-stage 1D idct8x8
-    IDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-          in0, in1, in2, in3, in4, in5, in6, in7);
+    IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5,
+          in6, in7);
   }
 
   // Final rounding and shift
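
The loop above is the usual separable 2-D arrangement: transpose plus 1-D transform, run twice, equals a row pass followed by a column pass over the same eight registers. A sketch of the shape, with both function pointers as hypothetical stand-ins for TRANSPOSE_8X8 and IDCT8:

#include <emmintrin.h>

static void idct2d_8x8(__m128i io[8], void (*transpose8)(__m128i *),
                       void (*transform8)(__m128i *)) {
  int pass;
  for (pass = 0; pass < 2; ++pass) {
    transpose8(io);  /* rows become columns */
    transform8(io);  /* 1-D eight-point transform per register */
  }
}
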
@@ -560,12 +557,12 @@ void idct8_sse2(__m128i *in) {
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
 
   // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
-  TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7],
-                in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
+                in1, in2, in3, in4, in5, in6, in7);
 
   // 4-stage 1D idct8x8
-  IDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
-        in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7]);
+  IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in[0], in[1], in[2], in[3],
+        in[4], in[5], in[6], in[7]);
 }
 
 void iadst8_sse2(__m128i *in) {
@@ -906,8 +903,8 @@ void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
 
   TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, in0, in1, in2, in3)
 
-  IDCT8(in0, in1, in2, in3, zero, zero, zero, zero,
-        in0, in1, in2, in3, in4, in5, in6, in7);
+  IDCT8(in0, in1, in2, in3, zero, zero, zero, zero, in0, in1, in2, in3, in4,
+        in5, in6, in7);
   // Final rounding and shift
   in0 = _mm_adds_epi16(in0, final_rounding);
   in1 = _mm_adds_epi16(in1, final_rounding);
@@ -937,242 +934,234 @@ void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
   RECON_AND_STORE(dest + 7 * stride, in7);
 }
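
vpx_idct8x8_12_add_sse2 is the short-circuit path: with at most the first 12 coefficients nonzero, everything outside the top-left 4x4 is zero, which is why the IDCT8 invocation above passes `zero` for the last four inputs and the first pass needs only a 4x8 transpose. A hedged sketch of the eob dispatch that typically selects among the three 8x8 variants; the wrapper itself is hypothetical (the real selection lives in the decoder, not this file) and assumes the prototypes from vpx_dsp_rtcd.h are in scope:

void idct8x8_add_dispatch(const tran_low_t *input, uint8_t *dest, int stride,
                          int eob) {
  if (eob == 1)
    vpx_idct8x8_1_add_sse2(input, dest, stride);  /* DC-only block */
  else if (eob <= 12)
    vpx_idct8x8_12_add_sse2(input, dest, stride); /* top-left 4x4 only */
  else
    vpx_idct8x8_64_add_sse2(input, dest, stride); /* full 8x8 */
}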
 
-#define IDCT16 \
-  /* Stage2 */ \
-  { \
-    const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]); \
-    const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], in[15]); \
-    const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]);   \
-    const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]);   \
-    const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]); \
-    const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]); \
-    const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]); \
-    const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]); \
-    \
-    MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, \
-                           stg2_0, stg2_1, stg2_2, stg2_3, \
-                           stp2_8, stp2_15, stp2_9, stp2_14) \
-    \
-    MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, \
-                           stg2_4, stg2_5, stg2_6, stg2_7, \
-                           stp2_10, stp2_13, stp2_11, stp2_12) \
-  } \
-    \
-  /* Stage3 */ \
-  { \
-    const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]); \
-    const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]); \
-    const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]); \
-    const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]); \
-    \
-    MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, \
-                           stg3_0, stg3_1, stg3_2, stg3_3, \
-                           stp1_4, stp1_7, stp1_5, stp1_6) \
-    \
-    stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);  \
-    stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);    \
-    stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \
-    stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \
-    \
-    stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); \
-    stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \
-    stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \
-    stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \
-  } \
-  \
-  /* Stage4 */ \
-  { \
-    const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]); \
-    const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]); \
-    const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]); \
-    const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]); \
-    \
-    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
-    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
-    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-    \
-    MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, \
-                           stg4_0, stg4_1, stg4_2, stg4_3, \
-                           stp2_0, stp2_1, stp2_2, stp2_3) \
-    \
-    stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \
-    stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \
-    stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \
-    stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \
-    \
-    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \
-                           stg4_4, stg4_5, stg4_6, stg4_7, \
-                           stp2_9, stp2_14, stp2_10, stp2_13) \
-  } \
-    \
-  /* Stage5 */ \
-  { \
-    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-    \
-    stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \
-    stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \
-    stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \
-    stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \
-    \
-    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
-    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
-    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
-    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
-    \
-    tmp0 = _mm_add_epi32(tmp0, rounding); \
-    tmp1 = _mm_add_epi32(tmp1, rounding); \
-    tmp2 = _mm_add_epi32(tmp2, rounding); \
-    tmp3 = _mm_add_epi32(tmp3, rounding); \
-    \
-    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-    \
-    stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-    stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-    \
-    stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);  \
-    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);    \
-    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);   \
-    stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \
-    \
-    stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \
-    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);   \
-    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);   \
-    stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \
-  } \
-    \
-  /* Stage6 */ \
-  { \
-    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
-    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
-    \
-    stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \
-    stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
-    stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
-    stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \
-    stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \
-    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
-    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
-    stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \
-    \
-    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
-                           stg6_0, stg4_0, stg6_0, stg4_0, \
-                           stp2_10, stp2_13, stp2_11, stp2_12) \
+#define IDCT16                                                                 \
+  /* Stage2 */                                                                 \
+  {                                                                            \
+    const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]);                 \
+    const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], in[15]);                 \
+    const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]);                   \
+    const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]);                   \
+    const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]);                 \
+    const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]);                 \
+    const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]);                 \
+    const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]);                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, stg2_0, stg2_1,   \
+                           stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, stp2_14)   \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, stg2_4, stg2_5, \
+                           stg2_6, stg2_7, stp2_10, stp2_13, stp2_11, stp2_12) \
+  }                                                                            \
+                                                                               \
+  /* Stage3 */                                                                 \
+  {                                                                            \
+    const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]);                 \
+    const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]);                 \
+    const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]);                 \
+    const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]);                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, stg3_0, stg3_1, \
+                           stg3_2, stg3_3, stp1_4, stp1_7, stp1_5, stp1_6)     \
+                                                                               \
+    stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);                                  \
+    stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);                                    \
+    stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);                                 \
+    stp1_11 = _mm_add_epi16(stp2_11, stp2_10);                                 \
+                                                                               \
+    stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13);                               \
+    stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);                                 \
+    stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);                                 \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_14);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage4 */                                                                 \
+  {                                                                            \
+    const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]);                   \
+    const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]);                   \
+    const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]);                 \
+    const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]);                 \
+                                                                               \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);               \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);               \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, stg4_0, stg4_1,   \
+                           stg4_2, stg4_3, stp2_0, stp2_1, stp2_2, stp2_3)     \
+                                                                               \
+    stp2_4 = _mm_add_epi16(stp1_4, stp1_5);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);                                    \
+    stp2_7 = _mm_add_epi16(stp1_7, stp1_6);                                    \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,       \
+                           stg4_5, stg4_6, stg4_7, stp2_9, stp2_14, stp2_10,   \
+                           stp2_13)                                            \
+  }                                                                            \
+                                                                               \
+  /* Stage5 */                                                                 \
+  {                                                                            \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);                 \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);                 \
+                                                                               \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_3);                                    \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_2);                                    \
+    stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);                                    \
+    stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);                                    \
+                                                                               \
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);                                     \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);                                     \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);                                     \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);                                     \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+                                                                               \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                      \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                      \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);                                 \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);                                   \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11);                                \
+                                                                               \
+    stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0);                               \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);                                 \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);                                 \
+    stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0);                               \
+  }                                                                            \
+                                                                               \
+  /* Stage6 */                                                                 \
+  {                                                                            \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);             \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);             \
+                                                                               \
+    stp2_0 = _mm_add_epi16(stp1_0, stp2_7);                                    \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);                                    \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);                                    \
+    stp2_3 = _mm_add_epi16(stp1_3, stp2_4);                                    \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp2_4);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);                                    \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp2_7);                                    \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
   }
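
IDCT16 replays the same butterfly grammar at sixteen points across stages 2 through 6, ping-ponging results between the stp1_* and stp2_* register sets; stp1_8_0 and stp1_12_0 keep stage-3 sums under separate names so stage 5 can still read them after stp1_8 and stp1_12 are rewritten. Unlike IDCT8 it uses the wrapping _mm_add_epi16/_mm_sub_epi16 rather than the saturating forms, and it stops at stage 6: the final fold of the even half against the odd half happens at the call sites. That fold follows the usual even/odd decomposition; a scalar sketch, with even[] the eight even-half results and odd[] the odd half (cf. the output[i] = step2[i] +/- step2[15 - i] pattern of the scalar idct16):

#include <stdint.h>

static void idct16_final_fold(const int16_t even[8], const int16_t odd[8],
                              int16_t out[16]) {
  int i;
  for (i = 0; i < 8; ++i) {
    out[i] = (int16_t)(even[i] + odd[7 - i]);
    out[15 - i] = (int16_t)(even[i] - odd[7 - i]);
  }
}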
 
-#define IDCT16_10 \
-    /* Stage2 */ \
-    { \
-      const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], zero); \
-      const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], zero); \
-      const __m128i lo_13_3 = _mm_unpacklo_epi16(zero, in[3]); \
-      const __m128i hi_13_3 = _mm_unpackhi_epi16(zero, in[3]); \
-      \
-      MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_13_3, hi_13_3, \
-                             stg2_0, stg2_1, stg2_6, stg2_7, \
-                             stp1_8_0, stp1_15, stp1_11, stp1_12_0) \
-    } \
-      \
-    /* Stage3 */ \
-    { \
-      const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], zero); \
-      const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], zero); \
-      \
-      MULTIPLICATION_AND_ADD_2(lo_2_14, hi_2_14, \
-                               stg3_0, stg3_1,  \
-                               stp2_4, stp2_7) \
-      \
-      stp1_9  =  stp1_8_0; \
-      stp1_10 =  stp1_11;  \
-      \
-      stp1_13 = stp1_12_0; \
-      stp1_14 = stp1_15;   \
-    } \
-    \
-    /* Stage4 */ \
-    { \
-      const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero); \
-      const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], zero); \
-      \
-      const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
-      const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
-      const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-      const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-      \
-      MULTIPLICATION_AND_ADD_2(lo_0_8, hi_0_8, \
-                               stg4_0, stg4_1, \
-                               stp1_0, stp1_1) \
-      stp2_5 = stp2_4; \
-      stp2_6 = stp2_7; \
-      \
-      MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \
-                             stg4_4, stg4_5, stg4_6, stg4_7, \
-                             stp2_9, stp2_14, stp2_10, stp2_13) \
-    } \
-      \
-    /* Stage5 */ \
-    { \
-      const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-      const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-      \
-      stp1_2 = stp1_1; \
-      stp1_3 = stp1_0; \
-      \
-      tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
-      tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
-      tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
-      tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
-      \
-      tmp0 = _mm_add_epi32(tmp0, rounding); \
-      tmp1 = _mm_add_epi32(tmp1, rounding); \
-      tmp2 = _mm_add_epi32(tmp2, rounding); \
-      tmp3 = _mm_add_epi32(tmp3, rounding); \
-      \
-      tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-      tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-      tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-      tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-      \
-      stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-      stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-      \
-      stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);  \
-      stp1_9 = _mm_add_epi16(stp2_9, stp2_10);    \
-      stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);   \
-      stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \
-      \
-      stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \
-      stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);   \
-      stp1_14 = _mm_add_epi16(stp2_14, stp2_13);   \
-      stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \
-    } \
-      \
-    /* Stage6 */ \
-    { \
-      const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-      const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-      const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
-      const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
-      \
-      stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \
-      stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
-      stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
-      stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \
-      stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \
-      stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
-      stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
-      stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \
-      \
-      MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
-                             stg6_0, stg4_0, stg6_0, stg4_0, \
-                             stp2_10, stp2_13, stp2_11, stp2_12) \
-    }
+#define IDCT16_10                                                              \
+  /* Stage2 */                                                                 \
+  {                                                                            \
+    const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], zero);                   \
+    const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], zero);                   \
+    const __m128i lo_13_3 = _mm_unpacklo_epi16(zero, in[3]);                   \
+    const __m128i hi_13_3 = _mm_unpackhi_epi16(zero, in[3]);                   \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_13_3, hi_13_3, stg2_0, stg2_1, \
+                           stg2_6, stg2_7, stp1_8_0, stp1_15, stp1_11,         \
+                           stp1_12_0)                                          \
+  }                                                                            \
+                                                                               \
+  /* Stage3 */                                                                 \
+  {                                                                            \
+    const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], zero);                   \
+    const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], zero);                   \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_2_14, hi_2_14, stg3_0, stg3_1, stp2_4, stp2_7) \
+                                                                               \
+    stp1_9 = stp1_8_0;                                                         \
+    stp1_10 = stp1_11;                                                         \
+                                                                               \
+    stp1_13 = stp1_12_0;                                                       \
+    stp1_14 = stp1_15;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage4 */                                                                 \
+  {                                                                            \
+    const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero);                    \
+    const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], zero);                    \
+                                                                               \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);               \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);               \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_0_8, hi_0_8, stg4_0, stg4_1, stp1_0, stp1_1)   \
+    stp2_5 = stp2_4;                                                           \
+    stp2_6 = stp2_7;                                                           \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,       \
+                           stg4_5, stg4_6, stg4_7, stp2_9, stp2_14, stp2_10,   \
+                           stp2_13)                                            \
+  }                                                                            \
+                                                                               \
+  /* Stage5 */                                                                 \
+  {                                                                            \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);                 \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);                 \
+                                                                               \
+    stp1_2 = stp1_1;                                                           \
+    stp1_3 = stp1_0;                                                           \
+                                                                               \
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);                                     \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);                                     \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);                                     \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);                                     \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+                                                                               \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                      \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                      \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);                                 \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);                                   \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11);                                \
+                                                                               \
+    stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0);                               \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);                                 \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);                                 \
+    stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0);                               \
+  }                                                                            \
+                                                                               \
+  /* Stage6 */                                                                 \
+  {                                                                            \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);             \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);             \
+                                                                               \
+    stp2_0 = _mm_add_epi16(stp1_0, stp2_7);                                    \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);                                    \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);                                    \
+    stp2_3 = _mm_add_epi16(stp1_3, stp2_4);                                    \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp2_4);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);                                    \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp2_7);                                    \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
+  }
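
IDCT16_10 is IDCT16 specialized for blocks whose nonzero coefficients all sit in the first four row registers (the eob <= 10 case in the decoder's dispatch). With in[5], in[7], in[9], in[11], in[13] and in[15] zero, the stage-2 rotations that fed stp2_9/stp2_14 and stp2_10/stp2_13 in the full macro produce zero, so stage 3 collapses to the plain copies seen above:

    stp1_9  = stp2_8  - stp2_9  = stp2_8  = stp1_8_0
    stp1_10 = stp2_11 - stp2_10 = stp2_11 = stp1_11
    stp1_13 = stp2_12 - stp2_13 = stp2_12 = stp1_12_0
    stp1_14 = stp2_15 - stp2_14 = stp2_15 = stp1_15

The same zero-input argument reduces the full four-output butterflies of stages 3 and 4 to the MULTIPLICATION_AND_ADD_2 calls on (in[2], zero) and (in[0], zero).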
 
 void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
@@ -1207,10 +1196,10 @@ void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
 
   __m128i in[16], l[16], r[16], *curr1;
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
-          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
-          stp1_8_0, stp1_12_0;
+      stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+      stp1_8_0, stp1_12_0;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
-          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i;
 
@@ -1312,16 +1301,16 @@ void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest,
   dc_value = _mm_set1_epi16(a);
 
   for (i = 0; i < 2; ++i) {
-    RECON_AND_STORE(dest +  0 * stride, dc_value);
-    RECON_AND_STORE(dest +  1 * stride, dc_value);
-    RECON_AND_STORE(dest +  2 * stride, dc_value);
-    RECON_AND_STORE(dest +  3 * stride, dc_value);
-    RECON_AND_STORE(dest +  4 * stride, dc_value);
-    RECON_AND_STORE(dest +  5 * stride, dc_value);
-    RECON_AND_STORE(dest +  6 * stride, dc_value);
-    RECON_AND_STORE(dest +  7 * stride, dc_value);
-    RECON_AND_STORE(dest +  8 * stride, dc_value);
-    RECON_AND_STORE(dest +  9 * stride, dc_value);
+    RECON_AND_STORE(dest + 0 * stride, dc_value);
+    RECON_AND_STORE(dest + 1 * stride, dc_value);
+    RECON_AND_STORE(dest + 2 * stride, dc_value);
+    RECON_AND_STORE(dest + 3 * stride, dc_value);
+    RECON_AND_STORE(dest + 4 * stride, dc_value);
+    RECON_AND_STORE(dest + 5 * stride, dc_value);
+    RECON_AND_STORE(dest + 6 * stride, dc_value);
+    RECON_AND_STORE(dest + 7 * stride, dc_value);
+    RECON_AND_STORE(dest + 8 * stride, dc_value);
+    RECON_AND_STORE(dest + 9 * stride, dc_value);
     RECON_AND_STORE(dest + 10 * stride, dc_value);
     RECON_AND_STORE(dest + 11 * stride, dc_value);
     RECON_AND_STORE(dest + 12 * stride, dc_value);
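
The DC-only 16x16 path splats a single rounded DC residual and reconstructs each row in two 8-pixel halves; RECON_AND_STORE (defined earlier in this file) widens eight destination pixels to 16 bits, adds the residual, and packs back with unsigned saturation. A scalar model of one reconstructed pixel:

#include <stdint.h>

static uint8_t recon_pixel(uint8_t dst, int16_t residual) {
  int32_t v = dst + residual; /* unpack to 16 bits, then add */
  if (v < 0) v = 0;           /* _mm_packus_epi16 clamps     */
  if (v > 255) v = 255;       /* to the 0..255 pixel range   */
  return (uint8_t)v;
}
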
@@ -1905,9 +1894,9 @@ static void idct16_8col(__m128i *in) {
   u[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
   u[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
 
-  s[8]  = _mm_packs_epi32(u[0], u[1]);
+  s[8] = _mm_packs_epi32(u[0], u[1]);
   s[15] = _mm_packs_epi32(u[2], u[3]);
-  s[9]  = _mm_packs_epi32(u[4], u[5]);
+  s[9] = _mm_packs_epi32(u[4], u[5]);
   s[14] = _mm_packs_epi32(u[6], u[7]);
   s[10] = _mm_packs_epi32(u[8], u[9]);
   s[13] = _mm_packs_epi32(u[10], u[11]);
@@ -2035,7 +2024,7 @@ static void idct16_8col(__m128i *in) {
   s[7] = _mm_add_epi16(t[6], t[7]);
   s[8] = t[8];
   s[15] = t[15];
-  s[9]  = _mm_packs_epi32(u[8], u[9]);
+  s[9] = _mm_packs_epi32(u[8], u[9]);
   s[14] = _mm_packs_epi32(u[10], u[11]);
   s[10] = _mm_packs_epi32(u[12], u[13]);
   s[13] = _mm_packs_epi32(u[14], u[15]);
@@ -2181,11 +2170,11 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
 
   const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
   __m128i in[16], l[16];
-  __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6,
-          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
-          stp1_8_0, stp1_12_0;
+  __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_8,
+      stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, stp1_8_0,
+      stp1_12_0;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
-          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14;
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i;
   // First 1-D inverse DCT
@@ -2217,7 +2206,7 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
     tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
     tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
 
-    stp2_8  = _mm_packs_epi32(tmp0, tmp2);
+    stp2_8 = _mm_packs_epi32(tmp0, tmp2);
     stp2_11 = _mm_packs_epi32(tmp5, tmp7);
   }
 
@@ -2281,9 +2270,9 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
     tmp2 = _mm_add_epi16(stp2_9, stp2_10);
     tmp3 = _mm_sub_epi16(stp2_9, stp2_10);
 
-    stp1_9  = _mm_unpacklo_epi64(tmp2, zero);
+    stp1_9 = _mm_unpacklo_epi64(tmp2, zero);
     stp1_10 = _mm_unpacklo_epi64(tmp3, zero);
-    stp1_8  = _mm_unpacklo_epi64(tmp0, zero);
+    stp1_8 = _mm_unpacklo_epi64(tmp0, zero);
     stp1_11 = _mm_unpacklo_epi64(tmp1, zero);
 
     stp1_13 = _mm_unpackhi_epi64(tmp3, zero);
@@ -2395,650 +2384,647 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-#define LOAD_DQCOEFF(reg, input) \
-  {  \
+#define LOAD_DQCOEFF(reg, input)  \
+  {                               \
     reg = load_input_data(input); \
-    input += 8; \
-  }  \
-
-#define IDCT32_34 \
-/* Stage1 */ \
-{ \
-  const __m128i zero = _mm_setzero_si128();\
-  const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero); \
-  const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], zero); \
-  \
-  const __m128i lo_25_7= _mm_unpacklo_epi16(zero, in[7]); \
-  const __m128i hi_25_7 = _mm_unpackhi_epi16(zero, in[7]); \
-  \
-  const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], zero); \
-  const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], zero); \
-  \
-  const __m128i lo_29_3 = _mm_unpacklo_epi16(zero, in[3]); \
-  const __m128i hi_29_3 = _mm_unpackhi_epi16(zero, in[3]); \
-  \
-  MULTIPLICATION_AND_ADD_2(lo_1_31, hi_1_31, stg1_0, \
-                         stg1_1, stp1_16, stp1_31); \
-  MULTIPLICATION_AND_ADD_2(lo_25_7, hi_25_7, stg1_6, \
-                         stg1_7, stp1_19, stp1_28); \
-  MULTIPLICATION_AND_ADD_2(lo_5_27, hi_5_27, stg1_8, \
-                         stg1_9, stp1_20, stp1_27); \
-  MULTIPLICATION_AND_ADD_2(lo_29_3, hi_29_3, stg1_14, \
-                         stg1_15, stp1_23, stp1_24); \
-} \
-\
-/* Stage2 */ \
-{ \
-  const __m128i zero = _mm_setzero_si128();\
-  const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero); \
-  const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero); \
-  \
-  const __m128i lo_26_6 = _mm_unpacklo_epi16(zero, in[6]); \
-  const __m128i hi_26_6 = _mm_unpackhi_epi16(zero, in[6]); \
-  \
-  MULTIPLICATION_AND_ADD_2(lo_2_30, hi_2_30, stg2_0, \
-                         stg2_1, stp2_8, stp2_15); \
-  MULTIPLICATION_AND_ADD_2(lo_26_6, hi_26_6, stg2_6, \
-                         stg2_7, stp2_11, stp2_12); \
-  \
-  stp2_16 = stp1_16; \
-  stp2_19 = stp1_19; \
-  \
-  stp2_20 = stp1_20; \
-  stp2_23 = stp1_23; \
-  \
-  stp2_24 = stp1_24; \
-  stp2_27 = stp1_27; \
-  \
-  stp2_28 = stp1_28; \
-  stp2_31 = stp1_31; \
-} \
-\
-/* Stage3 */ \
-{ \
-  const __m128i zero = _mm_setzero_si128();\
-  const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero); \
-  const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero); \
-  \
-  const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1_16, stp1_31); \
-  const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1_16, stp1_31); \
-  const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1_19, stp1_28); \
-  const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1_19, stp1_28); \
-  \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1_20, stp1_27); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1_20, stp1_27); \
-  const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1_23, stp1_24); \
-  const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1_23, stp2_24); \
-  \
-  MULTIPLICATION_AND_ADD_2(lo_4_28, hi_4_28, stg3_0, \
-                         stg3_1, stp1_4, stp1_7); \
-  \
-  stp1_8 = stp2_8; \
-  stp1_11 = stp2_11; \
-  stp1_12 = stp2_12; \
-  stp1_15 = stp2_15; \
-  \
-  MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \
-                         stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \
-                         stp1_18, stp1_29) \
-  MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \
-                         stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \
-                         stp1_22, stp1_25) \
-  \
-  stp1_16 = stp2_16; \
-  stp1_31 = stp2_31; \
-  stp1_19 = stp2_19; \
-  stp1_20 = stp2_20; \
-  stp1_23 = stp2_23; \
-  stp1_24 = stp2_24; \
-  stp1_27 = stp2_27; \
-  stp1_28 = stp2_28; \
-} \
-\
-/* Stage4 */ \
-{ \
-  const __m128i zero = _mm_setzero_si128();\
-  const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero); \
-  const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero); \
-  \
-  const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15); \
-  const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15); \
-  const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12); \
-  const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, stp2_12); \
-  \
-  MULTIPLICATION_AND_ADD_2(lo_0_16, hi_0_16, stg4_0, \
-                         stg4_1, stp2_0, stp2_1); \
-  \
-  stp2_4 = stp1_4; \
-  stp2_5 = stp1_4; \
-  stp2_6 = stp1_7; \
-  stp2_7 = stp1_7; \
-  \
-  MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \
-                         stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \
-                         stp2_10, stp2_13) \
-  \
-  stp2_8 = stp1_8; \
-  stp2_15 = stp1_15; \
-  stp2_11 = stp1_11; \
-  stp2_12 = stp1_12; \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \
-  stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \
-  stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \
-  stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \
-  stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \
-  stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \
-  stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \
-  stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \
-  \
-  stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \
-  stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \
-  stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \
-  stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \
-  stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \
-  stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \
-  stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \
-} \
-\
-/* Stage5 */ \
-{ \
-  const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-  const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-  const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \
-  const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \
-  \
-  const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \
-  const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \
-  const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \
-  const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \
-  \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  \
-  stp1_0 = stp2_0; \
-  stp1_1 = stp2_1; \
-  stp1_2 = stp2_1; \
-  stp1_3 = stp2_0; \
-  \
-  tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
-  tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
-  tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
-  tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
-  \
-  tmp0 = _mm_add_epi32(tmp0, rounding); \
-  tmp1 = _mm_add_epi32(tmp1, rounding); \
-  tmp2 = _mm_add_epi32(tmp2, rounding); \
-  tmp3 = _mm_add_epi32(tmp3, rounding); \
-  \
-  tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-  tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-  tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-  \
-  stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-  stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-  \
-  stp1_4 = stp2_4; \
-  stp1_7 = stp2_7; \
-  \
-  stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \
-  stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \
-  stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \
-  stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \
-  stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \
-  stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \
-  stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \
-  stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \
-  \
-  stp1_16 = stp2_16; \
-  stp1_17 = stp2_17; \
-  \
-  MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \
-                         stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \
-                         stp1_19, stp1_28) \
-  MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \
-                         stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  \
-  stp1_22 = stp2_22; \
-  stp1_23 = stp2_23; \
-  stp1_24 = stp2_24; \
-  stp1_25 = stp2_25; \
-  stp1_30 = stp2_30; \
-  stp1_31 = stp2_31; \
-} \
-\
-/* Stage6 */ \
-{ \
-  const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-  const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-  const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
-  const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
-  \
-  stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \
-  stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
-  stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
-  stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \
-  stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \
-  stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
-  stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
-  stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \
-  \
-  stp2_8 = stp1_8; \
-  stp2_9 = stp1_9; \
-  stp2_14 = stp1_14; \
-  stp2_15 = stp1_15; \
-  \
-  MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
-                         stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \
-                         stp2_13, stp2_11, stp2_12) \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \
-  stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \
-  stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \
-  stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \
-  stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \
-  stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \
-  stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \
-  stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \
-  \
-  stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \
-  stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \
-  stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \
-  stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \
-  stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \
-  stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \
-  stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \
-} \
-\
-/* Stage7 */ \
-{ \
-  const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \
-  const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  \
-  const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \
-  const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \
-  const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \
-  const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \
-  \
-  stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \
-  stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \
-  stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \
-  stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \
-  stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \
-  stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \
-  stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \
-  stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \
-  stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \
-  stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \
-  stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \
-  stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \
-  stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \
-  stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \
-  stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \
-  stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \
-  \
-  stp1_16 = stp2_16; \
-  stp1_17 = stp2_17; \
-  stp1_18 = stp2_18; \
-  stp1_19 = stp2_19; \
-  \
-  MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \
-                         stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \
-                         stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \
-                         stp1_23, stp1_24) \
-  \
-  stp1_28 = stp2_28; \
-  stp1_29 = stp2_29; \
-  stp1_30 = stp2_30; \
-  stp1_31 = stp2_31; \
-}
+    input += 8;                   \
+  }
 
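+/* Reduced 32-point inverse DCT for the case where only the low-frequency
+ * rows in[0]..in[7] can be non-zero: the input unpacks pair each row with
+ * a zero vector instead of loading rows 8..31. */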
+#define IDCT32_34                                                              \
+  /* Stage1 */                                                                 \
+  {                                                                            \
+    const __m128i zero = _mm_setzero_si128();                                  \
+    const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero);                   \
+    const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], zero);                   \
+                                                                               \
+    const __m128i lo_25_7 = _mm_unpacklo_epi16(zero, in[7]);                   \
+    const __m128i hi_25_7 = _mm_unpackhi_epi16(zero, in[7]);                   \
+                                                                               \
+    const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], zero);                   \
+    const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], zero);                   \
+                                                                               \
+    const __m128i lo_29_3 = _mm_unpacklo_epi16(zero, in[3]);                   \
+    const __m128i hi_29_3 = _mm_unpackhi_epi16(zero, in[3]);                   \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_1_31, hi_1_31, stg1_0, stg1_1, stp1_16,        \
+                             stp1_31);                                         \
+    MULTIPLICATION_AND_ADD_2(lo_25_7, hi_25_7, stg1_6, stg1_7, stp1_19,        \
+                             stp1_28);                                         \
+    MULTIPLICATION_AND_ADD_2(lo_5_27, hi_5_27, stg1_8, stg1_9, stp1_20,        \
+                             stp1_27);                                         \
+    MULTIPLICATION_AND_ADD_2(lo_29_3, hi_29_3, stg1_14, stg1_15, stp1_23,      \
+                             stp1_24);                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage2 */                                                                 \
+  {                                                                            \
+    const __m128i zero = _mm_setzero_si128();                                  \
+    const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero);                   \
+    const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero);                   \
+                                                                               \
+    const __m128i lo_26_6 = _mm_unpacklo_epi16(zero, in[6]);                   \
+    const __m128i hi_26_6 = _mm_unpackhi_epi16(zero, in[6]);                   \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_2_30, hi_2_30, stg2_0, stg2_1, stp2_8,         \
+                             stp2_15);                                         \
+    MULTIPLICATION_AND_ADD_2(lo_26_6, hi_26_6, stg2_6, stg2_7, stp2_11,        \
+                             stp2_12);                                         \
+                                                                               \
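+    /* The omitted stage-1 terms are zero; butterflies become copies. */       \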
+    stp2_16 = stp1_16;                                                         \
+    stp2_19 = stp1_19;                                                         \
+                                                                               \
+    stp2_20 = stp1_20;                                                         \
+    stp2_23 = stp1_23;                                                         \
+                                                                               \
+    stp2_24 = stp1_24;                                                         \
+    stp2_27 = stp1_27;                                                         \
+                                                                               \
+    stp2_28 = stp1_28;                                                         \
+    stp2_31 = stp1_31;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage3 */                                                                 \
+  {                                                                            \
+    const __m128i zero = _mm_setzero_si128();                                  \
+    const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero);                   \
+    const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero);                   \
+                                                                               \
+    const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1_16, stp1_31);             \
+    const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1_16, stp1_31);             \
+    const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1_19, stp1_28);             \
+    const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1_19, stp1_28);             \
+                                                                               \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1_20, stp1_27);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1_20, stp1_27);             \
+    const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1_23, stp1_24);             \
+    const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1_23, stp1_24);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_4_28, hi_4_28, stg3_0, stg3_1, stp1_4,         \
+                             stp1_7);                                          \
+                                                                               \
+    stp1_8 = stp2_8;                                                           \
+    stp1_11 = stp2_11;                                                         \
+    stp1_12 = stp2_12;                                                         \
+    stp1_15 = stp2_15;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4,     \
+                           stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, stp1_18,  \
+                           stp1_29)                                            \
+    MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8,     \
+                           stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, stp1_22, \
+                           stp1_25)                                            \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_31 = stp2_31;                                                         \
+    stp1_19 = stp2_19;                                                         \
+    stp1_20 = stp2_20;                                                         \
+    stp1_23 = stp2_23;                                                         \
+    stp1_24 = stp2_24;                                                         \
+    stp1_27 = stp2_27;                                                         \
+    stp1_28 = stp2_28;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage4 */                                                                 \
+  {                                                                            \
+    const __m128i zero = _mm_setzero_si128();                                  \
+    const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero);                   \
+    const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero);                   \
+                                                                               \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15);               \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15);               \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, stp2_12);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD_2(lo_0_16, hi_0_16, stg4_0, stg4_1, stp2_0,         \
+                             stp2_1);                                          \
+                                                                               \
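+    /* stp1_5/stp1_6 are zero, so the stage-4 butterfly is just a copy. */     \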
+    stp2_4 = stp1_4;                                                           \
+    stp2_5 = stp1_4;                                                           \
+    stp2_6 = stp1_7;                                                           \
+    stp2_7 = stp1_7;                                                           \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,       \
+                           stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, stp2_10,   \
+                           stp2_13)                                            \
+                                                                               \
+    stp2_8 = stp1_8;                                                           \
+    stp2_15 = stp1_15;                                                         \
+    stp2_11 = stp1_11;                                                         \
+    stp2_12 = stp1_12;                                                         \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_19);                                 \
+    stp2_17 = _mm_add_epi16(stp1_17, stp1_18);                                 \
+    stp2_18 = _mm_sub_epi16(stp1_17, stp1_18);                                 \
+    stp2_19 = _mm_sub_epi16(stp1_16, stp1_19);                                 \
+    stp2_20 = _mm_sub_epi16(stp1_23, stp1_20);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_22, stp1_21);                                 \
+    stp2_22 = _mm_add_epi16(stp1_22, stp1_21);                                 \
+    stp2_23 = _mm_add_epi16(stp1_23, stp1_20);                                 \
+                                                                               \
+    stp2_24 = _mm_add_epi16(stp1_24, stp1_27);                                 \
+    stp2_25 = _mm_add_epi16(stp1_25, stp1_26);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_25, stp1_26);                                 \
+    stp2_27 = _mm_sub_epi16(stp1_24, stp1_27);                                 \
+    stp2_28 = _mm_sub_epi16(stp1_31, stp1_28);                                 \
+    stp2_29 = _mm_sub_epi16(stp1_30, stp1_29);                                 \
+    stp2_30 = _mm_add_epi16(stp1_29, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_28, stp1_31);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage5 */                                                                 \
+  {                                                                            \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);                 \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);                 \
+    const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);             \
+    const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);             \
+                                                                               \
+    const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28);             \
+    const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28);             \
+    const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);             \
+    const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);             \
+                                                                               \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+                                                                               \
+    stp1_0 = stp2_0;                                                           \
+    stp1_1 = stp2_1;                                                           \
+    stp1_2 = stp2_1;                                                           \
+    stp1_3 = stp2_0;                                                           \
+                                                                               \
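+    /* One-pair version of the MULTIPLICATION_AND_ADD pattern, done inline. */ \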
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);                                     \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);                                     \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);                                     \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);                                     \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+                                                                               \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                      \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                      \
+                                                                               \
+    stp1_4 = stp2_4;                                                           \
+    stp1_7 = stp2_7;                                                           \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp2_8, stp2_11);                                   \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);                                   \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp2_8, stp2_11);                                  \
+    stp1_12 = _mm_sub_epi16(stp2_15, stp2_12);                                 \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);                                 \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);                                 \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_12);                                 \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_17 = stp2_17;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4,     \
+                           stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, stp1_19,  \
+                           stp1_28)                                            \
+    MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6,     \
+                           stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, stp1_21,  \
+                           stp1_26)                                            \
+                                                                               \
+    stp1_22 = stp2_22;                                                         \
+    stp1_23 = stp2_23;                                                         \
+    stp1_24 = stp2_24;                                                         \
+    stp1_25 = stp2_25;                                                         \
+    stp1_30 = stp2_30;                                                         \
+    stp1_31 = stp2_31;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage6 */                                                                 \
+  {                                                                            \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);             \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);             \
+                                                                               \
+    stp2_0 = _mm_add_epi16(stp1_0, stp1_7);                                    \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);                                    \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);                                    \
+    stp2_3 = _mm_add_epi16(stp1_3, stp1_4);                                    \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp1_4);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);                                    \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp1_7);                                    \
+                                                                               \
+    stp2_8 = stp1_8;                                                           \
+    stp2_9 = stp1_9;                                                           \
+    stp2_14 = stp1_14;                                                         \
+    stp2_15 = stp1_15;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_23);                                 \
+    stp2_17 = _mm_add_epi16(stp1_17, stp1_22);                                 \
+    stp2_18 = _mm_add_epi16(stp1_18, stp1_21);                                 \
+    stp2_19 = _mm_add_epi16(stp1_19, stp1_20);                                 \
+    stp2_20 = _mm_sub_epi16(stp1_19, stp1_20);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_18, stp1_21);                                 \
+    stp2_22 = _mm_sub_epi16(stp1_17, stp1_22);                                 \
+    stp2_23 = _mm_sub_epi16(stp1_16, stp1_23);                                 \
+                                                                               \
+    stp2_24 = _mm_sub_epi16(stp1_31, stp1_24);                                 \
+    stp2_25 = _mm_sub_epi16(stp1_30, stp1_25);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_29, stp1_26);                                 \
+    stp2_27 = _mm_sub_epi16(stp1_28, stp1_27);                                 \
+    stp2_28 = _mm_add_epi16(stp1_27, stp1_28);                                 \
+    stp2_29 = _mm_add_epi16(stp1_26, stp1_29);                                 \
+    stp2_30 = _mm_add_epi16(stp1_25, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_24, stp1_31);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage7 */                                                                 \
+  {                                                                            \
+    const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);             \
+    const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);             \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+                                                                               \
+    const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);             \
+    const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);             \
+    const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24);             \
+    const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24);             \
+                                                                               \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_15);                                   \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_14);                                   \
+    stp1_2 = _mm_add_epi16(stp2_2, stp2_13);                                   \
+    stp1_3 = _mm_add_epi16(stp2_3, stp2_12);                                   \
+    stp1_4 = _mm_add_epi16(stp2_4, stp2_11);                                   \
+    stp1_5 = _mm_add_epi16(stp2_5, stp2_10);                                   \
+    stp1_6 = _mm_add_epi16(stp2_6, stp2_9);                                    \
+    stp1_7 = _mm_add_epi16(stp2_7, stp2_8);                                    \
+    stp1_8 = _mm_sub_epi16(stp2_7, stp2_8);                                    \
+    stp1_9 = _mm_sub_epi16(stp2_6, stp2_9);                                    \
+    stp1_10 = _mm_sub_epi16(stp2_5, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp2_4, stp2_11);                                  \
+    stp1_12 = _mm_sub_epi16(stp2_3, stp2_12);                                  \
+    stp1_13 = _mm_sub_epi16(stp2_2, stp2_13);                                  \
+    stp1_14 = _mm_sub_epi16(stp2_1, stp2_14);                                  \
+    stp1_15 = _mm_sub_epi16(stp2_0, stp2_15);                                  \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_17 = stp2_17;                                                         \
+    stp1_18 = stp2_18;                                                         \
+    stp1_19 = stp2_19;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, stp1_21,  \
+                           stp1_26)                                            \
+    MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, stp1_23,  \
+                           stp1_24)                                            \
+                                                                               \
+    stp1_28 = stp2_28;                                                         \
+    stp1_29 = stp2_29;                                                         \
+    stp1_30 = stp2_30;                                                         \
+    stp1_31 = stp2_31;                                                         \
+  }
 
-#define IDCT32 \
-/* Stage1 */ \
-{ \
-  const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], in[31]); \
-  const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]); \
-  const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]); \
-  const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]); \
-  \
-  const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]); \
-  const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]); \
-  const __m128i lo_25_7= _mm_unpacklo_epi16(in[25], in[7]); \
-  const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], in[7]); \
-  \
-  const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]); \
-  const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]); \
-  const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]); \
-  const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]); \
-  \
-  const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]); \
-  const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]); \
-  const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]); \
-  const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]); \
-  \
-  MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0, \
-                         stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, \
-                         stp1_17, stp1_30) \
-  MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4, \
-                         stg1_5, stg1_6, stg1_7, stp1_18, stp1_29, \
-                         stp1_19, stp1_28) \
-  MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8, \
-                         stg1_9, stg1_10, stg1_11, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12, \
-                         stg1_13, stg1_14, stg1_15, stp1_22, stp1_25, \
-                         stp1_23, stp1_24) \
-} \
-\
-/* Stage2 */ \
-{ \
-  const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], in[30]); \
-  const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]); \
-  const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]); \
-  const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]); \
-  \
-  const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]); \
-  const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]); \
-  const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]); \
-  const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]); \
-  \
-  MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0, \
-                         stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, \
-                         stp2_14) \
-  MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4, \
-                         stg2_5, stg2_6, stg2_7, stp2_10, stp2_13, \
-                         stp2_11, stp2_12) \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_17); \
-  stp2_17 = _mm_sub_epi16(stp1_16, stp1_17); \
-  stp2_18 = _mm_sub_epi16(stp1_19, stp1_18); \
-  stp2_19 = _mm_add_epi16(stp1_19, stp1_18); \
-  \
-  stp2_20 = _mm_add_epi16(stp1_20, stp1_21); \
-  stp2_21 = _mm_sub_epi16(stp1_20, stp1_21); \
-  stp2_22 = _mm_sub_epi16(stp1_23, stp1_22); \
-  stp2_23 = _mm_add_epi16(stp1_23, stp1_22); \
-  \
-  stp2_24 = _mm_add_epi16(stp1_24, stp1_25); \
-  stp2_25 = _mm_sub_epi16(stp1_24, stp1_25); \
-  stp2_26 = _mm_sub_epi16(stp1_27, stp1_26); \
-  stp2_27 = _mm_add_epi16(stp1_27, stp1_26); \
-  \
-  stp2_28 = _mm_add_epi16(stp1_28, stp1_29); \
-  stp2_29 = _mm_sub_epi16(stp1_28, stp1_29); \
-  stp2_30 = _mm_sub_epi16(stp1_31, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_31, stp1_30); \
-} \
-\
-/* Stage3 */ \
-{ \
-  const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]); \
-  const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]); \
-  const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]); \
-  const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]); \
-  \
-  const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30); \
-  const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30); \
-  const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \
-  const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \
-  \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \
-  const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \
-  \
-  MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0, \
-                         stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5, \
-                         stp1_6) \
-  \
-  stp1_8 = _mm_add_epi16(stp2_8, stp2_9); \
-  stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \
-  stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \
-  stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \
-  stp1_12 = _mm_add_epi16(stp2_12, stp2_13); \
-  stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \
-  stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \
-  stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \
-  \
-  MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \
-                         stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \
-                         stp1_18, stp1_29) \
-  MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \
-                         stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \
-                         stp1_22, stp1_25) \
-  \
-  stp1_16 = stp2_16; \
-  stp1_31 = stp2_31; \
-  stp1_19 = stp2_19; \
-  stp1_20 = stp2_20; \
-  stp1_23 = stp2_23; \
-  stp1_24 = stp2_24; \
-  stp1_27 = stp2_27; \
-  stp1_28 = stp2_28; \
-} \
-\
-/* Stage4 */ \
-{ \
-  const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]); \
-  const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]); \
-  const __m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]); \
-  const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]); \
-  \
-  const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
-  const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
-  const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-  const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-  \
-  MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0, \
-                         stg4_1, stg4_2, stg4_3, stp2_0, stp2_1, \
-                         stp2_2, stp2_3) \
-  \
-  stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \
-  stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \
-  stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \
-  stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \
-  \
-  MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \
-                         stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \
-                         stp2_10, stp2_13) \
-  \
-  stp2_8 = stp1_8; \
-  stp2_15 = stp1_15; \
-  stp2_11 = stp1_11; \
-  stp2_12 = stp1_12; \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \
-  stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \
-  stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \
-  stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \
-  stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \
-  stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \
-  stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \
-  stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \
-  \
-  stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \
-  stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \
-  stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \
-  stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \
-  stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \
-  stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \
-  stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \
-} \
-\
-/* Stage5 */ \
-{ \
-  const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
-  const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
-  const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \
-  const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \
-  \
-  const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \
-  const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \
-  const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \
-  const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \
-  \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  \
-  stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \
-  stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \
-  stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \
-  stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \
-  \
-  tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
-  tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
-  tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
-  tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
-  \
-  tmp0 = _mm_add_epi32(tmp0, rounding); \
-  tmp1 = _mm_add_epi32(tmp1, rounding); \
-  tmp2 = _mm_add_epi32(tmp2, rounding); \
-  tmp3 = _mm_add_epi32(tmp3, rounding); \
-  \
-  tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
-  tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
-  tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
-  \
-  stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
-  stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
-  \
-  stp1_4 = stp2_4; \
-  stp1_7 = stp2_7; \
-  \
-  stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \
-  stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \
-  stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \
-  stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \
-  stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \
-  stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \
-  stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \
-  stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \
-  \
-  stp1_16 = stp2_16; \
-  stp1_17 = stp2_17; \
-  \
-  MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \
-                         stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \
-                         stp1_19, stp1_28) \
-  MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \
-                         stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  \
-  stp1_22 = stp2_22; \
-  stp1_23 = stp2_23; \
-  stp1_24 = stp2_24; \
-  stp1_25 = stp2_25; \
-  stp1_30 = stp2_30; \
-  stp1_31 = stp2_31; \
-} \
-\
-/* Stage6 */ \
-{ \
-  const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
-  const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
-  const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
-  const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
-  \
-  stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \
-  stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
-  stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
-  stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \
-  stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \
-  stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
-  stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
-  stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \
-  \
-  stp2_8 = stp1_8; \
-  stp2_9 = stp1_9; \
-  stp2_14 = stp1_14; \
-  stp2_15 = stp1_15; \
-  \
-  MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
-                         stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \
-                         stp2_13, stp2_11, stp2_12) \
-  \
-  stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \
-  stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \
-  stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \
-  stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \
-  stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \
-  stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \
-  stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \
-  stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \
-  \
-  stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \
-  stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \
-  stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \
-  stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \
-  stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \
-  stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \
-  stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \
-  stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \
-} \
-\
-/* Stage7 */ \
-{ \
-  const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \
-  const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \
-  const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \
-  const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \
-  \
-  const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \
-  const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \
-  const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \
-  const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \
-  \
-  stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \
-  stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \
-  stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \
-  stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \
-  stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \
-  stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \
-  stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \
-  stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \
-  stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \
-  stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \
-  stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \
-  stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \
-  stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \
-  stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \
-  stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \
-  stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \
-  \
-  stp1_16 = stp2_16; \
-  stp1_17 = stp2_17; \
-  stp1_18 = stp2_18; \
-  stp1_19 = stp2_19; \
-  \
-  MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \
-                         stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, \
-                         stp1_21, stp1_26) \
-  MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \
-                         stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \
-                         stp1_23, stp1_24) \
-  \
-  stp1_28 = stp2_28; \
-  stp1_29 = stp2_29; \
-  stp1_30 = stp2_30; \
-  stp1_31 = stp2_31; \
-}
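+/* Full 32-point inverse DCT: all 32 input rows feed the stage-1 through
+ * stage-4 butterflies. */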
+#define IDCT32                                                                 \
+  /* Stage1 */                                                                 \
+  {                                                                            \
+    const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], in[31]);                 \
+    const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]);                 \
+    const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]);               \
+    const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]);               \
+                                                                               \
+    const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]);                 \
+    const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]);                 \
+    const __m128i lo_25_7 = _mm_unpacklo_epi16(in[25], in[7]);                 \
+    const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], in[7]);                 \
+                                                                               \
+    const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]);                 \
+    const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]);                 \
+    const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]);               \
+    const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]);               \
+                                                                               \
+    const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]);               \
+    const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]);               \
+    const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]);                 \
+    const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]);                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0,       \
+                           stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, stp1_17,  \
+                           stp1_30)                                            \
+    MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4, stg1_5, \
+                           stg1_6, stg1_7, stp1_18, stp1_29, stp1_19, stp1_28) \
+    MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8,       \
+                           stg1_9, stg1_10, stg1_11, stp1_20, stp1_27,         \
+                           stp1_21, stp1_26)                                   \
+    MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12,      \
+                           stg1_13, stg1_14, stg1_15, stp1_22, stp1_25,        \
+                           stp1_23, stp1_24)                                   \
+  }                                                                            \
+                                                                               \
+  /* Stage2 */                                                                 \
+  {                                                                            \
+    const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], in[30]);                 \
+    const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]);                 \
+    const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]);               \
+    const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]);               \
+                                                                               \
+    const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]);               \
+    const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]);               \
+    const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]);                 \
+    const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]);                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0,       \
+                           stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9,    \
+                           stp2_14)                                            \
+    MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4,       \
+                           stg2_5, stg2_6, stg2_7, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_17);                                 \
+    stp2_17 = _mm_sub_epi16(stp1_16, stp1_17);                                 \
+    stp2_18 = _mm_sub_epi16(stp1_19, stp1_18);                                 \
+    stp2_19 = _mm_add_epi16(stp1_19, stp1_18);                                 \
+                                                                               \
+    stp2_20 = _mm_add_epi16(stp1_20, stp1_21);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_20, stp1_21);                                 \
+    stp2_22 = _mm_sub_epi16(stp1_23, stp1_22);                                 \
+    stp2_23 = _mm_add_epi16(stp1_23, stp1_22);                                 \
+                                                                               \
+    stp2_24 = _mm_add_epi16(stp1_24, stp1_25);                                 \
+    stp2_25 = _mm_sub_epi16(stp1_24, stp1_25);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_27, stp1_26);                                 \
+    stp2_27 = _mm_add_epi16(stp1_27, stp1_26);                                 \
+                                                                               \
+    stp2_28 = _mm_add_epi16(stp1_28, stp1_29);                                 \
+    stp2_29 = _mm_sub_epi16(stp1_28, stp1_29);                                 \
+    stp2_30 = _mm_sub_epi16(stp1_31, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_31, stp1_30);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage3 */                                                                 \
+  {                                                                            \
+    const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]);                 \
+    const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]);                 \
+    const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]);               \
+    const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]);               \
+                                                                               \
+    const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30);             \
+    const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30);             \
+    const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);             \
+    const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);             \
+                                                                               \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+    const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);             \
+    const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0,       \
+                           stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5,     \
+                           stp1_6)                                             \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp2_8, stp2_9);                                    \
+    stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);                                    \
+    stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);                                 \
+    stp1_11 = _mm_add_epi16(stp2_11, stp2_10);                                 \
+    stp1_12 = _mm_add_epi16(stp2_12, stp2_13);                                 \
+    stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);                                 \
+    stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);                                 \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_14);                                 \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4,     \
+                           stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, stp1_18,  \
+                           stp1_29)                                            \
+    MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8,     \
+                           stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, stp1_22, \
+                           stp1_25)                                            \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_31 = stp2_31;                                                         \
+    stp1_19 = stp2_19;                                                         \
+    stp1_20 = stp2_20;                                                         \
+    stp1_23 = stp2_23;                                                         \
+    stp1_24 = stp2_24;                                                         \
+    stp1_27 = stp2_27;                                                         \
+    stp1_28 = stp2_28;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage4 */                                                                 \
+  {                                                                            \
+    const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]);                 \
+    const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]);                 \
+    const __m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]);                 \
+    const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]);                 \
+                                                                               \
+    const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);               \
+    const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);               \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0, stg4_1, \
+                           stg4_2, stg4_3, stp2_0, stp2_1, stp2_2, stp2_3)     \
+                                                                               \
+    stp2_4 = _mm_add_epi16(stp1_4, stp1_5);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);                                    \
+    stp2_7 = _mm_add_epi16(stp1_7, stp1_6);                                    \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,       \
+                           stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, stp2_10,   \
+                           stp2_13)                                            \
+                                                                               \
+    stp2_8 = stp1_8;                                                           \
+    stp2_15 = stp1_15;                                                         \
+    stp2_11 = stp1_11;                                                         \
+    stp2_12 = stp1_12;                                                         \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_19);                                 \
+    stp2_17 = _mm_add_epi16(stp1_17, stp1_18);                                 \
+    stp2_18 = _mm_sub_epi16(stp1_17, stp1_18);                                 \
+    stp2_19 = _mm_sub_epi16(stp1_16, stp1_19);                                 \
+    stp2_20 = _mm_sub_epi16(stp1_23, stp1_20);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_22, stp1_21);                                 \
+    stp2_22 = _mm_add_epi16(stp1_22, stp1_21);                                 \
+    stp2_23 = _mm_add_epi16(stp1_23, stp1_20);                                 \
+                                                                               \
+    stp2_24 = _mm_add_epi16(stp1_24, stp1_27);                                 \
+    stp2_25 = _mm_add_epi16(stp1_25, stp1_26);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_25, stp1_26);                                 \
+    stp2_27 = _mm_sub_epi16(stp1_24, stp1_27);                                 \
+    stp2_28 = _mm_sub_epi16(stp1_31, stp1_28);                                 \
+    stp2_29 = _mm_sub_epi16(stp1_30, stp1_29);                                 \
+    stp2_30 = _mm_add_epi16(stp1_29, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_28, stp1_31);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage5 */                                                                 \
+  {                                                                            \
+    const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);                 \
+    const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);                 \
+    const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);             \
+    const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);             \
+                                                                               \
+    const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28);             \
+    const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28);             \
+    const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);             \
+    const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);             \
+                                                                               \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+                                                                               \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_3);                                    \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_2);                                    \
+    stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);                                    \
+    stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);                                    \
+                                                                               \
+    tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);                                     \
+    tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);                                     \
+    tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);                                     \
+    tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);                                     \
+                                                                               \
+    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
+    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
+    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
+    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
+                                                                               \
+    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
+    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
+    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
+    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
+                                                                               \
+    stp1_5 = _mm_packs_epi32(tmp0, tmp1);                                      \
+    stp1_6 = _mm_packs_epi32(tmp2, tmp3);                                      \
+                                                                               \
+    stp1_4 = stp2_4;                                                           \
+    stp1_7 = stp2_7;                                                           \
+                                                                               \
+    stp1_8 = _mm_add_epi16(stp2_8, stp2_11);                                   \
+    stp1_9 = _mm_add_epi16(stp2_9, stp2_10);                                   \
+    stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp2_8, stp2_11);                                  \
+    stp1_12 = _mm_sub_epi16(stp2_15, stp2_12);                                 \
+    stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);                                 \
+    stp1_14 = _mm_add_epi16(stp2_14, stp2_13);                                 \
+    stp1_15 = _mm_add_epi16(stp2_15, stp2_12);                                 \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_17 = stp2_17;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4,     \
+                           stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, stp1_19,  \
+                           stp1_28)                                            \
+    MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6,     \
+                           stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, stp1_21,  \
+                           stp1_26)                                            \
+                                                                               \
+    stp1_22 = stp2_22;                                                         \
+    stp1_23 = stp2_23;                                                         \
+    stp1_24 = stp2_24;                                                         \
+    stp1_25 = stp2_25;                                                         \
+    stp1_30 = stp2_30;                                                         \
+    stp1_31 = stp2_31;                                                         \
+  }                                                                            \
+                                                                               \
+  /* Stage6 */                                                                 \
+  {                                                                            \
+    const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);             \
+    const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);             \
+    const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);             \
+    const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);             \
+                                                                               \
+    stp2_0 = _mm_add_epi16(stp1_0, stp1_7);                                    \
+    stp2_1 = _mm_add_epi16(stp1_1, stp1_6);                                    \
+    stp2_2 = _mm_add_epi16(stp1_2, stp1_5);                                    \
+    stp2_3 = _mm_add_epi16(stp1_3, stp1_4);                                    \
+    stp2_4 = _mm_sub_epi16(stp1_3, stp1_4);                                    \
+    stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);                                    \
+    stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);                                    \
+    stp2_7 = _mm_sub_epi16(stp1_0, stp1_7);                                    \
+                                                                               \
+    stp2_8 = stp1_8;                                                           \
+    stp2_9 = stp1_9;                                                           \
+    stp2_14 = stp1_14;                                                         \
+    stp2_15 = stp1_15;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,  \
+                           stp2_12)                                            \
+                                                                               \
+    stp2_16 = _mm_add_epi16(stp1_16, stp1_23);                                 \
+    stp2_17 = _mm_add_epi16(stp1_17, stp1_22);                                 \
+    stp2_18 = _mm_add_epi16(stp1_18, stp1_21);                                 \
+    stp2_19 = _mm_add_epi16(stp1_19, stp1_20);                                 \
+    stp2_20 = _mm_sub_epi16(stp1_19, stp1_20);                                 \
+    stp2_21 = _mm_sub_epi16(stp1_18, stp1_21);                                 \
+    stp2_22 = _mm_sub_epi16(stp1_17, stp1_22);                                 \
+    stp2_23 = _mm_sub_epi16(stp1_16, stp1_23);                                 \
+                                                                               \
+    stp2_24 = _mm_sub_epi16(stp1_31, stp1_24);                                 \
+    stp2_25 = _mm_sub_epi16(stp1_30, stp1_25);                                 \
+    stp2_26 = _mm_sub_epi16(stp1_29, stp1_26);                                 \
+    stp2_27 = _mm_sub_epi16(stp1_28, stp1_27);                                 \
+    stp2_28 = _mm_add_epi16(stp1_27, stp1_28);                                 \
+    stp2_29 = _mm_add_epi16(stp1_26, stp1_29);                                 \
+    stp2_30 = _mm_add_epi16(stp1_25, stp1_30);                                 \
+    stp2_31 = _mm_add_epi16(stp1_24, stp1_31);                                 \
+  }                                                                            \
+                                                                               \
+  /* Stage7 */                                                                 \
+  {                                                                            \
+    const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);             \
+    const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);             \
+    const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);             \
+    const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);             \
+                                                                               \
+    const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);             \
+    const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);             \
+    const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24);             \
+    const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24);             \
+                                                                               \
+    stp1_0 = _mm_add_epi16(stp2_0, stp2_15);                                   \
+    stp1_1 = _mm_add_epi16(stp2_1, stp2_14);                                   \
+    stp1_2 = _mm_add_epi16(stp2_2, stp2_13);                                   \
+    stp1_3 = _mm_add_epi16(stp2_3, stp2_12);                                   \
+    stp1_4 = _mm_add_epi16(stp2_4, stp2_11);                                   \
+    stp1_5 = _mm_add_epi16(stp2_5, stp2_10);                                   \
+    stp1_6 = _mm_add_epi16(stp2_6, stp2_9);                                    \
+    stp1_7 = _mm_add_epi16(stp2_7, stp2_8);                                    \
+    stp1_8 = _mm_sub_epi16(stp2_7, stp2_8);                                    \
+    stp1_9 = _mm_sub_epi16(stp2_6, stp2_9);                                    \
+    stp1_10 = _mm_sub_epi16(stp2_5, stp2_10);                                  \
+    stp1_11 = _mm_sub_epi16(stp2_4, stp2_11);                                  \
+    stp1_12 = _mm_sub_epi16(stp2_3, stp2_12);                                  \
+    stp1_13 = _mm_sub_epi16(stp2_2, stp2_13);                                  \
+    stp1_14 = _mm_sub_epi16(stp2_1, stp2_14);                                  \
+    stp1_15 = _mm_sub_epi16(stp2_0, stp2_15);                                  \
+                                                                               \
+    stp1_16 = stp2_16;                                                         \
+    stp1_17 = stp2_17;                                                         \
+    stp1_18 = stp2_18;                                                         \
+    stp1_19 = stp2_19;                                                         \
+                                                                               \
+    MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, stp1_21,  \
+                           stp1_26)                                            \
+    MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0,     \
+                           stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, stp1_23,  \
+                           stp1_24)                                            \
+                                                                               \
+    stp1_28 = stp2_28;                                                         \
+    stp1_29 = stp2_29;                                                         \
+    stp1_30 = stp2_30;                                                         \
+    stp1_31 = stp2_31;                                                         \
+  }
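+
+// These final stages complete the 32-point IDCT butterfly network: each
+// stage folds symmetric taps with paired add/sub operations and rotates
+// the remaining odd-indexed pairs through MULTIPLICATION_AND_ADD (a cospi
+// rotation with DCT_CONST_BITS rounding).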
 
 // Only upper-left 8x8 has non-zero coeff
 void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
-  const __m128i final_rounding = _mm_set1_epi16(1<<5);
+  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
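+  // 1 << 5 biases the final >> 6 down-shift applied when the 32x32 result
+  // is written back to the destination.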
 
   // idct constants for each stage
   const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
@@ -3074,15 +3060,13 @@ void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
 
   __m128i in[32], col[32];
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
-          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
-          stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22,
-          stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29,
-          stp1_30, stp1_31;
+      stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+      stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, stp1_23,
+      stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, stp1_30, stp1_31;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
-          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
-          stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22,
-          stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29,
-          stp2_30, stp2_31;
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
+      stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, stp2_23,
+      stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i;
 
@@ -3250,15 +3234,13 @@ void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
 
   __m128i in[32], col[128], zero_idx[16];
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
-          stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
-          stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22,
-          stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29,
-          stp1_30, stp1_31;
+      stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+      stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, stp1_23,
+      stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, stp1_30, stp1_31;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
-          stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
-          stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22,
-          stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29,
-          stp2_30, stp2_31;
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
+      stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, stp2_23,
+      stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i, j, i32;
 
@@ -3483,8 +3465,8 @@ void vpx_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,
   dc_value = _mm_set1_epi16(a);
 
   for (j = 0; j < 32; ++j) {
-    RECON_AND_STORE(dest +  0 + j * stride, dc_value);
-    RECON_AND_STORE(dest +  8 + j * stride, dc_value);
+    RECON_AND_STORE(dest + 0 + j * stride, dc_value);
+    RECON_AND_STORE(dest + 8 + j * stride, dc_value);
     RECON_AND_STORE(dest + 16 + j * stride, dc_value);
     RECON_AND_STORE(dest + 24 + j * stride, dc_value);
   }
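 // The loop above is the DC-only path: the lone DC coefficient, broadcast
 // into dc_value, is added to every pixel of the 32x32 block in four
 // 8-pixel RECON_AND_STORE calls per row.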
@@ -3609,8 +3591,7 @@ void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
     tran_low_t temp_in[4], temp_out[4];
     // Columns
     for (i = 0; i < 4; ++i) {
-      for (j = 0; j < 4; ++j)
-        temp_in[j] = out[j * 4 + i];
+      for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
       vpx_highbd_idct4_c(temp_in, temp_out, bd);
       for (j = 0; j < 4; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
@@ -3699,19 +3680,18 @@ void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
       __m128i d[8];
       for (i = 0; i < 8; i++) {
         inptr[i] = _mm_add_epi16(inptr[i], sixteen);
-        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride*i));
+        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
         inptr[i] = _mm_srai_epi16(inptr[i], 5);
         d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
         // Store
-        _mm_storeu_si128((__m128i *)(dest + stride*i), d[i]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
       }
     }
   } else {
     // Run the un-optimised column transform
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
-      for (j = 0; j < 8; ++j)
-        temp_in[j] = out[j * 8 + i];
+      for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
       vpx_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
@@ -3803,19 +3783,18 @@ void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
       __m128i d[8];
       for (i = 0; i < 8; i++) {
         inptr[i] = _mm_add_epi16(inptr[i], sixteen);
-        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride*i));
+        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
         inptr[i] = _mm_srai_epi16(inptr[i], 5);
         d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
         // Store
-        _mm_storeu_si128((__m128i *)(dest + stride*i), d[i]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
       }
     }
   } else {
     // Run the un-optimised column transform
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
-      for (j = 0; j < 8; ++j)
-        temp_in[j] = out[j * 8 + i];
+      for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
       vpx_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
@@ -3911,25 +3890,24 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
     {
       __m128i d[2];
       for (i = 0; i < 16; i++) {
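+        // Add the rounding bias, arithmetic-shift right by 6, accumulate
+        // into the destination pixels, then clamp to the range implied by
+        // the bit depth (bd) before storing.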
-        inptr[i   ] = _mm_add_epi16(inptr[i   ], rounding);
-        inptr[i+16] = _mm_add_epi16(inptr[i+16], rounding);
-        d[0] = _mm_loadu_si128((const __m128i *)(dest + stride*i));
-        d[1] = _mm_loadu_si128((const __m128i *)(dest + stride*i + 8));
-        inptr[i   ] = _mm_srai_epi16(inptr[i   ], 6);
-        inptr[i+16] = _mm_srai_epi16(inptr[i+16], 6);
-        d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i   ]), bd);
-        d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i+16]), bd);
+        inptr[i] = _mm_add_epi16(inptr[i], rounding);
+        inptr[i + 16] = _mm_add_epi16(inptr[i + 16], rounding);
+        d[0] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
+        d[1] = _mm_loadu_si128((const __m128i *)(dest + stride * i + 8));
+        inptr[i] = _mm_srai_epi16(inptr[i], 6);
+        inptr[i + 16] = _mm_srai_epi16(inptr[i + 16], 6);
+        d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i]), bd);
+        d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i + 16]), bd);
         // Store
-        _mm_storeu_si128((__m128i *)(dest + stride*i), d[0]);
-        _mm_storeu_si128((__m128i *)(dest + stride*i + 8), d[1]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[0]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i + 8), d[1]);
       }
     }
   } else {
     // Run the un-optimised column transform
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
-      for (j = 0; j < 16; ++j)
-        temp_in[j] = out[j * 16 + i];
+      for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
       vpx_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
@@ -4030,25 +4008,24 @@ void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
     {
       __m128i d[2];
       for (i = 0; i < 16; i++) {
-        inptr[i   ] = _mm_add_epi16(inptr[i   ], rounding);
-        inptr[i+16] = _mm_add_epi16(inptr[i+16], rounding);
-        d[0] = _mm_loadu_si128((const __m128i *)(dest + stride*i));
-        d[1] = _mm_loadu_si128((const __m128i *)(dest + stride*i + 8));
-        inptr[i   ] = _mm_srai_epi16(inptr[i   ], 6);
-        inptr[i+16] = _mm_srai_epi16(inptr[i+16], 6);
-        d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i   ]), bd);
-        d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i+16]), bd);
+        inptr[i] = _mm_add_epi16(inptr[i], rounding);
+        inptr[i + 16] = _mm_add_epi16(inptr[i + 16], rounding);
+        d[0] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
+        d[1] = _mm_loadu_si128((const __m128i *)(dest + stride * i + 8));
+        inptr[i] = _mm_srai_epi16(inptr[i], 6);
+        inptr[i + 16] = _mm_srai_epi16(inptr[i + 16], 6);
+        d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i]), bd);
+        d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i + 16]), bd);
         // Store
-        _mm_storeu_si128((__m128i *)(dest + stride*i), d[0]);
-        _mm_storeu_si128((__m128i *)(dest + stride*i + 8), d[1]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[0]);
+        _mm_storeu_si128((__m128i *)(dest + stride * i + 8), d[1]);
       }
     }
   } else {
     // Run the un-optimised column transform
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
-      for (j = 0; j < 16; ++j)
-        temp_in[j] = out[j * 16 + i];
+      for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
       vpx_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
diff --git a/vpx_dsp/x86/inv_txfm_sse2.h b/vpx_dsp/x86/inv_txfm_sse2.h
index b9f39839ade25b05db49375423fe20905e99cfa0..4ce25049a0b85277455ab4ac40427fe8b12ba9d2 100644
--- a/vpx_dsp/x86/inv_txfm_sse2.h
+++ b/vpx_dsp/x86/inv_txfm_sse2.h
@@ -47,16 +47,16 @@ static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
   res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
 }
 
-#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1) \
+#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1)   \
   {                                                     \
     const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
     const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
                                                         \
-    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1);  /* i1 i0 */  \
-    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1);  /* i3 i2 */  \
+    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \
+    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \
   }
 
-static INLINE void array_transpose_4X8(__m128i *in, __m128i * out) {
+static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) {
   const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
   const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
   const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
@@ -95,43 +95,43 @@ static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
 static INLINE __m128i load_input_data(const tran_low_t *data) {
 #if CONFIG_VPX_HIGHBITDEPTH
   return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5],
-      data[6], data[7]);
+                        data[6], data[7]);
 #else
   return _mm_load_si128((const __m128i *)data);
 #endif
 }
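 // With CONFIG_VPX_HIGHBITDEPTH the coefficients (tran_low_t) are 32 bits
 // wide, so eight of them are narrowed into one 16-bit vector through
 // octa_set_epi16; otherwise a single aligned 128-bit load suffices.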
 
 static INLINE void load_buffer_8x16(const tran_low_t *input, __m128i *in) {
-  in[0]  = load_input_data(input + 0 * 16);
-  in[1]  = load_input_data(input + 1 * 16);
-  in[2]  = load_input_data(input + 2 * 16);
-  in[3]  = load_input_data(input + 3 * 16);
-  in[4]  = load_input_data(input + 4 * 16);
-  in[5]  = load_input_data(input + 5 * 16);
-  in[6]  = load_input_data(input + 6 * 16);
-  in[7]  = load_input_data(input + 7 * 16);
-
-  in[8]  = load_input_data(input + 8 * 16);
-  in[9]  = load_input_data(input + 9 * 16);
-  in[10]  = load_input_data(input + 10 * 16);
-  in[11]  = load_input_data(input + 11 * 16);
-  in[12]  = load_input_data(input + 12 * 16);
-  in[13]  = load_input_data(input + 13 * 16);
-  in[14]  = load_input_data(input + 14 * 16);
-  in[15]  = load_input_data(input + 15 * 16);
+  in[0] = load_input_data(input + 0 * 16);
+  in[1] = load_input_data(input + 1 * 16);
+  in[2] = load_input_data(input + 2 * 16);
+  in[3] = load_input_data(input + 3 * 16);
+  in[4] = load_input_data(input + 4 * 16);
+  in[5] = load_input_data(input + 5 * 16);
+  in[6] = load_input_data(input + 6 * 16);
+  in[7] = load_input_data(input + 7 * 16);
+
+  in[8] = load_input_data(input + 8 * 16);
+  in[9] = load_input_data(input + 9 * 16);
+  in[10] = load_input_data(input + 10 * 16);
+  in[11] = load_input_data(input + 11 * 16);
+  in[12] = load_input_data(input + 12 * 16);
+  in[13] = load_input_data(input + 13 * 16);
+  in[14] = load_input_data(input + 14 * 16);
+  in[15] = load_input_data(input + 15 * 16);
 }
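 // Gathers the left 8 columns of a 16x16 coefficient block (row stride of
 // 16 coefficients) into sixteen vector registers.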
 
-#define RECON_AND_STORE(dest, in_x) \
-  {                                                     \
-     __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
-      d0 = _mm_unpacklo_epi8(d0, zero); \
-      d0 = _mm_add_epi16(in_x, d0); \
-      d0 = _mm_packus_epi16(d0, d0); \
-      _mm_storel_epi64((__m128i *)(dest), d0); \
+#define RECON_AND_STORE(dest, in_x)                  \
+  {                                                  \
+    __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
+    d0 = _mm_unpacklo_epi8(d0, zero);                \
+    d0 = _mm_add_epi16(in_x, d0);                    \
+    d0 = _mm_packus_epi16(d0, d0);                   \
+    _mm_storel_epi64((__m128i *)(dest), d0);         \
   }
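 // RECON_AND_STORE adds one row of residual (in_x, already rounded and
 // shifted) to eight reconstructed pixels: load 8 bytes, zero-extend to
 // 16 bits using the caller's zero register, add, then pack back down with
 // unsigned saturation before storing.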
 
 static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
-  const __m128i final_rounding = _mm_set1_epi16(1<<5);
+  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
   // Final rounding and shift
   in[0] = _mm_adds_epi16(in[0], final_rounding);
@@ -168,16 +168,16 @@ static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
   in[14] = _mm_srai_epi16(in[14], 6);
   in[15] = _mm_srai_epi16(in[15], 6);
 
-  RECON_AND_STORE(dest +  0 * stride, in[0]);
-  RECON_AND_STORE(dest +  1 * stride, in[1]);
-  RECON_AND_STORE(dest +  2 * stride, in[2]);
-  RECON_AND_STORE(dest +  3 * stride, in[3]);
-  RECON_AND_STORE(dest +  4 * stride, in[4]);
-  RECON_AND_STORE(dest +  5 * stride, in[5]);
-  RECON_AND_STORE(dest +  6 * stride, in[6]);
-  RECON_AND_STORE(dest +  7 * stride, in[7]);
-  RECON_AND_STORE(dest +  8 * stride, in[8]);
-  RECON_AND_STORE(dest +  9 * stride, in[9]);
+  RECON_AND_STORE(dest + 0 * stride, in[0]);
+  RECON_AND_STORE(dest + 1 * stride, in[1]);
+  RECON_AND_STORE(dest + 2 * stride, in[2]);
+  RECON_AND_STORE(dest + 3 * stride, in[3]);
+  RECON_AND_STORE(dest + 4 * stride, in[4]);
+  RECON_AND_STORE(dest + 5 * stride, in[5]);
+  RECON_AND_STORE(dest + 6 * stride, in[6]);
+  RECON_AND_STORE(dest + 7 * stride, in[7]);
+  RECON_AND_STORE(dest + 8 * stride, in[8]);
+  RECON_AND_STORE(dest + 9 * stride, in[9]);
   RECON_AND_STORE(dest + 10 * stride, in[10]);
   RECON_AND_STORE(dest + 11 * stride, in[11]);
   RECON_AND_STORE(dest + 12 * stride, in[12]);
diff --git a/vpx_dsp/x86/loopfilter_avx2.c b/vpx_dsp/x86/loopfilter_avx2.c
index 23a97dd05f78ca1007bcffafdfaf6c56571c76f5..91058388b60467d1b87fbf56c496f346c05033fb 100644
--- a/vpx_dsp/x86/loopfilter_avx2.c
+++ b/vpx_dsp/x86/loopfilter_avx2.c
@@ -8,979 +8,916 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include <immintrin.h>  /* AVX2 */
+#include <immintrin.h> /* AVX2 */
 
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_ports/mem.h"
 
 static void mb_lpf_horizontal_edge_w_avx2_8(unsigned char *s, int p,
-        const unsigned char *_blimit, const unsigned char *_limit,
-        const unsigned char *_thresh) {
-    __m128i mask, hev, flat, flat2;
-    const __m128i zero = _mm_set1_epi16(0);
-    const __m128i one = _mm_set1_epi8(1);
-    __m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1;
-    __m128i abs_p1p0;
-
-    const __m128i thresh = _mm_broadcastb_epi8(
-            _mm_cvtsi32_si128((int) _thresh[0]));
-    const __m128i limit = _mm_broadcastb_epi8(
-            _mm_cvtsi32_si128((int) _limit[0]));
-    const __m128i blimit = _mm_broadcastb_epi8(
-            _mm_cvtsi32_si128((int) _blimit[0]));
-
-    q4p4 = _mm_loadl_epi64((__m128i *) (s - 5 * p));
-    q4p4 = _mm_castps_si128(
-            _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *) (s + 4 * p)));
-    q3p3 = _mm_loadl_epi64((__m128i *) (s - 4 * p));
-    q3p3 = _mm_castps_si128(
-            _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *) (s + 3 * p)));
-    q2p2 = _mm_loadl_epi64((__m128i *) (s - 3 * p));
-    q2p2 = _mm_castps_si128(
-            _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *) (s + 2 * p)));
-    q1p1 = _mm_loadl_epi64((__m128i *) (s - 2 * p));
-    q1p1 = _mm_castps_si128(
-            _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *) (s + 1 * p)));
-    p1q1 = _mm_shuffle_epi32(q1p1, 78);
-    q0p0 = _mm_loadl_epi64((__m128i *) (s - 1 * p));
-    q0p0 = _mm_castps_si128(
-            _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *) (s - 0 * p)));
-    p0q0 = _mm_shuffle_epi32(q0p0, 78);
+                                            const unsigned char *_blimit,
+                                            const unsigned char *_limit,
+                                            const unsigned char *_thresh) {
+  __m128i mask, hev, flat, flat2;
+  const __m128i zero = _mm_set1_epi16(0);
+  const __m128i one = _mm_set1_epi8(1);
+  __m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1;
+  __m128i abs_p1p0;
+
+  const __m128i thresh =
+      _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0]));
+  const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0]));
+  const __m128i blimit =
+      _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0]));
+
+  q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p));
+  q4p4 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * p)));
+  q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
+  q3p3 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * p)));
+  q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
+  q2p2 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * p)));
+  q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
+  q1p1 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * p)));
+  p1q1 = _mm_shuffle_epi32(q1p1, 78);
+  q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
+  q0p0 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * p)));
+  p0q0 = _mm_shuffle_epi32(q0p0, 78);
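+
+  // Each qXpX register holds the p-side row in its low 64 bits and the
+  // q-side row in its high 64 bits, so one SSE operation filters both
+  // sides of the edge at once; _mm_shuffle_epi32(x, 78) swaps the two
+  // halves to build the mirrored pXqX operands.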
+
+  {
+    __m128i abs_p1q1, abs_p0q0, abs_q1q0, fe, ff, work;
+    abs_p1p0 =
+        _mm_or_si128(_mm_subs_epu8(q1p1, q0p0), _mm_subs_epu8(q0p0, q1p1));
+    abs_q1q0 = _mm_srli_si128(abs_p1p0, 8);
+    fe = _mm_set1_epi8(0xfe);
+    ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
+    abs_p0q0 =
+        _mm_or_si128(_mm_subs_epu8(q0p0, p0q0), _mm_subs_epu8(p0q0, q0p0));
+    abs_p1q1 =
+        _mm_or_si128(_mm_subs_epu8(q1p1, p1q1), _mm_subs_epu8(p1q1, q1p1));
+    flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
+    hev = _mm_subs_epu8(flat, thresh);
+    hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
+
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
+    abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
+    mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+    mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
+    // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+    mask = _mm_max_epu8(abs_p1p0, mask);
+    // mask |= (abs(p1 - p0) > limit) * -1;
+    // mask |= (abs(q1 - q0) > limit) * -1;
+
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(q2p2, q1p1), _mm_subs_epu8(q1p1, q2p2)),
+        _mm_or_si128(_mm_subs_epu8(q3p3, q2p2), _mm_subs_epu8(q2p2, q3p3)));
+    mask = _mm_max_epu8(work, mask);
+    mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
+    mask = _mm_subs_epu8(mask, limit);
+    mask = _mm_cmpeq_epi8(mask, zero);
+  }
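+
+  // At this point mask is 0xFF for pixels whose p/q deltas stay within
+  // limit and blimit (the filter may run), and hev flags high edge
+  // variance pixels (|p1 - p0| or |q1 - q0| above thresh), which alters
+  // how the inner taps are applied below.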
+
+  // lp filter
+  {
+    const __m128i t4 = _mm_set1_epi8(4);
+    const __m128i t3 = _mm_set1_epi8(3);
+    const __m128i t80 = _mm_set1_epi8(0x80);
+    const __m128i t1 = _mm_set1_epi16(0x1);
+    __m128i qs1ps1 = _mm_xor_si128(q1p1, t80);
+    __m128i qs0ps0 = _mm_xor_si128(q0p0, t80);
+    __m128i qs0 = _mm_xor_si128(p0q0, t80);
+    __m128i qs1 = _mm_xor_si128(p1q1, t80);
+    __m128i filt;
+    __m128i work_a;
+    __m128i filter1, filter2;
+    __m128i flat2_q6p6, flat2_q5p5, flat2_q4p4, flat2_q3p3, flat2_q2p2;
+    __m128i flat2_q1p1, flat2_q0p0, flat_q2p2, flat_q1p1, flat_q0p0;
+
+    filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev);
+    work_a = _mm_subs_epi8(qs0, qs0ps0);
+    filt = _mm_adds_epi8(filt, work_a);
+    filt = _mm_adds_epi8(filt, work_a);
+    filt = _mm_adds_epi8(filt, work_a);
+    /* (vpx_filter + 3 * (qs0 - ps0)) & mask */
+    filt = _mm_and_si128(filt, mask);
+
+    filter1 = _mm_adds_epi8(filt, t4);
+    filter2 = _mm_adds_epi8(filt, t3);
+
+    filter1 = _mm_unpacklo_epi8(zero, filter1);
+    filter1 = _mm_srai_epi16(filter1, 0xB);
+    filter2 = _mm_unpacklo_epi8(zero, filter2);
+    filter2 = _mm_srai_epi16(filter2, 0xB);
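+    // Unpacking the signed byte into the high half of a 16-bit lane and
+    // shifting right by 11 (8 + 3) implements an arithmetic >> 3 of the
+    // 8-bit filter value without losing the sign.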
+
+    /* Filter1 >> 3: +filter2 on the p side, -filter1 on the q side */
+    filt = _mm_packs_epi16(filter2, _mm_subs_epi16(zero, filter1));
+    qs0ps0 = _mm_xor_si128(_mm_adds_epi8(qs0ps0, filt), t80);
+
+    /* filt >> 1: (filter1 + 1) >> 1, zeroed where hev is set */
+    filt = _mm_adds_epi16(filter1, t1);
+    filt = _mm_srai_epi16(filt, 1);
+    filt = _mm_andnot_si128(_mm_srai_epi16(_mm_unpacklo_epi8(zero, hev), 0x8),
+                            filt);
+    filt = _mm_packs_epi16(filt, _mm_subs_epi16(zero, filt));
+    qs1ps1 = _mm_xor_si128(_mm_adds_epi8(qs1ps1, filt), t80);
+    // loopfilter done
 
     {
-        __m128i abs_p1q1, abs_p0q0, abs_q1q0, fe, ff, work;
-        abs_p1p0 = _mm_or_si128(_mm_subs_epu8(q1p1, q0p0),
-                _mm_subs_epu8(q0p0, q1p1));
-        abs_q1q0 = _mm_srli_si128(abs_p1p0, 8);
-        fe = _mm_set1_epi8(0xfe);
-        ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
-        abs_p0q0 = _mm_or_si128(_mm_subs_epu8(q0p0, p0q0),
-                _mm_subs_epu8(p0q0, q0p0));
-        abs_p1q1 = _mm_or_si128(_mm_subs_epu8(q1p1, p1q1),
-                _mm_subs_epu8(p1q1, q1p1));
-        flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
-        hev = _mm_subs_epu8(flat, thresh);
-        hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
-
-        abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
-        abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
-        mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
-        mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
-        // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > blimit) * -1;
-        mask = _mm_max_epu8(abs_p1p0, mask);
-        // mask |= (abs(p1 - p0) > limit) * -1;
-        // mask |= (abs(q1 - q0) > limit) * -1;
-
-        work = _mm_max_epu8(
-                _mm_or_si128(_mm_subs_epu8(q2p2, q1p1),
-                        _mm_subs_epu8(q1p1, q2p2)),
-                _mm_or_si128(_mm_subs_epu8(q3p3, q2p2),
-                        _mm_subs_epu8(q2p2, q3p3)));
-        mask = _mm_max_epu8(work, mask);
-        mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
-        mask = _mm_subs_epu8(mask, limit);
-        mask = _mm_cmpeq_epi8(mask, zero);
+      __m128i work;
+      flat = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(q2p2, q0p0), _mm_subs_epu8(q0p0, q2p2)),
+          _mm_or_si128(_mm_subs_epu8(q3p3, q0p0), _mm_subs_epu8(q0p0, q3p3)));
+      flat = _mm_max_epu8(abs_p1p0, flat);
+      flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8));
+      flat = _mm_subs_epu8(flat, one);
+      flat = _mm_cmpeq_epi8(flat, zero);
+      flat = _mm_and_si128(flat, mask);
+
+      q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p));
+      q5p5 = _mm_castps_si128(
+          _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * p)));
+
+      q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p));
+      q6p6 = _mm_castps_si128(
+          _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * p)));
+
+      flat2 = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(q4p4, q0p0), _mm_subs_epu8(q0p0, q4p4)),
+          _mm_or_si128(_mm_subs_epu8(q5p5, q0p0), _mm_subs_epu8(q0p0, q5p5)));
+
+      q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p));
+      q7p7 = _mm_castps_si128(
+          _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * p)));
+
+      work = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(q6p6, q0p0), _mm_subs_epu8(q0p0, q6p6)),
+          _mm_or_si128(_mm_subs_epu8(q7p7, q0p0), _mm_subs_epu8(q0p0, q7p7)));
+
+      flat2 = _mm_max_epu8(work, flat2);
+      flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8));
+      flat2 = _mm_subs_epu8(flat2, one);
+      flat2 = _mm_cmpeq_epi8(flat2, zero);
+      flat2 = _mm_and_si128(flat2, flat);  // flat2 & flat & mask
     }
 
-    // lp filter
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    // flat and wide flat calculations
     {
-        const __m128i t4 = _mm_set1_epi8(4);
-        const __m128i t3 = _mm_set1_epi8(3);
-        const __m128i t80 = _mm_set1_epi8(0x80);
-        const __m128i t1 = _mm_set1_epi16(0x1);
-        __m128i qs1ps1 = _mm_xor_si128(q1p1, t80);
-        __m128i qs0ps0 = _mm_xor_si128(q0p0, t80);
-        __m128i qs0 = _mm_xor_si128(p0q0, t80);
-        __m128i qs1 = _mm_xor_si128(p1q1, t80);
-        __m128i filt;
-        __m128i work_a;
-        __m128i filter1, filter2;
-        __m128i flat2_q6p6, flat2_q5p5, flat2_q4p4, flat2_q3p3, flat2_q2p2;
-        __m128i flat2_q1p1, flat2_q0p0, flat_q2p2, flat_q1p1, flat_q0p0;
-
-        filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev);
-        work_a = _mm_subs_epi8(qs0, qs0ps0);
-        filt = _mm_adds_epi8(filt, work_a);
-        filt = _mm_adds_epi8(filt, work_a);
-        filt = _mm_adds_epi8(filt, work_a);
-        /* (vpx_filter + 3 * (qs0 - ps0)) & mask */
-        filt = _mm_and_si128(filt, mask);
-
-        filter1 = _mm_adds_epi8(filt, t4);
-        filter2 = _mm_adds_epi8(filt, t3);
-
-        filter1 = _mm_unpacklo_epi8(zero, filter1);
-        filter1 = _mm_srai_epi16(filter1, 0xB);
-        filter2 = _mm_unpacklo_epi8(zero, filter2);
-        filter2 = _mm_srai_epi16(filter2, 0xB);
-
-        /* Filter1 >> 3 */
-        filt = _mm_packs_epi16(filter2, _mm_subs_epi16(zero, filter1));
-        qs0ps0 = _mm_xor_si128(_mm_adds_epi8(qs0ps0, filt), t80);
-
-        /* filt >> 1 */
-        filt = _mm_adds_epi16(filter1, t1);
-        filt = _mm_srai_epi16(filt, 1);
-        filt = _mm_andnot_si128(
-                _mm_srai_epi16(_mm_unpacklo_epi8(zero, hev), 0x8), filt);
-        filt = _mm_packs_epi16(filt, _mm_subs_epi16(zero, filt));
-        qs1ps1 = _mm_xor_si128(_mm_adds_epi8(qs1ps1, filt), t80);
-        // loopfilter done
-
-        {
-            __m128i work;
-            flat = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(q2p2, q0p0),
-                            _mm_subs_epu8(q0p0, q2p2)),
-                    _mm_or_si128(_mm_subs_epu8(q3p3, q0p0),
-                            _mm_subs_epu8(q0p0, q3p3)));
-            flat = _mm_max_epu8(abs_p1p0, flat);
-            flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8));
-            flat = _mm_subs_epu8(flat, one);
-            flat = _mm_cmpeq_epi8(flat, zero);
-            flat = _mm_and_si128(flat, mask);
-
-            q5p5 = _mm_loadl_epi64((__m128i *) (s - 6 * p));
-            q5p5 = _mm_castps_si128(
-                    _mm_loadh_pi(_mm_castsi128_ps(q5p5),
-                            (__m64 *) (s + 5 * p)));
-
-            q6p6 = _mm_loadl_epi64((__m128i *) (s - 7 * p));
-            q6p6 = _mm_castps_si128(
-                    _mm_loadh_pi(_mm_castsi128_ps(q6p6),
-                            (__m64 *) (s + 6 * p)));
-
-            flat2 = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(q4p4, q0p0),
-                            _mm_subs_epu8(q0p0, q4p4)),
-                    _mm_or_si128(_mm_subs_epu8(q5p5, q0p0),
-                            _mm_subs_epu8(q0p0, q5p5)));
-
-            q7p7 = _mm_loadl_epi64((__m128i *) (s - 8 * p));
-            q7p7 = _mm_castps_si128(
-                    _mm_loadh_pi(_mm_castsi128_ps(q7p7),
-                            (__m64 *) (s + 7 * p)));
-
-            work = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(q6p6, q0p0),
-                            _mm_subs_epu8(q0p0, q6p6)),
-                    _mm_or_si128(_mm_subs_epu8(q7p7, q0p0),
-                            _mm_subs_epu8(q0p0, q7p7)));
-
-            flat2 = _mm_max_epu8(work, flat2);
-            flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8));
-            flat2 = _mm_subs_epu8(flat2, one);
-            flat2 = _mm_cmpeq_epi8(flat2, zero);
-            flat2 = _mm_and_si128(flat2, flat);  // flat2 & flat & mask
-        }
-
-        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-        // flat and wide flat calculations
-        {
-            const __m128i eight = _mm_set1_epi16(8);
-            const __m128i four = _mm_set1_epi16(4);
-            __m128i p7_16, p6_16, p5_16, p4_16, p3_16, p2_16, p1_16, p0_16;
-            __m128i q7_16, q6_16, q5_16, q4_16, q3_16, q2_16, q1_16, q0_16;
-            __m128i pixelFilter_p, pixelFilter_q;
-            __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0;
-            __m128i sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q;
-
-            p7_16 = _mm_unpacklo_epi8(q7p7, zero);
-            p6_16 = _mm_unpacklo_epi8(q6p6, zero);
-            p5_16 = _mm_unpacklo_epi8(q5p5, zero);
-            p4_16 = _mm_unpacklo_epi8(q4p4, zero);
-            p3_16 = _mm_unpacklo_epi8(q3p3, zero);
-            p2_16 = _mm_unpacklo_epi8(q2p2, zero);
-            p1_16 = _mm_unpacklo_epi8(q1p1, zero);
-            p0_16 = _mm_unpacklo_epi8(q0p0, zero);
-            q0_16 = _mm_unpackhi_epi8(q0p0, zero);
-            q1_16 = _mm_unpackhi_epi8(q1p1, zero);
-            q2_16 = _mm_unpackhi_epi8(q2p2, zero);
-            q3_16 = _mm_unpackhi_epi8(q3p3, zero);
-            q4_16 = _mm_unpackhi_epi8(q4p4, zero);
-            q5_16 = _mm_unpackhi_epi8(q5p5, zero);
-            q6_16 = _mm_unpackhi_epi8(q6p6, zero);
-            q7_16 = _mm_unpackhi_epi8(q7p7, zero);
-
-            pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6_16, p5_16),
-                    _mm_add_epi16(p4_16, p3_16));
-            pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6_16, q5_16),
-                    _mm_add_epi16(q4_16, q3_16));
-
-            pixetFilter_p2p1p0 = _mm_add_epi16(p0_16,
-                    _mm_add_epi16(p2_16, p1_16));
-            pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
-
-            pixetFilter_q2q1q0 = _mm_add_epi16(q0_16,
-                    _mm_add_epi16(q2_16, q1_16));
-            pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
-            pixelFilter_p = _mm_add_epi16(eight,
-                    _mm_add_epi16(pixelFilter_p, pixelFilter_q));
-            pixetFilter_p2p1p0 = _mm_add_epi16(four,
-                    _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0));
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7_16, p0_16)),
-                    4);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7_16, q0_16)),
-                    4);
-            flat2_q0p0 = _mm_packus_epi16(res_p, res_q);
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixetFilter_p2p1p0,
-                            _mm_add_epi16(p3_16, p0_16)), 3);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixetFilter_p2p1p0,
-                            _mm_add_epi16(q3_16, q0_16)), 3);
-
-            flat_q0p0 = _mm_packus_epi16(res_p, res_q);
-
-            sum_p7 = _mm_add_epi16(p7_16, p7_16);
-            sum_q7 = _mm_add_epi16(q7_16, q7_16);
-            sum_p3 = _mm_add_epi16(p3_16, p3_16);
-            sum_q3 = _mm_add_epi16(q3_16, q3_16);
-
-            pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6_16);
-            pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6_16);
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1_16)),
-                    4);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1_16)),
-                    4);
-            flat2_q1p1 = _mm_packus_epi16(res_p, res_q);
-
-            pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2_16);
-            pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2_16);
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixetFilter_p2p1p0,
-                            _mm_add_epi16(sum_p3, p1_16)), 3);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixetFilter_q2q1q0,
-                            _mm_add_epi16(sum_q3, q1_16)), 3);
-            flat_q1p1 = _mm_packus_epi16(res_p, res_q);
-
-            sum_p7 = _mm_add_epi16(sum_p7, p7_16);
-            sum_q7 = _mm_add_epi16(sum_q7, q7_16);
-            sum_p3 = _mm_add_epi16(sum_p3, p3_16);
-            sum_q3 = _mm_add_epi16(sum_q3, q3_16);
-
-            pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5_16);
-            pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5_16);
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2_16)),
-                    4);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2_16)),
-                    4);
-            flat2_q2p2 = _mm_packus_epi16(res_p, res_q);
-
-            pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1_16);
-            pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1_16);
-
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixetFilter_p2p1p0,
-                            _mm_add_epi16(sum_p3, p2_16)), 3);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixetFilter_q2q1q0,
-                            _mm_add_epi16(sum_q3, q2_16)), 3);
-            flat_q2p2 = _mm_packus_epi16(res_p, res_q);
-
-            sum_p7 = _mm_add_epi16(sum_p7, p7_16);
-            sum_q7 = _mm_add_epi16(sum_q7, q7_16);
-            pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4_16);
-            pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4_16);
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3_16)),
-                    4);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3_16)),
-                    4);
-            flat2_q3p3 = _mm_packus_epi16(res_p, res_q);
-
-            sum_p7 = _mm_add_epi16(sum_p7, p7_16);
-            sum_q7 = _mm_add_epi16(sum_q7, q7_16);
-            pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3_16);
-            pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3_16);
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4_16)),
-                    4);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4_16)),
-                    4);
-            flat2_q4p4 = _mm_packus_epi16(res_p, res_q);
-
-            sum_p7 = _mm_add_epi16(sum_p7, p7_16);
-            sum_q7 = _mm_add_epi16(sum_q7, q7_16);
-            pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2_16);
-            pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2_16);
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5_16)),
-                    4);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5_16)),
-                    4);
-            flat2_q5p5 = _mm_packus_epi16(res_p, res_q);
-
-            sum_p7 = _mm_add_epi16(sum_p7, p7_16);
-            sum_q7 = _mm_add_epi16(sum_q7, q7_16);
-            pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1_16);
-            pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1_16);
-            res_p = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6_16)),
-                    4);
-            res_q = _mm_srli_epi16(
-                    _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6_16)),
-                    4);
-            flat2_q6p6 = _mm_packus_epi16(res_p, res_q);
-        }
-        // wide flat
-        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-        flat = _mm_shuffle_epi32(flat, 68);
-        flat2 = _mm_shuffle_epi32(flat2, 68);
-
-        q2p2 = _mm_andnot_si128(flat, q2p2);
-        flat_q2p2 = _mm_and_si128(flat, flat_q2p2);
-        q2p2 = _mm_or_si128(q2p2, flat_q2p2);
-
-        qs1ps1 = _mm_andnot_si128(flat, qs1ps1);
-        flat_q1p1 = _mm_and_si128(flat, flat_q1p1);
-        q1p1 = _mm_or_si128(qs1ps1, flat_q1p1);
-
-        qs0ps0 = _mm_andnot_si128(flat, qs0ps0);
-        flat_q0p0 = _mm_and_si128(flat, flat_q0p0);
-        q0p0 = _mm_or_si128(qs0ps0, flat_q0p0);
-
-        q6p6 = _mm_andnot_si128(flat2, q6p6);
-        flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6);
-        q6p6 = _mm_or_si128(q6p6, flat2_q6p6);
-        _mm_storel_epi64((__m128i *) (s - 7 * p), q6p6);
-        _mm_storeh_pi((__m64 *) (s + 6 * p), _mm_castsi128_ps(q6p6));
-
-        q5p5 = _mm_andnot_si128(flat2, q5p5);
-        flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5);
-        q5p5 = _mm_or_si128(q5p5, flat2_q5p5);
-        _mm_storel_epi64((__m128i *) (s - 6 * p), q5p5);
-        _mm_storeh_pi((__m64 *) (s + 5 * p), _mm_castsi128_ps(q5p5));
-
-        q4p4 = _mm_andnot_si128(flat2, q4p4);
-        flat2_q4p4 = _mm_and_si128(flat2, flat2_q4p4);
-        q4p4 = _mm_or_si128(q4p4, flat2_q4p4);
-        _mm_storel_epi64((__m128i *) (s - 5 * p), q4p4);
-        _mm_storeh_pi((__m64 *) (s + 4 * p), _mm_castsi128_ps(q4p4));
-
-        q3p3 = _mm_andnot_si128(flat2, q3p3);
-        flat2_q3p3 = _mm_and_si128(flat2, flat2_q3p3);
-        q3p3 = _mm_or_si128(q3p3, flat2_q3p3);
-        _mm_storel_epi64((__m128i *) (s - 4 * p), q3p3);
-        _mm_storeh_pi((__m64 *) (s + 3 * p), _mm_castsi128_ps(q3p3));
-
-        q2p2 = _mm_andnot_si128(flat2, q2p2);
-        flat2_q2p2 = _mm_and_si128(flat2, flat2_q2p2);
-        q2p2 = _mm_or_si128(q2p2, flat2_q2p2);
-        _mm_storel_epi64((__m128i *) (s - 3 * p), q2p2);
-        _mm_storeh_pi((__m64 *) (s + 2 * p), _mm_castsi128_ps(q2p2));
-
-        q1p1 = _mm_andnot_si128(flat2, q1p1);
-        flat2_q1p1 = _mm_and_si128(flat2, flat2_q1p1);
-        q1p1 = _mm_or_si128(q1p1, flat2_q1p1);
-        _mm_storel_epi64((__m128i *) (s - 2 * p), q1p1);
-        _mm_storeh_pi((__m64 *) (s + 1 * p), _mm_castsi128_ps(q1p1));
-
-        q0p0 = _mm_andnot_si128(flat2, q0p0);
-        flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0);
-        q0p0 = _mm_or_si128(q0p0, flat2_q0p0);
-        _mm_storel_epi64((__m128i *) (s - 1 * p), q0p0);
-        _mm_storeh_pi((__m64 *) (s - 0 * p), _mm_castsi128_ps(q0p0));
+      const __m128i eight = _mm_set1_epi16(8);
+      const __m128i four = _mm_set1_epi16(4);
+      __m128i p7_16, p6_16, p5_16, p4_16, p3_16, p2_16, p1_16, p0_16;
+      __m128i q7_16, q6_16, q5_16, q4_16, q3_16, q2_16, q1_16, q0_16;
+      __m128i pixelFilter_p, pixelFilter_q;
+      __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0;
+      __m128i sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q;
+
+      p7_16 = _mm_unpacklo_epi8(q7p7, zero);
+      p6_16 = _mm_unpacklo_epi8(q6p6, zero);
+      p5_16 = _mm_unpacklo_epi8(q5p5, zero);
+      p4_16 = _mm_unpacklo_epi8(q4p4, zero);
+      p3_16 = _mm_unpacklo_epi8(q3p3, zero);
+      p2_16 = _mm_unpacklo_epi8(q2p2, zero);
+      p1_16 = _mm_unpacklo_epi8(q1p1, zero);
+      p0_16 = _mm_unpacklo_epi8(q0p0, zero);
+      q0_16 = _mm_unpackhi_epi8(q0p0, zero);
+      q1_16 = _mm_unpackhi_epi8(q1p1, zero);
+      q2_16 = _mm_unpackhi_epi8(q2p2, zero);
+      q3_16 = _mm_unpackhi_epi8(q3p3, zero);
+      q4_16 = _mm_unpackhi_epi8(q4p4, zero);
+      q5_16 = _mm_unpackhi_epi8(q5p5, zero);
+      q6_16 = _mm_unpackhi_epi8(q6p6, zero);
+      q7_16 = _mm_unpackhi_epi8(q7p7, zero);
+
+      pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6_16, p5_16),
+                                    _mm_add_epi16(p4_16, p3_16));
+      pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6_16, q5_16),
+                                    _mm_add_epi16(q4_16, q3_16));
+
+      pixetFilter_p2p1p0 = _mm_add_epi16(p0_16, _mm_add_epi16(p2_16, p1_16));
+      pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
+
+      pixetFilter_q2q1q0 = _mm_add_epi16(q0_16, _mm_add_epi16(q2_16, q1_16));
+      pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
+      pixelFilter_p =
+          _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, pixelFilter_q));
+      pixetFilter_p2p1p0 = _mm_add_epi16(
+          four, _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0));
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7_16, p0_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7_16, q0_16)), 4);
+      flat2_q0p0 = _mm_packus_epi16(res_p, res_q);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(p3_16, p0_16)), 3);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(q3_16, q0_16)), 3);
+
+      flat_q0p0 = _mm_packus_epi16(res_p, res_q);
+
+      sum_p7 = _mm_add_epi16(p7_16, p7_16);
+      sum_q7 = _mm_add_epi16(q7_16, q7_16);
+      sum_p3 = _mm_add_epi16(p3_16, p3_16);
+      sum_q3 = _mm_add_epi16(q3_16, q3_16);
+
+      pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6_16);
+      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6_16);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1_16)), 4);
+      flat2_q1p1 = _mm_packus_epi16(res_p, res_q);
+
+      pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2_16);
+      pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2_16);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p1_16)), 3);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q1_16)), 3);
+      flat_q1p1 = _mm_packus_epi16(res_p, res_q);
+
+      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+      sum_p3 = _mm_add_epi16(sum_p3, p3_16);
+      sum_q3 = _mm_add_epi16(sum_q3, q3_16);
+
+      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5_16);
+      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5_16);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2_16)), 4);
+      flat2_q2p2 = _mm_packus_epi16(res_p, res_q);
+
+      pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1_16);
+      pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1_16);
+
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p2_16)), 3);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q2_16)), 3);
+      flat_q2p2 = _mm_packus_epi16(res_p, res_q);
+
+      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4_16);
+      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4_16);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3_16)), 4);
+      flat2_q3p3 = _mm_packus_epi16(res_p, res_q);
+
+      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3_16);
+      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3_16);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4_16)), 4);
+      flat2_q4p4 = _mm_packus_epi16(res_p, res_q);
+
+      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2_16);
+      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2_16);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5_16)), 4);
+      flat2_q5p5 = _mm_packus_epi16(res_p, res_q);
+
+      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1_16);
+      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1_16);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6_16)), 4);
+      flat2_q6p6 = _mm_packus_epi16(res_p, res_q);
     }
+    // wide flat
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    flat = _mm_shuffle_epi32(flat, 68);
+    flat2 = _mm_shuffle_epi32(flat2, 68);
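+    // Shuffle control 68 duplicates each mask's low 64 bits into both
+    // halves so it can gate the packed q/p registers; the andnot/and/or
+    // triples below blend per pixel between the ordinary filter output,
+    // the flat filter output and the wide (flat2) filter output.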
+
+    q2p2 = _mm_andnot_si128(flat, q2p2);
+    flat_q2p2 = _mm_and_si128(flat, flat_q2p2);
+    q2p2 = _mm_or_si128(q2p2, flat_q2p2);
+
+    qs1ps1 = _mm_andnot_si128(flat, qs1ps1);
+    flat_q1p1 = _mm_and_si128(flat, flat_q1p1);
+    q1p1 = _mm_or_si128(qs1ps1, flat_q1p1);
+
+    qs0ps0 = _mm_andnot_si128(flat, qs0ps0);
+    flat_q0p0 = _mm_and_si128(flat, flat_q0p0);
+    q0p0 = _mm_or_si128(qs0ps0, flat_q0p0);
+
+    q6p6 = _mm_andnot_si128(flat2, q6p6);
+    flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6);
+    q6p6 = _mm_or_si128(q6p6, flat2_q6p6);
+    _mm_storel_epi64((__m128i *)(s - 7 * p), q6p6);
+    _mm_storeh_pi((__m64 *)(s + 6 * p), _mm_castsi128_ps(q6p6));
+
+    q5p5 = _mm_andnot_si128(flat2, q5p5);
+    flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5);
+    q5p5 = _mm_or_si128(q5p5, flat2_q5p5);
+    _mm_storel_epi64((__m128i *)(s - 6 * p), q5p5);
+    _mm_storeh_pi((__m64 *)(s + 5 * p), _mm_castsi128_ps(q5p5));
+
+    q4p4 = _mm_andnot_si128(flat2, q4p4);
+    flat2_q4p4 = _mm_and_si128(flat2, flat2_q4p4);
+    q4p4 = _mm_or_si128(q4p4, flat2_q4p4);
+    _mm_storel_epi64((__m128i *)(s - 5 * p), q4p4);
+    _mm_storeh_pi((__m64 *)(s + 4 * p), _mm_castsi128_ps(q4p4));
+
+    q3p3 = _mm_andnot_si128(flat2, q3p3);
+    flat2_q3p3 = _mm_and_si128(flat2, flat2_q3p3);
+    q3p3 = _mm_or_si128(q3p3, flat2_q3p3);
+    _mm_storel_epi64((__m128i *)(s - 4 * p), q3p3);
+    _mm_storeh_pi((__m64 *)(s + 3 * p), _mm_castsi128_ps(q3p3));
+
+    q2p2 = _mm_andnot_si128(flat2, q2p2);
+    flat2_q2p2 = _mm_and_si128(flat2, flat2_q2p2);
+    q2p2 = _mm_or_si128(q2p2, flat2_q2p2);
+    _mm_storel_epi64((__m128i *)(s - 3 * p), q2p2);
+    _mm_storeh_pi((__m64 *)(s + 2 * p), _mm_castsi128_ps(q2p2));
+
+    q1p1 = _mm_andnot_si128(flat2, q1p1);
+    flat2_q1p1 = _mm_and_si128(flat2, flat2_q1p1);
+    q1p1 = _mm_or_si128(q1p1, flat2_q1p1);
+    _mm_storel_epi64((__m128i *)(s - 2 * p), q1p1);
+    _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(q1p1));
+
+    q0p0 = _mm_andnot_si128(flat2, q0p0);
+    flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0);
+    q0p0 = _mm_or_si128(q0p0, flat2_q0p0);
+    _mm_storel_epi64((__m128i *)(s - 1 * p), q0p0);
+    _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0));
+  }
 }
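+
+// Byte-shuffle control for _mm256_shuffle_epi8 below: indices 0..15 select
+// the pixel bytes and 128 (bit 7 set) produces a zero byte, so one shuffle
+// zero-extends sixteen 8-bit pixels to sixteen 16-bit lanes.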
 
 DECLARE_ALIGNED(32, static const uint8_t, filt_loopfilter_avx2[32]) = {
-  0, 128, 1, 128, 2, 128, 3, 128, 4, 128, 5, 128, 6, 128, 7, 128,
+  0, 128, 1, 128, 2,  128, 3,  128, 4,  128, 5,  128, 6,  128, 7,  128,
   8, 128, 9, 128, 10, 128, 11, 128, 12, 128, 13, 128, 14, 128, 15, 128
 };
 
 static void mb_lpf_horizontal_edge_w_avx2_16(unsigned char *s, int p,
-        const unsigned char *_blimit, const unsigned char *_limit,
-        const unsigned char *_thresh) {
-    __m128i mask, hev, flat, flat2;
-    const __m128i zero = _mm_set1_epi16(0);
-    const __m128i one = _mm_set1_epi8(1);
-    __m128i p7, p6, p5;
-    __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;
-    __m128i q5, q6, q7;
-    __m256i p256_7, q256_7, p256_6, q256_6, p256_5, q256_5, p256_4,
-            q256_4, p256_3, q256_3, p256_2, q256_2, p256_1, q256_1,
-            p256_0, q256_0;
-
-    const __m128i thresh = _mm_broadcastb_epi8(
-            _mm_cvtsi32_si128((int) _thresh[0]));
-    const __m128i limit = _mm_broadcastb_epi8(
-            _mm_cvtsi32_si128((int) _limit[0]));
-    const __m128i blimit = _mm_broadcastb_epi8(
-            _mm_cvtsi32_si128((int) _blimit[0]));
-
-    p256_4 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s - 5 * p)));
-    p256_3 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s - 4 * p)));
-    p256_2 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s - 3 * p)));
-    p256_1 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s - 2 * p)));
-    p256_0 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s - 1 * p)));
-    q256_0 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s - 0 * p)));
-    q256_1 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s + 1 * p)));
-    q256_2 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s + 2 * p)));
-    q256_3 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s + 3 * p)));
-    q256_4 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                (__m128d const *)(s + 4 * p)));
-
-    p4 = _mm256_castsi256_si128(p256_4);
-    p3 = _mm256_castsi256_si128(p256_3);
-    p2 = _mm256_castsi256_si128(p256_2);
-    p1 = _mm256_castsi256_si128(p256_1);
-    p0 = _mm256_castsi256_si128(p256_0);
-    q0 = _mm256_castsi256_si128(q256_0);
-    q1 = _mm256_castsi256_si128(q256_1);
-    q2 = _mm256_castsi256_si128(q256_2);
-    q3 = _mm256_castsi256_si128(q256_3);
-    q4 = _mm256_castsi256_si128(q256_4);
+                                             const unsigned char *_blimit,
+                                             const unsigned char *_limit,
+                                             const unsigned char *_thresh) {
+  __m128i mask, hev, flat, flat2;
+  const __m128i zero = _mm_set1_epi16(0);
+  const __m128i one = _mm_set1_epi8(1);
+  __m128i p7, p6, p5;
+  __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;
+  __m128i q5, q6, q7;
+  __m256i p256_7, q256_7, p256_6, q256_6, p256_5, q256_5, p256_4, q256_4,
+      p256_3, q256_3, p256_2, q256_2, p256_1, q256_1, p256_0, q256_0;
+
+  const __m128i thresh =
+      _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0]));
+  const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0]));
+  const __m128i blimit =
+      _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0]));
+
+  p256_4 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 5 * p)));
+  p256_3 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 4 * p)));
+  p256_2 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 3 * p)));
+  p256_1 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 2 * p)));
+  p256_0 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 1 * p)));
+  q256_0 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 0 * p)));
+  q256_1 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 1 * p)));
+  q256_2 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 2 * p)));
+  q256_3 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 3 * p)));
+  q256_4 =
+      _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 4 * p)));
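+
+  // _mm256_broadcast_pd duplicates each 16-pixel row into both 128-bit
+  // lanes; the byte shuffle in the wide-flat block later widens one 8-pixel
+  // half per lane to 16-bit precision.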
+
+  p4 = _mm256_castsi256_si128(p256_4);
+  p3 = _mm256_castsi256_si128(p256_3);
+  p2 = _mm256_castsi256_si128(p256_2);
+  p1 = _mm256_castsi256_si128(p256_1);
+  p0 = _mm256_castsi256_si128(p256_0);
+  q0 = _mm256_castsi256_si128(q256_0);
+  q1 = _mm256_castsi256_si128(q256_1);
+  q2 = _mm256_castsi256_si128(q256_2);
+  q3 = _mm256_castsi256_si128(q256_3);
+  q4 = _mm256_castsi256_si128(q256_4);
+
+  {
+    const __m128i abs_p1p0 =
+        _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1));
+    const __m128i abs_q1q0 =
+        _mm_or_si128(_mm_subs_epu8(q1, q0), _mm_subs_epu8(q0, q1));
+    const __m128i fe = _mm_set1_epi8(0xfe);
+    const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
+    __m128i abs_p0q0 =
+        _mm_or_si128(_mm_subs_epu8(p0, q0), _mm_subs_epu8(q0, p0));
+    __m128i abs_p1q1 =
+        _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1));
+    __m128i work;
+    flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
+    hev = _mm_subs_epu8(flat, thresh);
+    hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
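+    // hev: 0xff where max(|p1 - p0|, |q1 - q0|) > thresh (high edge
+    // variance)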
+
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
+    abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
+    mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+    mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
+    // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+    mask = _mm_max_epu8(flat, mask);
+    // mask |= (abs(p1 - p0) > limit) * -1;
+    // mask |= (abs(q1 - q0) > limit) * -1;
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)),
+        _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3)));
+    mask = _mm_max_epu8(work, mask);
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)),
+        _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
+    mask = _mm_max_epu8(work, mask);
+    mask = _mm_subs_epu8(mask, limit);
+    mask = _mm_cmpeq_epi8(mask, zero);
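+    // mask: 0xff only where every difference stayed within limit/blimit,
+    // i.e. where the loop filter is applied at all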
+  }
+
+  // lp filter
+  {
+    const __m128i t4 = _mm_set1_epi8(4);
+    const __m128i t3 = _mm_set1_epi8(3);
+    const __m128i t80 = _mm_set1_epi8(0x80);
+    const __m128i te0 = _mm_set1_epi8(0xe0);
+    const __m128i t1f = _mm_set1_epi8(0x1f);
+    const __m128i t1 = _mm_set1_epi8(0x1);
+    const __m128i t7f = _mm_set1_epi8(0x7f);
+
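+    // xor with 0x80 re-biases the unsigned pixels to signed values so the
+    // saturating signed adds/subs below match the scalar filter math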
+    __m128i ps1 = _mm_xor_si128(p1, t80);
+    __m128i ps0 = _mm_xor_si128(p0, t80);
+    __m128i qs0 = _mm_xor_si128(q0, t80);
+    __m128i qs1 = _mm_xor_si128(q1, t80);
+    __m128i filt;
+    __m128i work_a;
+    __m128i filter1, filter2;
+    __m128i flat2_p6, flat2_p5, flat2_p4, flat2_p3, flat2_p2, flat2_p1,
+        flat2_p0, flat2_q0, flat2_q1, flat2_q2, flat2_q3, flat2_q4, flat2_q5,
+        flat2_q6, flat_p2, flat_p1, flat_p0, flat_q0, flat_q1, flat_q2;
+
+    filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev);
+    work_a = _mm_subs_epi8(qs0, ps0);
+    filt = _mm_adds_epi8(filt, work_a);
+    filt = _mm_adds_epi8(filt, work_a);
+    filt = _mm_adds_epi8(filt, work_a);
+    /* (vpx_filter + 3 * (qs0 - ps0)) & mask */
+    filt = _mm_and_si128(filt, mask);
+
+    filter1 = _mm_adds_epi8(filt, t4);
+    filter2 = _mm_adds_epi8(filt, t3);
+
+    /* Filter1 >> 3 */
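+    /* there is no arithmetic byte shift, so shift 16-bit lanes logically,
+       keep the low 5 bits (t1f) and patch the top 3 sign bits (te0) back in
+       for negative lanes */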
+    work_a = _mm_cmpgt_epi8(zero, filter1);
+    filter1 = _mm_srli_epi16(filter1, 3);
+    work_a = _mm_and_si128(work_a, te0);
+    filter1 = _mm_and_si128(filter1, t1f);
+    filter1 = _mm_or_si128(filter1, work_a);
+    qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80);
+
+    /* Filter2 >> 3 */
+    work_a = _mm_cmpgt_epi8(zero, filter2);
+    filter2 = _mm_srli_epi16(filter2, 3);
+    work_a = _mm_and_si128(work_a, te0);
+    filter2 = _mm_and_si128(filter2, t1f);
+    filter2 = _mm_or_si128(filter2, work_a);
+    ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
+
+    /* filt >> 1 */
+    filt = _mm_adds_epi8(filter1, t1);
+    work_a = _mm_cmpgt_epi8(zero, filt);
+    filt = _mm_srli_epi16(filt, 1);
+    work_a = _mm_and_si128(work_a, t80);
+    filt = _mm_and_si128(filt, t7f);
+    filt = _mm_or_si128(filt, work_a);
+    filt = _mm_andnot_si128(hev, filt);
+    ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
+    qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80);
+    // 4-tap loop filter done: ps1/ps0/qs0/qs1 now hold the filtered pixels
 
     {
-        const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
-                _mm_subs_epu8(p0, p1));
-        const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
-                _mm_subs_epu8(q0, q1));
-        const __m128i fe = _mm_set1_epi8(0xfe);
-        const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
-        __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
-                _mm_subs_epu8(q0, p0));
-        __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1),
-                _mm_subs_epu8(q1, p1));
-        __m128i work;
-        flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
-        hev = _mm_subs_epu8(flat, thresh);
-        hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
-
-        abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
-        abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
-        mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
-        mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
-        // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > blimit) * -1;
-        mask = _mm_max_epu8(flat, mask);
-        // mask |= (abs(p1 - p0) > limit) * -1;
-        // mask |= (abs(q1 - q0) > limit) * -1;
-        work = _mm_max_epu8(
-                _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)),
-                _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3)));
-        mask = _mm_max_epu8(work, mask);
-        work = _mm_max_epu8(
-                _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)),
-                _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
-        mask = _mm_max_epu8(work, mask);
-        mask = _mm_subs_epu8(mask, limit);
-        mask = _mm_cmpeq_epi8(mask, zero);
+      __m128i work;
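+      // flat: 0xff where p3..p1/q1..q3 all lie within 1 of p0/q0, so the
+      // 7-tap filter applies; flat2 extends the test to p7..p4/q4..q7 for
+      // the 15-tap filter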
+      work = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)),
+          _mm_or_si128(_mm_subs_epu8(q2, q0), _mm_subs_epu8(q0, q2)));
+      flat = _mm_max_epu8(work, flat);
+      work = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)),
+          _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3)));
+      flat = _mm_max_epu8(work, flat);
+      work = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(p4, p0), _mm_subs_epu8(p0, p4)),
+          _mm_or_si128(_mm_subs_epu8(q4, q0), _mm_subs_epu8(q0, q4)));
+      flat = _mm_subs_epu8(flat, one);
+      flat = _mm_cmpeq_epi8(flat, zero);
+      flat = _mm_and_si128(flat, mask);
+
+      p256_5 = _mm256_castpd_si256(
+          _mm256_broadcast_pd((__m128d const *)(s - 6 * p)));
+      q256_5 = _mm256_castpd_si256(
+          _mm256_broadcast_pd((__m128d const *)(s + 5 * p)));
+      p5 = _mm256_castsi256_si128(p256_5);
+      q5 = _mm256_castsi256_si128(q256_5);
+      flat2 = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(p5, p0), _mm_subs_epu8(p0, p5)),
+          _mm_or_si128(_mm_subs_epu8(q5, q0), _mm_subs_epu8(q0, q5)));
+
+      flat2 = _mm_max_epu8(work, flat2);
+      p256_6 = _mm256_castpd_si256(
+          _mm256_broadcast_pd((__m128d const *)(s - 7 * p)));
+      q256_6 = _mm256_castpd_si256(
+          _mm256_broadcast_pd((__m128d const *)(s + 6 * p)));
+      p6 = _mm256_castsi256_si128(p256_6);
+      q6 = _mm256_castsi256_si128(q256_6);
+      work = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(p6, p0), _mm_subs_epu8(p0, p6)),
+          _mm_or_si128(_mm_subs_epu8(q6, q0), _mm_subs_epu8(q0, q6)));
+
+      flat2 = _mm_max_epu8(work, flat2);
+
+      p256_7 = _mm256_castpd_si256(
+          _mm256_broadcast_pd((__m128d const *)(s - 8 * p)));
+      q256_7 = _mm256_castpd_si256(
+          _mm256_broadcast_pd((__m128d const *)(s + 7 * p)));
+      p7 = _mm256_castsi256_si128(p256_7);
+      q7 = _mm256_castsi256_si128(q256_7);
+      work = _mm_max_epu8(
+          _mm_or_si128(_mm_subs_epu8(p7, p0), _mm_subs_epu8(p0, p7)),
+          _mm_or_si128(_mm_subs_epu8(q7, q0), _mm_subs_epu8(q0, q7)));
+
+      flat2 = _mm_max_epu8(work, flat2);
+      flat2 = _mm_subs_epu8(flat2, one);
+      flat2 = _mm_cmpeq_epi8(flat2, zero);
+      flat2 = _mm_and_si128(flat2, flat);  // flat2 & flat & mask
     }
 
-    // lp filter
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    // flat and wide flat calculations
     {
-        const __m128i t4 = _mm_set1_epi8(4);
-        const __m128i t3 = _mm_set1_epi8(3);
-        const __m128i t80 = _mm_set1_epi8(0x80);
-        const __m128i te0 = _mm_set1_epi8(0xe0);
-        const __m128i t1f = _mm_set1_epi8(0x1f);
-        const __m128i t1 = _mm_set1_epi8(0x1);
-        const __m128i t7f = _mm_set1_epi8(0x7f);
-
-        __m128i ps1 = _mm_xor_si128(p1, t80);
-        __m128i ps0 = _mm_xor_si128(p0, t80);
-        __m128i qs0 = _mm_xor_si128(q0, t80);
-        __m128i qs1 = _mm_xor_si128(q1, t80);
-        __m128i filt;
-        __m128i work_a;
-        __m128i filter1, filter2;
-        __m128i flat2_p6, flat2_p5, flat2_p4, flat2_p3, flat2_p2, flat2_p1,
-                flat2_p0, flat2_q0, flat2_q1, flat2_q2, flat2_q3, flat2_q4,
-                flat2_q5, flat2_q6, flat_p2, flat_p1, flat_p0, flat_q0, flat_q1,
-                flat_q2;
-
-        filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev);
-        work_a = _mm_subs_epi8(qs0, ps0);
-        filt = _mm_adds_epi8(filt, work_a);
-        filt = _mm_adds_epi8(filt, work_a);
-        filt = _mm_adds_epi8(filt, work_a);
-        /* (vpx_filter + 3 * (qs0 - ps0)) & mask */
-        filt = _mm_and_si128(filt, mask);
-
-        filter1 = _mm_adds_epi8(filt, t4);
-        filter2 = _mm_adds_epi8(filt, t3);
-
-        /* Filter1 >> 3 */
-        work_a = _mm_cmpgt_epi8(zero, filter1);
-        filter1 = _mm_srli_epi16(filter1, 3);
-        work_a = _mm_and_si128(work_a, te0);
-        filter1 = _mm_and_si128(filter1, t1f);
-        filter1 = _mm_or_si128(filter1, work_a);
-        qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80);
-
-        /* Filter2 >> 3 */
-        work_a = _mm_cmpgt_epi8(zero, filter2);
-        filter2 = _mm_srli_epi16(filter2, 3);
-        work_a = _mm_and_si128(work_a, te0);
-        filter2 = _mm_and_si128(filter2, t1f);
-        filter2 = _mm_or_si128(filter2, work_a);
-        ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
-
-        /* filt >> 1 */
-        filt = _mm_adds_epi8(filter1, t1);
-        work_a = _mm_cmpgt_epi8(zero, filt);
-        filt = _mm_srli_epi16(filt, 1);
-        work_a = _mm_and_si128(work_a, t80);
-        filt = _mm_and_si128(filt, t7f);
-        filt = _mm_or_si128(filt, work_a);
-        filt = _mm_andnot_si128(hev, filt);
-        ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
-        qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80);
-        // loopfilter done
-
-        {
-            __m128i work;
-            work = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)),
-                    _mm_or_si128(_mm_subs_epu8(q2, q0), _mm_subs_epu8(q0, q2)));
-            flat = _mm_max_epu8(work, flat);
-            work = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)),
-                    _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3)));
-            flat = _mm_max_epu8(work, flat);
-            work = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(p4, p0), _mm_subs_epu8(p0, p4)),
-                    _mm_or_si128(_mm_subs_epu8(q4, q0), _mm_subs_epu8(q0, q4)));
-            flat = _mm_subs_epu8(flat, one);
-            flat = _mm_cmpeq_epi8(flat, zero);
-            flat = _mm_and_si128(flat, mask);
-
-            p256_5 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                        (__m128d const *)(s - 6 * p)));
-            q256_5 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                        (__m128d const *)(s + 5 * p)));
-            p5 = _mm256_castsi256_si128(p256_5);
-            q5 = _mm256_castsi256_si128(q256_5);
-            flat2 = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(p5, p0), _mm_subs_epu8(p0, p5)),
-                    _mm_or_si128(_mm_subs_epu8(q5, q0), _mm_subs_epu8(q0, q5)));
-
-            flat2 = _mm_max_epu8(work, flat2);
-            p256_6 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                        (__m128d const *)(s - 7 * p)));
-            q256_6 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                        (__m128d const *)(s + 6 * p)));
-            p6 = _mm256_castsi256_si128(p256_6);
-            q6 = _mm256_castsi256_si128(q256_6);
-            work = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(p6, p0), _mm_subs_epu8(p0, p6)),
-                    _mm_or_si128(_mm_subs_epu8(q6, q0), _mm_subs_epu8(q0, q6)));
-
-            flat2 = _mm_max_epu8(work, flat2);
-
-            p256_7 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                        (__m128d const *)(s - 8 * p)));
-            q256_7 = _mm256_castpd_si256(_mm256_broadcast_pd(
-                                        (__m128d const *)(s + 7 * p)));
-            p7 = _mm256_castsi256_si128(p256_7);
-            q7 = _mm256_castsi256_si128(q256_7);
-            work = _mm_max_epu8(
-                    _mm_or_si128(_mm_subs_epu8(p7, p0), _mm_subs_epu8(p0, p7)),
-                    _mm_or_si128(_mm_subs_epu8(q7, q0), _mm_subs_epu8(q0, q7)));
-
-            flat2 = _mm_max_epu8(work, flat2);
-            flat2 = _mm_subs_epu8(flat2, one);
-            flat2 = _mm_cmpeq_epi8(flat2, zero);
-            flat2 = _mm_and_si128(flat2, flat);  // flat2 & flat & mask
-        }
-
-        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-        // flat and wide flat calculations
-        {
-            const __m256i eight = _mm256_set1_epi16(8);
-            const __m256i four = _mm256_set1_epi16(4);
-            __m256i pixelFilter_p, pixelFilter_q, pixetFilter_p2p1p0,
-                    pixetFilter_q2q1q0, sum_p7, sum_q7, sum_p3, sum_q3, res_p,
-                    res_q;
-
-            const __m256i filter = _mm256_load_si256(
-                                  (__m256i const *)filt_loopfilter_avx2);
-            p256_7 = _mm256_shuffle_epi8(p256_7, filter);
-            p256_6 = _mm256_shuffle_epi8(p256_6, filter);
-            p256_5 = _mm256_shuffle_epi8(p256_5, filter);
-            p256_4 = _mm256_shuffle_epi8(p256_4, filter);
-            p256_3 = _mm256_shuffle_epi8(p256_3, filter);
-            p256_2 = _mm256_shuffle_epi8(p256_2, filter);
-            p256_1 = _mm256_shuffle_epi8(p256_1, filter);
-            p256_0 = _mm256_shuffle_epi8(p256_0, filter);
-            q256_0 = _mm256_shuffle_epi8(q256_0, filter);
-            q256_1 = _mm256_shuffle_epi8(q256_1, filter);
-            q256_2 = _mm256_shuffle_epi8(q256_2, filter);
-            q256_3 = _mm256_shuffle_epi8(q256_3, filter);
-            q256_4 = _mm256_shuffle_epi8(q256_4, filter);
-            q256_5 = _mm256_shuffle_epi8(q256_5, filter);
-            q256_6 = _mm256_shuffle_epi8(q256_6, filter);
-            q256_7 = _mm256_shuffle_epi8(q256_7, filter);
-
-            pixelFilter_p = _mm256_add_epi16(_mm256_add_epi16(p256_6, p256_5),
-                    _mm256_add_epi16(p256_4, p256_3));
-            pixelFilter_q = _mm256_add_epi16(_mm256_add_epi16(q256_6, q256_5),
-                    _mm256_add_epi16(q256_4, q256_3));
-
-            pixetFilter_p2p1p0 = _mm256_add_epi16(p256_0,
-                    _mm256_add_epi16(p256_2, p256_1));
-            pixelFilter_p = _mm256_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
-
-            pixetFilter_q2q1q0 = _mm256_add_epi16(q256_0,
-                    _mm256_add_epi16(q256_2, q256_1));
-            pixelFilter_q = _mm256_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
-
-            pixelFilter_p = _mm256_add_epi16(eight,
-                    _mm256_add_epi16(pixelFilter_p, pixelFilter_q));
-
-            pixetFilter_p2p1p0 = _mm256_add_epi16(four,
-                    _mm256_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0));
-
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_p,
-                            _mm256_add_epi16(p256_7, p256_0)), 4);
-
-            flat2_p0 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
-
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_p,
-                            _mm256_add_epi16(q256_7, q256_0)), 4);
-
-            flat2_q0 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
-
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixetFilter_p2p1p0,
-                            _mm256_add_epi16(p256_3, p256_0)), 3);
+      const __m256i eight = _mm256_set1_epi16(8);
+      const __m256i four = _mm256_set1_epi16(4);
+      __m256i pixelFilter_p, pixelFilter_q, pixetFilter_p2p1p0,
+          pixetFilter_q2q1q0, sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q;
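+      // The 15-tap outputs come from running 16-bit sums of the p- and
+      // q-side taps; each successive output drops the farthest opposite-side
+      // tap and adds another copy of the outermost pixel via sum_p7/sum_q7.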
 
-            flat_p0 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      const __m256i filter =
+          _mm256_load_si256((__m256i const *)filt_loopfilter_avx2);
+      p256_7 = _mm256_shuffle_epi8(p256_7, filter);
+      p256_6 = _mm256_shuffle_epi8(p256_6, filter);
+      p256_5 = _mm256_shuffle_epi8(p256_5, filter);
+      p256_4 = _mm256_shuffle_epi8(p256_4, filter);
+      p256_3 = _mm256_shuffle_epi8(p256_3, filter);
+      p256_2 = _mm256_shuffle_epi8(p256_2, filter);
+      p256_1 = _mm256_shuffle_epi8(p256_1, filter);
+      p256_0 = _mm256_shuffle_epi8(p256_0, filter);
+      q256_0 = _mm256_shuffle_epi8(q256_0, filter);
+      q256_1 = _mm256_shuffle_epi8(q256_1, filter);
+      q256_2 = _mm256_shuffle_epi8(q256_2, filter);
+      q256_3 = _mm256_shuffle_epi8(q256_3, filter);
+      q256_4 = _mm256_shuffle_epi8(q256_4, filter);
+      q256_5 = _mm256_shuffle_epi8(q256_5, filter);
+      q256_6 = _mm256_shuffle_epi8(q256_6, filter);
+      q256_7 = _mm256_shuffle_epi8(q256_7, filter);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixetFilter_p2p1p0,
-                            _mm256_add_epi16(q256_3, q256_0)), 3);
+      pixelFilter_p = _mm256_add_epi16(_mm256_add_epi16(p256_6, p256_5),
+                                       _mm256_add_epi16(p256_4, p256_3));
+      pixelFilter_q = _mm256_add_epi16(_mm256_add_epi16(q256_6, q256_5),
+                                       _mm256_add_epi16(q256_4, q256_3));
 
-            flat_q0 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
+      pixetFilter_p2p1p0 =
+          _mm256_add_epi16(p256_0, _mm256_add_epi16(p256_2, p256_1));
+      pixelFilter_p = _mm256_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
 
-            sum_p7 = _mm256_add_epi16(p256_7, p256_7);
+      pixetFilter_q2q1q0 =
+          _mm256_add_epi16(q256_0, _mm256_add_epi16(q256_2, q256_1));
+      pixelFilter_q = _mm256_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
 
-            sum_q7 = _mm256_add_epi16(q256_7, q256_7);
+      pixelFilter_p = _mm256_add_epi16(
+          eight, _mm256_add_epi16(pixelFilter_p, pixelFilter_q));
 
-            sum_p3 = _mm256_add_epi16(p256_3, p256_3);
+      pixetFilter_p2p1p0 = _mm256_add_epi16(
+          four, _mm256_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0));
 
-            sum_q3 = _mm256_add_epi16(q256_3, q256_3);
+      res_p = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(p256_7, p256_0)), 4);
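+      // In the permutes below, 168 == _MM_SHUFFLE(2, 2, 2, 0): after the
+      // in-lane packus it gathers the low qword of each 128-bit lane so all
+      // 16 output pixels land in the low half of the register.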
 
-            pixelFilter_q = _mm256_sub_epi16(pixelFilter_p, p256_6);
+      flat2_p0 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-            pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_6);
+      res_q = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(q256_7, q256_0)), 4);
 
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_p,
-                            _mm256_add_epi16(sum_p7, p256_1)), 4);
+      flat2_q0 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-            flat2_p1 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      res_p =
+          _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0,
+                                             _mm256_add_epi16(p256_3, p256_0)),
+                            3);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_q,
-                            _mm256_add_epi16(sum_q7, q256_1)), 4);
+      flat_p0 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-            flat2_q1 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
+      res_q =
+          _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0,
+                                             _mm256_add_epi16(q256_3, q256_0)),
+                            3);
 
-            pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_p2p1p0, p256_2);
+      flat_q0 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-            pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_2);
+      sum_p7 = _mm256_add_epi16(p256_7, p256_7);
 
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixetFilter_p2p1p0,
-                            _mm256_add_epi16(sum_p3, p256_1)), 3);
+      sum_q7 = _mm256_add_epi16(q256_7, q256_7);
 
-            flat_p1 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      sum_p3 = _mm256_add_epi16(p256_3, p256_3);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixetFilter_q2q1q0,
-                            _mm256_add_epi16(sum_q3, q256_1)), 3);
+      sum_q3 = _mm256_add_epi16(q256_3, q256_3);
 
-            flat_q1 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
+      pixelFilter_q = _mm256_sub_epi16(pixelFilter_p, p256_6);
 
-            sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
+      pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_6);
 
-            sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
+      res_p = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_1)), 4);
 
-            sum_p3 = _mm256_add_epi16(sum_p3, p256_3);
+      flat2_p1 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-            sum_q3 = _mm256_add_epi16(sum_q3, q256_3);
+      res_q = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_1)), 4);
 
-            pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_5);
+      flat2_q1 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-            pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_5);
+      pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_p2p1p0, p256_2);
 
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_p,
-                            _mm256_add_epi16(sum_p7, p256_2)), 4);
+      pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_2);
 
-            flat2_p2 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      res_p =
+          _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0,
+                                             _mm256_add_epi16(sum_p3, p256_1)),
+                            3);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_q,
-                            _mm256_add_epi16(sum_q7, q256_2)), 4);
+      flat_p1 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-            flat2_q2 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
+      res_q =
+          _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_q2q1q0,
+                                             _mm256_add_epi16(sum_q3, q256_1)),
+                            3);
 
-            pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_1);
+      flat_q1 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-            pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_q2q1q0, p256_1);
+      sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
 
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixetFilter_p2p1p0,
-                            _mm256_add_epi16(sum_p3, p256_2)), 3);
+      sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
 
-            flat_p2 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      sum_p3 = _mm256_add_epi16(sum_p3, p256_3);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixetFilter_q2q1q0,
-                            _mm256_add_epi16(sum_q3, q256_2)), 3);
+      sum_q3 = _mm256_add_epi16(sum_q3, q256_3);
 
-            flat_q2 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
+      pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_5);
 
-            sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
+      pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_5);
 
-            sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
+      res_p = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_2)), 4);
 
-            pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_4);
+      flat2_p2 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-            pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_4);
+      res_q = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_2)), 4);
 
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_p,
-                            _mm256_add_epi16(sum_p7, p256_3)), 4);
+      flat2_q2 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-            flat2_p3 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_1);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_q,
-                            _mm256_add_epi16(sum_q7, q256_3)), 4);
+      pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_q2q1q0, p256_1);
 
-            flat2_q3 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
+      res_p =
+          _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0,
+                                             _mm256_add_epi16(sum_p3, p256_2)),
+                            3);
 
-            sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
+      flat_p2 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-            sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
+      res_q =
+          _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_q2q1q0,
+                                             _mm256_add_epi16(sum_q3, q256_2)),
+                            3);
 
-            pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_3);
+      flat_q2 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-            pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_3);
+      sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
 
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_p,
-                            _mm256_add_epi16(sum_p7, p256_4)), 4);
+      sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
 
-            flat2_p4 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_4);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_q,
-                            _mm256_add_epi16(sum_q7, q256_4)), 4);
+      pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_4);
 
-            flat2_q4 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
+      res_p = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_3)), 4);
 
-            sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
+      flat2_p3 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-            sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
+      res_q = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_3)), 4);
 
-            pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_2);
+      flat2_q3 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-            pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_2);
+      sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
 
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_p,
-                            _mm256_add_epi16(sum_p7, p256_5)), 4);
+      sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
 
-            flat2_p5 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_3);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_q,
-                            _mm256_add_epi16(sum_q7, q256_5)), 4);
+      pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_3);
 
-            flat2_q5 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
+      res_p = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_4)), 4);
 
-            sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
+      flat2_p4 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-            sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
+      res_q = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_4)), 4);
 
-            pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_1);
+      flat2_q4 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-            pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_1);
+      sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
 
-            res_p = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_p,
-                            _mm256_add_epi16(sum_p7, p256_6)), 4);
+      sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
 
-            flat2_p6 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p),
-                            168));
+      pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_2);
 
-            res_q = _mm256_srli_epi16(
-                    _mm256_add_epi16(pixelFilter_q,
-                            _mm256_add_epi16(sum_q7, q256_6)), 4);
+      pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_2);
 
-            flat2_q6 = _mm256_castsi256_si128(
-                    _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q),
-                            168));
-        }
+      res_p = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_5)), 4);
 
-        // wide flat
-        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+      flat2_p5 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-        p2 = _mm_andnot_si128(flat, p2);
-        flat_p2 = _mm_and_si128(flat, flat_p2);
-        p2 = _mm_or_si128(flat_p2, p2);
+      res_q = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_5)), 4);
 
-        p1 = _mm_andnot_si128(flat, ps1);
-        flat_p1 = _mm_and_si128(flat, flat_p1);
-        p1 = _mm_or_si128(flat_p1, p1);
+      flat2_q5 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
 
-        p0 = _mm_andnot_si128(flat, ps0);
-        flat_p0 = _mm_and_si128(flat, flat_p0);
-        p0 = _mm_or_si128(flat_p0, p0);
+      sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
 
-        q0 = _mm_andnot_si128(flat, qs0);
-        flat_q0 = _mm_and_si128(flat, flat_q0);
-        q0 = _mm_or_si128(flat_q0, q0);
+      sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
 
-        q1 = _mm_andnot_si128(flat, qs1);
-        flat_q1 = _mm_and_si128(flat, flat_q1);
-        q1 = _mm_or_si128(flat_q1, q1);
+      pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_1);
 
-        q2 = _mm_andnot_si128(flat, q2);
-        flat_q2 = _mm_and_si128(flat, flat_q2);
-        q2 = _mm_or_si128(flat_q2, q2);
+      pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_1);
 
-        p6 = _mm_andnot_si128(flat2, p6);
-        flat2_p6 = _mm_and_si128(flat2, flat2_p6);
-        p6 = _mm_or_si128(flat2_p6, p6);
-        _mm_storeu_si128((__m128i *) (s - 7 * p), p6);
+      res_p = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_6)), 4);
 
-        p5 = _mm_andnot_si128(flat2, p5);
-        flat2_p5 = _mm_and_si128(flat2, flat2_p5);
-        p5 = _mm_or_si128(flat2_p5, p5);
-        _mm_storeu_si128((__m128i *) (s - 6 * p), p5);
+      flat2_p6 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
 
-        p4 = _mm_andnot_si128(flat2, p4);
-        flat2_p4 = _mm_and_si128(flat2, flat2_p4);
-        p4 = _mm_or_si128(flat2_p4, p4);
-        _mm_storeu_si128((__m128i *) (s - 5 * p), p4);
+      res_q = _mm256_srli_epi16(
+          _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_6)), 4);
 
-        p3 = _mm_andnot_si128(flat2, p3);
-        flat2_p3 = _mm_and_si128(flat2, flat2_p3);
-        p3 = _mm_or_si128(flat2_p3, p3);
-        _mm_storeu_si128((__m128i *) (s - 4 * p), p3);
-
-        p2 = _mm_andnot_si128(flat2, p2);
-        flat2_p2 = _mm_and_si128(flat2, flat2_p2);
-        p2 = _mm_or_si128(flat2_p2, p2);
-        _mm_storeu_si128((__m128i *) (s - 3 * p), p2);
-
-        p1 = _mm_andnot_si128(flat2, p1);
-        flat2_p1 = _mm_and_si128(flat2, flat2_p1);
-        p1 = _mm_or_si128(flat2_p1, p1);
-        _mm_storeu_si128((__m128i *) (s - 2 * p), p1);
-
-        p0 = _mm_andnot_si128(flat2, p0);
-        flat2_p0 = _mm_and_si128(flat2, flat2_p0);
-        p0 = _mm_or_si128(flat2_p0, p0);
-        _mm_storeu_si128((__m128i *) (s - 1 * p), p0);
-
-        q0 = _mm_andnot_si128(flat2, q0);
-        flat2_q0 = _mm_and_si128(flat2, flat2_q0);
-        q0 = _mm_or_si128(flat2_q0, q0);
-        _mm_storeu_si128((__m128i *) (s - 0 * p), q0);
-
-        q1 = _mm_andnot_si128(flat2, q1);
-        flat2_q1 = _mm_and_si128(flat2, flat2_q1);
-        q1 = _mm_or_si128(flat2_q1, q1);
-        _mm_storeu_si128((__m128i *) (s + 1 * p), q1);
-
-        q2 = _mm_andnot_si128(flat2, q2);
-        flat2_q2 = _mm_and_si128(flat2, flat2_q2);
-        q2 = _mm_or_si128(flat2_q2, q2);
-        _mm_storeu_si128((__m128i *) (s + 2 * p), q2);
-
-        q3 = _mm_andnot_si128(flat2, q3);
-        flat2_q3 = _mm_and_si128(flat2, flat2_q3);
-        q3 = _mm_or_si128(flat2_q3, q3);
-        _mm_storeu_si128((__m128i *) (s + 3 * p), q3);
-
-        q4 = _mm_andnot_si128(flat2, q4);
-        flat2_q4 = _mm_and_si128(flat2, flat2_q4);
-        q4 = _mm_or_si128(flat2_q4, q4);
-        _mm_storeu_si128((__m128i *) (s + 4 * p), q4);
-
-        q5 = _mm_andnot_si128(flat2, q5);
-        flat2_q5 = _mm_and_si128(flat2, flat2_q5);
-        q5 = _mm_or_si128(flat2_q5, q5);
-        _mm_storeu_si128((__m128i *) (s + 5 * p), q5);
-
-        q6 = _mm_andnot_si128(flat2, q6);
-        flat2_q6 = _mm_and_si128(flat2, flat2_q6);
-        q6 = _mm_or_si128(flat2_q6, q6);
-        _mm_storeu_si128((__m128i *) (s + 6 * p), q6);
+      flat2_q6 = _mm256_castsi256_si128(
+          _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     }
+
+    // wide flat calculations done
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
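+
+    // Per-pixel select: where flat is set take the 7-tap output over the
+    // 4-tap (or unfiltered) value; then, where flat2 is set, overwrite with
+    // the 15-tap output before each row is stored.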
+
+    p2 = _mm_andnot_si128(flat, p2);
+    flat_p2 = _mm_and_si128(flat, flat_p2);
+    p2 = _mm_or_si128(flat_p2, p2);
+
+    p1 = _mm_andnot_si128(flat, ps1);
+    flat_p1 = _mm_and_si128(flat, flat_p1);
+    p1 = _mm_or_si128(flat_p1, p1);
+
+    p0 = _mm_andnot_si128(flat, ps0);
+    flat_p0 = _mm_and_si128(flat, flat_p0);
+    p0 = _mm_or_si128(flat_p0, p0);
+
+    q0 = _mm_andnot_si128(flat, qs0);
+    flat_q0 = _mm_and_si128(flat, flat_q0);
+    q0 = _mm_or_si128(flat_q0, q0);
+
+    q1 = _mm_andnot_si128(flat, qs1);
+    flat_q1 = _mm_and_si128(flat, flat_q1);
+    q1 = _mm_or_si128(flat_q1, q1);
+
+    q2 = _mm_andnot_si128(flat, q2);
+    flat_q2 = _mm_and_si128(flat, flat_q2);
+    q2 = _mm_or_si128(flat_q2, q2);
+
+    p6 = _mm_andnot_si128(flat2, p6);
+    flat2_p6 = _mm_and_si128(flat2, flat2_p6);
+    p6 = _mm_or_si128(flat2_p6, p6);
+    _mm_storeu_si128((__m128i *)(s - 7 * p), p6);
+
+    p5 = _mm_andnot_si128(flat2, p5);
+    flat2_p5 = _mm_and_si128(flat2, flat2_p5);
+    p5 = _mm_or_si128(flat2_p5, p5);
+    _mm_storeu_si128((__m128i *)(s - 6 * p), p5);
+
+    p4 = _mm_andnot_si128(flat2, p4);
+    flat2_p4 = _mm_and_si128(flat2, flat2_p4);
+    p4 = _mm_or_si128(flat2_p4, p4);
+    _mm_storeu_si128((__m128i *)(s - 5 * p), p4);
+
+    p3 = _mm_andnot_si128(flat2, p3);
+    flat2_p3 = _mm_and_si128(flat2, flat2_p3);
+    p3 = _mm_or_si128(flat2_p3, p3);
+    _mm_storeu_si128((__m128i *)(s - 4 * p), p3);
+
+    p2 = _mm_andnot_si128(flat2, p2);
+    flat2_p2 = _mm_and_si128(flat2, flat2_p2);
+    p2 = _mm_or_si128(flat2_p2, p2);
+    _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
+
+    p1 = _mm_andnot_si128(flat2, p1);
+    flat2_p1 = _mm_and_si128(flat2, flat2_p1);
+    p1 = _mm_or_si128(flat2_p1, p1);
+    _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
+
+    p0 = _mm_andnot_si128(flat2, p0);
+    flat2_p0 = _mm_and_si128(flat2, flat2_p0);
+    p0 = _mm_or_si128(flat2_p0, p0);
+    _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
+
+    q0 = _mm_andnot_si128(flat2, q0);
+    flat2_q0 = _mm_and_si128(flat2, flat2_q0);
+    q0 = _mm_or_si128(flat2_q0, q0);
+    _mm_storeu_si128((__m128i *)(s - 0 * p), q0);
+
+    q1 = _mm_andnot_si128(flat2, q1);
+    flat2_q1 = _mm_and_si128(flat2, flat2_q1);
+    q1 = _mm_or_si128(flat2_q1, q1);
+    _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
+
+    q2 = _mm_andnot_si128(flat2, q2);
+    flat2_q2 = _mm_and_si128(flat2, flat2_q2);
+    q2 = _mm_or_si128(flat2_q2, q2);
+    _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
+
+    q3 = _mm_andnot_si128(flat2, q3);
+    flat2_q3 = _mm_and_si128(flat2, flat2_q3);
+    q3 = _mm_or_si128(flat2_q3, q3);
+    _mm_storeu_si128((__m128i *)(s + 3 * p), q3);
+
+    q4 = _mm_andnot_si128(flat2, q4);
+    flat2_q4 = _mm_and_si128(flat2, flat2_q4);
+    q4 = _mm_or_si128(flat2_q4, q4);
+    _mm_storeu_si128((__m128i *)(s + 4 * p), q4);
+
+    q5 = _mm_andnot_si128(flat2, q5);
+    flat2_q5 = _mm_and_si128(flat2, flat2_q5);
+    q5 = _mm_or_si128(flat2_q5, q5);
+    _mm_storeu_si128((__m128i *)(s + 5 * p), q5);
+
+    q6 = _mm_andnot_si128(flat2, q6);
+    flat2_q6 = _mm_and_si128(flat2, flat2_q6);
+    q6 = _mm_or_si128(flat2_q6, q6);
+    _mm_storeu_si128((__m128i *)(s + 6 * p), q6);
+  }
 }
 
 void vpx_lpf_horizontal_16_avx2(unsigned char *s, int p,
-        const unsigned char *_blimit, const unsigned char *_limit,
-        const unsigned char *_thresh, int count) {
-    if (count == 1)
-        mb_lpf_horizontal_edge_w_avx2_8(s, p, _blimit, _limit, _thresh);
-    else
-        mb_lpf_horizontal_edge_w_avx2_16(s, p, _blimit, _limit, _thresh);
+                                const unsigned char *_blimit,
+                                const unsigned char *_limit,
+                                const unsigned char *_thresh, int count) {
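+  // count == 1 filters an 8-pixel-wide edge with the SSE-width path;
+  // larger counts take the 16-pixel-wide AVX2 path.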
+  if (count == 1)
+    mb_lpf_horizontal_edge_w_avx2_8(s, p, _blimit, _limit, _thresh);
+  else
+    mb_lpf_horizontal_edge_w_avx2_16(s, p, _blimit, _limit, _thresh);
 }
diff --git a/vpx_dsp/x86/loopfilter_sse2.c b/vpx_dsp/x86/loopfilter_sse2.c
index ed101273671d6b319d2453d4694e10258336f3e0..8a58a36da471fba5591b619131136322974c69f7 100644
--- a/vpx_dsp/x86/loopfilter_sse2.c
+++ b/vpx_dsp/x86/loopfilter_sse2.c
@@ -18,8 +18,7 @@ static INLINE __m128i abs_diff(__m128i a, __m128i b) {
   return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
 }
 
-static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
-                                            int p,
+static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, int p,
                                             const unsigned char *_blimit,
                                             const unsigned char *_limit,
                                             const unsigned char *_thresh) {
@@ -33,27 +32,27 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
   __m128i abs_p1p0;
 
   q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p));
-  q4p4 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q4p4),
-                                       (__m64 *)(s + 4 * p)));
+  q4p4 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * p)));
   q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
-  q3p3 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q3p3),
-                                       (__m64 *)(s + 3 * p)));
+  q3p3 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * p)));
   q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
-  q2p2 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q2p2),
-                                       (__m64 *)(s + 2 * p)));
+  q2p2 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * p)));
   q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
-  q1p1 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q1p1),
-                                       (__m64 *)(s + 1 * p)));
+  q1p1 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * p)));
   p1q1 = _mm_shuffle_epi32(q1p1, 78);
   q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
-  q0p0 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q0p0),
-                                       (__m64 *)(s - 0 * p)));
+  q0p0 = _mm_castps_si128(
+      _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * p)));
   p0q0 = _mm_shuffle_epi32(q0p0, 78);
 
   {
     __m128i abs_p1q1, abs_p0q0, abs_q1q0, fe, ff, work;
     abs_p1p0 = abs_diff(q1p1, q0p0);
-    abs_q1q0 =  _mm_srli_si128(abs_p1p0, 8);
+    abs_q1q0 = _mm_srli_si128(abs_p1p0, 8);
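+    // the q-side pixels sit in the high 8 bytes of each qXpX register, so a
+    // byte shift extracts their absolute differences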
     fe = _mm_set1_epi8(0xfe);
     ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
     abs_p0q0 = abs_diff(q0p0, p0q0);
@@ -62,7 +61,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
     hev = _mm_subs_epu8(flat, thresh);
     hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
 
-    abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0);
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
     abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
     mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
     mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
@@ -71,8 +70,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
     // mask |= (abs(p1 - p0) > limit) * -1;
     // mask |= (abs(q1 - q0) > limit) * -1;
 
-    work = _mm_max_epu8(abs_diff(q2p2, q1p1),
-                        abs_diff(q3p3, q2p2));
+    work = _mm_max_epu8(abs_diff(q2p2, q1p1), abs_diff(q3p3, q2p2));
     mask = _mm_max_epu8(work, mask);
     mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
     mask = _mm_subs_epu8(mask, limit);
@@ -134,17 +132,17 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
       flat = _mm_and_si128(flat, mask);
 
       q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p));
-      q5p5 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q5p5),
-                                           (__m64 *)(s + 5 * p)));
+      q5p5 = _mm_castps_si128(
+          _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * p)));
 
       q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p));
-      q6p6 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q6p6),
-                                           (__m64 *)(s + 6 * p)));
+      q6p6 = _mm_castps_si128(
+          _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * p)));
       flat2 = _mm_max_epu8(abs_diff(q4p4, q0p0), abs_diff(q5p5, q0p0));
 
       q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p));
-      q7p7 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q7p7),
-                                           (__m64 *)(s + 7 * p)));
+      q7p7 = _mm_castps_si128(
+          _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * p)));
       work = _mm_max_epu8(abs_diff(q6p6, q0p0), abs_diff(q7p7, q0p0));
       flat2 = _mm_max_epu8(work, flat2);
       flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8));
@@ -164,7 +162,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
       __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0;
       __m128i sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q;
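+      // the pixel bytes are widened to 16-bit lanes (unpack with zero) so
+      // the 15-tap sums below cannot overflow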
 
-      p7_16 = _mm_unpacklo_epi8(q7p7, zero);;
+      p7_16 = _mm_unpacklo_epi8(q7p7, zero);
       p6_16 = _mm_unpacklo_epi8(q6p6, zero);
       p5_16 = _mm_unpacklo_epi8(q5p5, zero);
       p4_16 = _mm_unpacklo_epi8(q4p4, zero);
@@ -187,24 +185,23 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
                                     _mm_add_epi16(q4_16, q3_16));
 
       pixetFilter_p2p1p0 = _mm_add_epi16(p0_16, _mm_add_epi16(p2_16, p1_16));
-      pixelFilter_p =  _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
+      pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
 
       pixetFilter_q2q1q0 = _mm_add_epi16(q0_16, _mm_add_epi16(q2_16, q1_16));
-      pixelFilter_q =  _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
-      pixelFilter_p =  _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p,
-                                                         pixelFilter_q));
-      pixetFilter_p2p1p0 =   _mm_add_epi16(four,
-                                           _mm_add_epi16(pixetFilter_p2p1p0,
-                                                         pixetFilter_q2q1q0));
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                           _mm_add_epi16(p7_16, p0_16)), 4);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                                           _mm_add_epi16(q7_16, q0_16)), 4);
+      pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
+      pixelFilter_p =
+          _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, pixelFilter_q));
+      pixetFilter_p2p1p0 = _mm_add_epi16(
+          four, _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0));
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7_16, p0_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7_16, q0_16)), 4);
       flat2_q0p0 = _mm_packus_epi16(res_p, res_q);
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
-                                           _mm_add_epi16(p3_16, p0_16)), 3);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
-                                           _mm_add_epi16(q3_16, q0_16)), 3);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(p3_16, p0_16)), 3);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(q3_16, q0_16)), 3);
 
       flat_q0p0 = _mm_packus_epi16(res_p, res_q);
 
@@ -215,18 +212,18 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
 
       pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6_16);
       pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6_16);
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                             _mm_add_epi16(sum_p7, p1_16)), 4);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                             _mm_add_epi16(sum_q7, q1_16)), 4);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1_16)), 4);
       flat2_q1p1 = _mm_packus_epi16(res_p, res_q);
 
       pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2_16);
       pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2_16);
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
-                             _mm_add_epi16(sum_p3, p1_16)), 3);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
-                             _mm_add_epi16(sum_q3, q1_16)), 3);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p1_16)), 3);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q1_16)), 3);
       flat_q1p1 = _mm_packus_epi16(res_p, res_q);
 
       sum_p7 = _mm_add_epi16(sum_p7, p7_16);
@@ -236,59 +233,59 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
 
       pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5_16);
       pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5_16);
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                             _mm_add_epi16(sum_p7, p2_16)), 4);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                             _mm_add_epi16(sum_q7, q2_16)), 4);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2_16)), 4);
       flat2_q2p2 = _mm_packus_epi16(res_p, res_q);
 
       pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1_16);
       pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1_16);
 
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
-                                           _mm_add_epi16(sum_p3, p2_16)), 3);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
-                                           _mm_add_epi16(sum_q3, q2_16)), 3);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p2_16)), 3);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q2_16)), 3);
       flat_q2p2 = _mm_packus_epi16(res_p, res_q);
 
       sum_p7 = _mm_add_epi16(sum_p7, p7_16);
       sum_q7 = _mm_add_epi16(sum_q7, q7_16);
       pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4_16);
       pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4_16);
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                             _mm_add_epi16(sum_p7, p3_16)), 4);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                             _mm_add_epi16(sum_q7, q3_16)), 4);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3_16)), 4);
       flat2_q3p3 = _mm_packus_epi16(res_p, res_q);
 
       sum_p7 = _mm_add_epi16(sum_p7, p7_16);
       sum_q7 = _mm_add_epi16(sum_q7, q7_16);
       pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3_16);
       pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3_16);
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                             _mm_add_epi16(sum_p7, p4_16)), 4);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                             _mm_add_epi16(sum_q7, q4_16)), 4);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4_16)), 4);
       flat2_q4p4 = _mm_packus_epi16(res_p, res_q);
 
       sum_p7 = _mm_add_epi16(sum_p7, p7_16);
       sum_q7 = _mm_add_epi16(sum_q7, q7_16);
       pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2_16);
       pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2_16);
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                             _mm_add_epi16(sum_p7, p5_16)), 4);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                             _mm_add_epi16(sum_q7, q5_16)), 4);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5_16)), 4);
       flat2_q5p5 = _mm_packus_epi16(res_p, res_q);
 
       sum_p7 = _mm_add_epi16(sum_p7, p7_16);
       sum_q7 = _mm_add_epi16(sum_q7, q7_16);
       pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1_16);
       pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1_16);
-      res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
-                             _mm_add_epi16(sum_p7, p6_16)), 4);
-      res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
-                             _mm_add_epi16(sum_q7, q6_16)), 4);
+      res_p = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6_16)), 4);
+      res_q = _mm_srli_epi16(
+          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6_16)), 4);
       flat2_q6p6 = _mm_packus_epi16(res_p, res_q);
     }
     // wide flat
@@ -349,7 +346,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
     flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0);
     q0p0 = _mm_or_si128(q0p0, flat2_q0p0);
     _mm_storel_epi64((__m128i *)(s - 1 * p), q0p0);
-    _mm_storeh_pi((__m64 *)(s - 0 * p),  _mm_castsi128_ps(q0p0));
+    _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0));
   }
 }
 
@@ -367,8 +364,8 @@ static INLINE __m128i filter8_mask(const __m128i *const flat,
                                    const __m128i *const other_filt,
                                    const __m128i *const f8_lo,
                                    const __m128i *const f8_hi) {
-  const __m128i f8 = _mm_packus_epi16(_mm_srli_epi16(*f8_lo, 3),
-                                      _mm_srli_epi16(*f8_hi, 3));
+  const __m128i f8 =
+      _mm_packus_epi16(_mm_srli_epi16(*f8_lo, 3), _mm_srli_epi16(*f8_hi, 3));
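+  // Keep f8 where the flat mask is set; pass other_filt through elsewhere.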
   const __m128i result = _mm_and_si128(*flat, f8);
   return _mm_or_si128(_mm_andnot_si128(*flat, *other_filt), result);
 }
@@ -377,14 +374,13 @@ static INLINE __m128i filter16_mask(const __m128i *const flat,
                                     const __m128i *const other_filt,
                                     const __m128i *const f_lo,
                                     const __m128i *const f_hi) {
-  const __m128i f = _mm_packus_epi16(_mm_srli_epi16(*f_lo, 4),
-                                     _mm_srli_epi16(*f_hi, 4));
+  const __m128i f =
+      _mm_packus_epi16(_mm_srli_epi16(*f_lo, 4), _mm_srli_epi16(*f_hi, 4));
   const __m128i result = _mm_and_si128(*flat, f);
   return _mm_or_si128(_mm_andnot_si128(*flat, *other_filt), result);
 }
 
-static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s,
-                                             int p,
+static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, int p,
                                              const unsigned char *_blimit,
                                              const unsigned char *_limit,
                                              const unsigned char *_thresh) {
@@ -429,7 +425,7 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s,
     __m128i work;
     max_abs_p1p0q1q0 = _mm_max_epu8(abs_p1p0, abs_q1q0);
 
-    abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0);
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
     abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
     mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
     mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
@@ -628,16 +624,16 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s,
       __m128i f_hi;
 
       f_lo = _mm_sub_epi16(_mm_slli_epi16(p7_lo, 3), p7_lo);  // p7 * 7
-      f_lo = _mm_add_epi16(_mm_slli_epi16(p6_lo, 1),
-                           _mm_add_epi16(p4_lo, f_lo));
+      f_lo =
+          _mm_add_epi16(_mm_slli_epi16(p6_lo, 1), _mm_add_epi16(p4_lo, f_lo));
       f_lo = _mm_add_epi16(_mm_add_epi16(p3_lo, f_lo),
                            _mm_add_epi16(p2_lo, p1_lo));
       f_lo = _mm_add_epi16(_mm_add_epi16(p0_lo, q0_lo), f_lo);
       f_lo = _mm_add_epi16(_mm_add_epi16(p5_lo, eight), f_lo);
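+      // f_lo now holds 7*p7 + 2*p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8.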
 
       f_hi = _mm_sub_epi16(_mm_slli_epi16(p7_hi, 3), p7_hi);  // p7 * 7
-      f_hi = _mm_add_epi16(_mm_slli_epi16(p6_hi, 1),
-                           _mm_add_epi16(p4_hi, f_hi));
+      f_hi =
+          _mm_add_epi16(_mm_slli_epi16(p6_hi, 1), _mm_add_epi16(p4_hi, f_hi));
       f_hi = _mm_add_epi16(_mm_add_epi16(p3_hi, f_hi),
                            _mm_add_epi16(p2_hi, p1_hi));
       f_hi = _mm_add_epi16(_mm_add_epi16(p0_hi, q0_hi), f_hi);
@@ -765,7 +761,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
     const __m128i ff = _mm_cmpeq_epi8(fe, fe);
     __m128i abs_p1q1, abs_p0q0, abs_q1q0, abs_p1p0, work;
     abs_p1p0 = abs_diff(q1p1, q0p0);
-    abs_q1q0 =  _mm_srli_si128(abs_p1p0, 8);
+    abs_q1q0 = _mm_srli_si128(abs_p1p0, 8);
 
     abs_p0q0 = abs_diff(q0p0, p0q0);
     abs_p1q1 = abs_diff(q1p1, p1q1);
@@ -773,7 +769,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
     hev = _mm_subs_epu8(flat, thresh);
     hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
 
-    abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0);
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
     abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
     mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
     mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
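+    // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;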
@@ -782,8 +778,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
     // mask |= (abs(p1 - p0) > limit) * -1;
     // mask |= (abs(q1 - q0) > limit) * -1;
 
-    work = _mm_max_epu8(abs_diff(q2p2, q1p1),
-                        abs_diff(q3p3, q2p2));
+    work = _mm_max_epu8(abs_diff(q2p2, q1p1), abs_diff(q3p3, q2p2));
     mask = _mm_max_epu8(work, mask);
     mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
     mask = _mm_subs_epu8(mask, limit);
@@ -791,8 +786,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
 
     // flat_mask4
 
-    flat = _mm_max_epu8(abs_diff(q2p2, q0p0),
-                        abs_diff(q3p3, q0p0));
+    flat = _mm_max_epu8(abs_diff(q2p2, q0p0), abs_diff(q3p3, q0p0));
     flat = _mm_max_epu8(abs_p1p0, flat);
     flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8));
     flat = _mm_subs_epu8(flat, one);
@@ -857,14 +851,14 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
     const __m128i t3 = _mm_set1_epi8(3);
     const __m128i t80 = _mm_set1_epi8(0x80);
     const __m128i t1 = _mm_set1_epi8(0x1);
-    const __m128i ps1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)),
-                                      t80);
-    const __m128i ps0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)),
-                                      t80);
-    const __m128i qs0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)),
-                                      t80);
-    const __m128i qs1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)),
-                                      t80);
+    const __m128i ps1 =
+        _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)), t80);
+    const __m128i ps0 =
+        _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)), t80);
+    const __m128i qs0 =
+        _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)), t80);
+    const __m128i qs1 =
+        _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)), t80);
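+    // The 0x80 XOR biases the unsigned pixels into the signed filter domain.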
     __m128i filt;
     __m128i work_a;
     __m128i filter1, filter2;
@@ -943,8 +937,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
   }
 }
 
-void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p,
-                                    const uint8_t *_blimit0,
+void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
                                     const uint8_t *_limit0,
                                     const uint8_t *_thresh0,
                                     const uint8_t *_blimit1,
@@ -979,17 +972,17 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p,
   q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
   q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
   {
-    const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
-                                          _mm_subs_epu8(p0, p1));
-    const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
-                                          _mm_subs_epu8(q0, q1));
+    const __m128i abs_p1p0 =
+        _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1));
+    const __m128i abs_q1q0 =
+        _mm_or_si128(_mm_subs_epu8(q1, q0), _mm_subs_epu8(q0, q1));
     const __m128i one = _mm_set1_epi8(1);
     const __m128i fe = _mm_set1_epi8(0xfe);
     const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
-    __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
-                                    _mm_subs_epu8(q0, p0));
-    __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1),
-                                    _mm_subs_epu8(q1, p1));
+    __m128i abs_p0q0 =
+        _mm_or_si128(_mm_subs_epu8(p0, q0), _mm_subs_epu8(q0, p0));
+    __m128i abs_p1q1 =
+        _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1));
     __m128i work;
 
     // filter_mask and hev_mask
@@ -997,7 +990,7 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p,
     hev = _mm_subs_epu8(flat, thresh);
     hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
 
-    abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0);
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
     abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
     mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
     mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
@@ -1005,29 +998,25 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p,
     mask = _mm_max_epu8(flat, mask);
     // mask |= (abs(p1 - p0) > limit) * -1;
     // mask |= (abs(q1 - q0) > limit) * -1;
-    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1),
-                                     _mm_subs_epu8(p1, p2)),
-                         _mm_or_si128(_mm_subs_epu8(p3, p2),
-                                      _mm_subs_epu8(p2, p3)));
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)),
+        _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3)));
     mask = _mm_max_epu8(work, mask);
-    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1),
-                                     _mm_subs_epu8(q1, q2)),
-                         _mm_or_si128(_mm_subs_epu8(q3, q2),
-                                      _mm_subs_epu8(q2, q3)));
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)),
+        _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
     mask = _mm_max_epu8(work, mask);
     mask = _mm_subs_epu8(mask, limit);
     mask = _mm_cmpeq_epi8(mask, zero);
 
     // flat_mask4
-    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0),
-                                     _mm_subs_epu8(p0, p2)),
-                         _mm_or_si128(_mm_subs_epu8(q2, q0),
-                                      _mm_subs_epu8(q0, q2)));
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)),
+        _mm_or_si128(_mm_subs_epu8(q2, q0), _mm_subs_epu8(q0, q2)));
     flat = _mm_max_epu8(work, flat);
-    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p3, p0),
-                                     _mm_subs_epu8(p0, p3)),
-                         _mm_or_si128(_mm_subs_epu8(q3, q0),
-                                      _mm_subs_epu8(q0, q3)));
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)),
+        _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3)));
     flat = _mm_max_epu8(work, flat);
     flat = _mm_subs_epu8(flat, one);
     flat = _mm_cmpeq_epi8(flat, zero);
@@ -1098,14 +1087,14 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p,
     const __m128i t1 = _mm_set1_epi8(0x1);
     const __m128i t7f = _mm_set1_epi8(0x7f);
 
-    const __m128i ps1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)),
-                                      t80);
-    const __m128i ps0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)),
-                                      t80);
-    const __m128i qs0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)),
-                                      t80);
-    const __m128i qs1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)),
-                                      t80);
+    const __m128i ps1 =
+        _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80);
+    const __m128i ps0 =
+        _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80);
+    const __m128i qs0 =
+        _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80);
+    const __m128i qs1 =
+        _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80);
     __m128i filt;
     __m128i work_a;
     __m128i filter1, filter2;
@@ -1221,23 +1210,23 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
 
   // filter_mask and hev_mask
   {
-    const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
-                                          _mm_subs_epu8(p0, p1));
-    const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
-                                          _mm_subs_epu8(q0, q1));
+    const __m128i abs_p1p0 =
+        _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1));
+    const __m128i abs_q1q0 =
+        _mm_or_si128(_mm_subs_epu8(q1, q0), _mm_subs_epu8(q0, q1));
     const __m128i fe = _mm_set1_epi8(0xfe);
     const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
-    __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
-                                    _mm_subs_epu8(q0, p0));
-    __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1),
-                                    _mm_subs_epu8(q1, p1));
+    __m128i abs_p0q0 =
+        _mm_or_si128(_mm_subs_epu8(p0, q0), _mm_subs_epu8(q0, p0));
+    __m128i abs_p1q1 =
+        _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1));
     __m128i work;
 
     flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
     hev = _mm_subs_epu8(flat, thresh);
     hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
 
-    abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0);
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
     abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
     mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
     mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
@@ -1245,15 +1234,13 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
     mask = _mm_max_epu8(flat, mask);
     // mask |= (abs(p1 - p0) > limit) * -1;
     // mask |= (abs(q1 - q0) > limit) * -1;
-    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1),
-                                     _mm_subs_epu8(p1, p2)),
-                         _mm_or_si128(_mm_subs_epu8(p3, p2),
-                                      _mm_subs_epu8(p2, p3)));
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)),
+        _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3)));
     mask = _mm_max_epu8(work, mask);
-    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1),
-                                     _mm_subs_epu8(q1, q2)),
-                         _mm_or_si128(_mm_subs_epu8(q3, q2),
-                                      _mm_subs_epu8(q2, q3)));
+    work = _mm_max_epu8(
+        _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)),
+        _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
     mask = _mm_max_epu8(work, mask);
     mask = _mm_subs_epu8(mask, limit);
     mask = _mm_cmpeq_epi8(mask, zero);
@@ -1269,14 +1256,14 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
     const __m128i t1 = _mm_set1_epi8(0x1);
     const __m128i t7f = _mm_set1_epi8(0x7f);
 
-    const __m128i ps1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)),
-                                      t80);
-    const __m128i ps0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)),
-                                      t80);
-    const __m128i qs0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)),
-                                      t80);
-    const __m128i qs1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)),
-                                      t80);
+    const __m128i ps1 =
+        _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80);
+    const __m128i ps0 =
+        _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80);
+    const __m128i qs0 =
+        _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80);
+    const __m128i qs1 =
+        _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80);
     __m128i filt;
     __m128i work_a;
     __m128i filter1, filter2;
@@ -1334,44 +1321,44 @@ static INLINE void transpose8x16(unsigned char *in0, unsigned char *in1,
   __m128i x8, x9, x10, x11, x12, x13, x14, x15;
 
   // 2-way interleave w/hoisting of unpacks
-  x0 = _mm_loadl_epi64((__m128i *)in0);  // 1
+  x0 = _mm_loadl_epi64((__m128i *)in0);           // 1
   x1 = _mm_loadl_epi64((__m128i *)(in0 + in_p));  // 3
-  x0 = _mm_unpacklo_epi8(x0, x1);  // 1
+  x0 = _mm_unpacklo_epi8(x0, x1);                 // 1
 
   x2 = _mm_loadl_epi64((__m128i *)(in0 + 2 * in_p));  // 5
-  x3 = _mm_loadl_epi64((__m128i *)(in0 + 3*in_p));  // 7
-  x1 = _mm_unpacklo_epi8(x2, x3);  // 2
+  x3 = _mm_loadl_epi64((__m128i *)(in0 + 3 * in_p));  // 7
+  x1 = _mm_unpacklo_epi8(x2, x3);                     // 2
 
-  x4 = _mm_loadl_epi64((__m128i *)(in0 + 4*in_p));  // 9
-  x5 = _mm_loadl_epi64((__m128i *)(in0 + 5*in_p));  // 11
-  x2 = _mm_unpacklo_epi8(x4, x5);  // 3
+  x4 = _mm_loadl_epi64((__m128i *)(in0 + 4 * in_p));  // 9
+  x5 = _mm_loadl_epi64((__m128i *)(in0 + 5 * in_p));  // 11
+  x2 = _mm_unpacklo_epi8(x4, x5);                     // 3
 
-  x6 = _mm_loadl_epi64((__m128i *)(in0 + 6*in_p));  // 13
-  x7 = _mm_loadl_epi64((__m128i *)(in0 + 7*in_p));  // 15
-  x3 = _mm_unpacklo_epi8(x6, x7);  // 4
-  x4 = _mm_unpacklo_epi16(x0, x1);  // 9
+  x6 = _mm_loadl_epi64((__m128i *)(in0 + 6 * in_p));  // 13
+  x7 = _mm_loadl_epi64((__m128i *)(in0 + 7 * in_p));  // 15
+  x3 = _mm_unpacklo_epi8(x6, x7);                     // 4
+  x4 = _mm_unpacklo_epi16(x0, x1);                    // 9
 
-  x8 = _mm_loadl_epi64((__m128i *)in1);  // 2
+  x8 = _mm_loadl_epi64((__m128i *)in1);           // 2
   x9 = _mm_loadl_epi64((__m128i *)(in1 + in_p));  // 4
-  x8 = _mm_unpacklo_epi8(x8, x9);  // 5
-  x5 = _mm_unpacklo_epi16(x2, x3);  // 10
+  x8 = _mm_unpacklo_epi8(x8, x9);                 // 5
+  x5 = _mm_unpacklo_epi16(x2, x3);                // 10
 
   x10 = _mm_loadl_epi64((__m128i *)(in1 + 2 * in_p));  // 6
-  x11 = _mm_loadl_epi64((__m128i *)(in1 + 3*in_p));  // 8
-  x9 = _mm_unpacklo_epi8(x10, x11);  // 6
+  x11 = _mm_loadl_epi64((__m128i *)(in1 + 3 * in_p));  // 8
+  x9 = _mm_unpacklo_epi8(x10, x11);                    // 6
 
-  x12 = _mm_loadl_epi64((__m128i *)(in1 + 4*in_p));  // 10
-  x13 = _mm_loadl_epi64((__m128i *)(in1 + 5*in_p));  // 12
-  x10 = _mm_unpacklo_epi8(x12, x13);  // 7
-  x12 = _mm_unpacklo_epi16(x8, x9);  // 11
+  x12 = _mm_loadl_epi64((__m128i *)(in1 + 4 * in_p));  // 10
+  x13 = _mm_loadl_epi64((__m128i *)(in1 + 5 * in_p));  // 12
+  x10 = _mm_unpacklo_epi8(x12, x13);                   // 7
+  x12 = _mm_unpacklo_epi16(x8, x9);                    // 11
 
-  x14 = _mm_loadl_epi64((__m128i *)(in1 + 6*in_p));  // 14
-  x15 = _mm_loadl_epi64((__m128i *)(in1 + 7*in_p));  // 16
-  x11 = _mm_unpacklo_epi8(x14, x15);  // 8
-  x13 = _mm_unpacklo_epi16(x10, x11);  // 12
+  x14 = _mm_loadl_epi64((__m128i *)(in1 + 6 * in_p));  // 14
+  x15 = _mm_loadl_epi64((__m128i *)(in1 + 7 * in_p));  // 16
+  x11 = _mm_unpacklo_epi8(x14, x15);                   // 8
+  x13 = _mm_unpacklo_epi16(x10, x11);                  // 12
 
-  x6 = _mm_unpacklo_epi32(x4, x5);  // 13
-  x7 = _mm_unpackhi_epi32(x4, x5);  // 14
+  x6 = _mm_unpacklo_epi32(x4, x5);     // 13
+  x7 = _mm_unpackhi_epi32(x4, x5);     // 14
   x14 = _mm_unpacklo_epi32(x12, x13);  // 15
   x15 = _mm_unpackhi_epi32(x12, x13);  // 16
 
@@ -1407,23 +1394,31 @@ static INLINE void transpose(unsigned char *src[], int in_p,
     unsigned char *in = src[idx8x8];
     unsigned char *out = dst[idx8x8];
 
-    x0 = _mm_loadl_epi64((__m128i *)(in + 0*in_p));  // 00 01 02 03 04 05 06 07
-    x1 = _mm_loadl_epi64((__m128i *)(in + 1*in_p));  // 10 11 12 13 14 15 16 17
+    x0 =
+        _mm_loadl_epi64((__m128i *)(in + 0 * in_p));  // 00 01 02 03 04 05 06 07
+    x1 =
+        _mm_loadl_epi64((__m128i *)(in + 1 * in_p));  // 10 11 12 13 14 15 16 17
     // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
     x0 = _mm_unpacklo_epi8(x0, x1);
 
-    x2 = _mm_loadl_epi64((__m128i *)(in + 2*in_p));  // 20 21 22 23 24 25 26 27
-    x3 = _mm_loadl_epi64((__m128i *)(in + 3*in_p));  // 30 31 32 33 34 35 36 37
+    x2 =
+        _mm_loadl_epi64((__m128i *)(in + 2 * in_p));  // 20 21 22 23 24 25 26 27
+    x3 =
+        _mm_loadl_epi64((__m128i *)(in + 3 * in_p));  // 30 31 32 33 34 35 36 37
     // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
     x1 = _mm_unpacklo_epi8(x2, x3);
 
-    x4 = _mm_loadl_epi64((__m128i *)(in + 4*in_p));  // 40 41 42 43 44 45 46 47
-    x5 = _mm_loadl_epi64((__m128i *)(in + 5*in_p));  // 50 51 52 53 54 55 56 57
+    x4 =
+        _mm_loadl_epi64((__m128i *)(in + 4 * in_p));  // 40 41 42 43 44 45 46 47
+    x5 =
+        _mm_loadl_epi64((__m128i *)(in + 5 * in_p));  // 50 51 52 53 54 55 56 57
     // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
     x2 = _mm_unpacklo_epi8(x4, x5);
 
-    x6 = _mm_loadl_epi64((__m128i *)(in + 6*in_p));  // 60 61 62 63 64 65 66 67
-    x7 = _mm_loadl_epi64((__m128i *)(in + 7*in_p));  // 70 71 72 73 74 75 76 77
+    x6 =
+        _mm_loadl_epi64((__m128i *)(in + 6 * in_p));  // 60 61 62 63 64 65 66 67
+    x7 =
+        _mm_loadl_epi64((__m128i *)(in + 7 * in_p));  // 70 71 72 73 74 75 76 77
     // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
     x3 = _mm_unpacklo_epi8(x6, x7);
 
@@ -1433,15 +1428,15 @@ static INLINE void transpose(unsigned char *src[], int in_p,
     x5 = _mm_unpacklo_epi16(x2, x3);
     // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
     x6 = _mm_unpacklo_epi32(x4, x5);
-    _mm_storel_pd((double *)(out + 0*out_p),
+    _mm_storel_pd((double *)(out + 0 * out_p),
                   _mm_castsi128_pd(x6));  // 00 10 20 30 40 50 60 70
-    _mm_storeh_pd((double *)(out + 1*out_p),
+    _mm_storeh_pd((double *)(out + 1 * out_p),
                   _mm_castsi128_pd(x6));  // 01 11 21 31 41 51 61 71
     // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
     x7 = _mm_unpackhi_epi32(x4, x5);
-    _mm_storel_pd((double *)(out + 2*out_p),
+    _mm_storel_pd((double *)(out + 2 * out_p),
                   _mm_castsi128_pd(x7));  // 02 12 22 32 42 52 62 72
-    _mm_storeh_pd((double *)(out + 3*out_p),
+    _mm_storeh_pd((double *)(out + 3 * out_p),
                   _mm_castsi128_pd(x7));  // 03 13 23 33 43 53 63 73
 
     // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
@@ -1450,25 +1445,23 @@ static INLINE void transpose(unsigned char *src[], int in_p,
     x5 = _mm_unpackhi_epi16(x2, x3);
     // 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75
     x6 = _mm_unpacklo_epi32(x4, x5);
-    _mm_storel_pd((double *)(out + 4*out_p),
+    _mm_storel_pd((double *)(out + 4 * out_p),
                   _mm_castsi128_pd(x6));  // 04 14 24 34 44 54 64 74
-    _mm_storeh_pd((double *)(out + 5*out_p),
+    _mm_storeh_pd((double *)(out + 5 * out_p),
                   _mm_castsi128_pd(x6));  // 05 15 25 35 45 55 65 75
     // 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77
     x7 = _mm_unpackhi_epi32(x4, x5);
 
-    _mm_storel_pd((double *)(out + 6*out_p),
+    _mm_storel_pd((double *)(out + 6 * out_p),
                   _mm_castsi128_pd(x7));  // 06 16 26 36 46 56 66 76
-    _mm_storeh_pd((double *)(out + 7*out_p),
+    _mm_storeh_pd((double *)(out + 7 * out_p),
                   _mm_castsi128_pd(x7));  // 07 17 27 37 47 57 67 77
   } while (++idx8x8 < num_8x8_to_transpose);
 }
 
 void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
-                                  const uint8_t *limit0,
-                                  const uint8_t *thresh0,
-                                  const uint8_t *blimit1,
-                                  const uint8_t *limit1,
+                                  const uint8_t *limit0, const uint8_t *thresh0,
+                                  const uint8_t *blimit1, const uint8_t *limit1,
                                   const uint8_t *thresh1) {
   DECLARE_ALIGNED(16, unsigned char, t_dst[16 * 8]);
   unsigned char *src[2];
@@ -1515,10 +1508,8 @@ void vpx_lpf_vertical_8_sse2(unsigned char *s, int p,
 }
 
 void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
-                                  const uint8_t *limit0,
-                                  const uint8_t *thresh0,
-                                  const uint8_t *blimit1,
-                                  const uint8_t *limit1,
+                                  const uint8_t *limit0, const uint8_t *thresh0,
+                                  const uint8_t *blimit1, const uint8_t *limit1,
                                   const uint8_t *thresh1) {
   DECLARE_ALIGNED(16, unsigned char, t_dst[16 * 8]);
   unsigned char *src[2];
@@ -1578,8 +1569,7 @@ void vpx_lpf_vertical_16_dual_sse2(unsigned char *s, int p,
   transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);
 
   // Loop filtering
-  mb_lpf_horizontal_edge_w_sse2_16(t_dst + 8 * 16, 16, blimit, limit,
-                                   thresh);
+  mb_lpf_horizontal_edge_w_sse2_16(t_dst + 8 * 16, 16, blimit, limit, thresh);
 
   // Transpose back
   transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p);
diff --git a/vpx_dsp/x86/quantize_sse2.c b/vpx_dsp/x86/quantize_sse2.c
index 962fa1f3cf08d3b32378d592c9747952c50eeaba..3d0ed8610d126117642943cad976e96d86af8bf6 100644
--- a/vpx_dsp/x86/quantize_sse2.c
+++ b/vpx_dsp/x86/quantize_sse2.c
@@ -14,18 +14,19 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
 
-static INLINE __m128i load_coefficients(const tran_low_t *coeff_ptr) {
+static INLINE __m128i load_coefficients(const tran_low_t* coeff_ptr) {
 #if CONFIG_VPX_HIGHBITDEPTH
   return _mm_setr_epi16((int16_t)coeff_ptr[0], (int16_t)coeff_ptr[1],
-      (int16_t)coeff_ptr[2], (int16_t)coeff_ptr[3], (int16_t)coeff_ptr[4],
-      (int16_t)coeff_ptr[5], (int16_t)coeff_ptr[6], (int16_t)coeff_ptr[7]);
+                        (int16_t)coeff_ptr[2], (int16_t)coeff_ptr[3],
+                        (int16_t)coeff_ptr[4], (int16_t)coeff_ptr[5],
+                        (int16_t)coeff_ptr[6], (int16_t)coeff_ptr[7]);
 #else
-  return _mm_load_si128((const __m128i *)coeff_ptr);
+  return _mm_load_si128((const __m128i*)coeff_ptr);
 #endif
 }
 
 static INLINE void store_coefficients(__m128i coeff_vals,
-                                      tran_low_t *coeff_ptr) {
+                                      tran_low_t* coeff_ptr) {
 #if CONFIG_VPX_HIGHBITDEPTH
   __m128i one = _mm_set1_epi16(1);
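+  // _mm_mulhi_epi16 by one extracts the sign bits: the high words needed to
+  // widen the 16-bit coefficients back to 32-bit tran_low_t.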
   __m128i coeff_vals_hi = _mm_mulhi_epi16(coeff_vals, one);
@@ -44,8 +45,7 @@ void vpx_quantize_b_sse2(const tran_low_t* coeff_ptr, intptr_t n_coeffs,
                          const int16_t* round_ptr, const int16_t* quant_ptr,
                          const int16_t* quant_shift_ptr, tran_low_t* qcoeff_ptr,
                          tran_low_t* dqcoeff_ptr, const int16_t* dequant_ptr,
-                         uint16_t* eob_ptr,
-                         const int16_t* scan_ptr,
+                         uint16_t* eob_ptr, const int16_t* scan_ptr,
                          const int16_t* iscan_ptr) {
   __m128i zero;
   (void)scan_ptr;
diff --git a/vpx_dsp/x86/sad4d_avx2.c b/vpx_dsp/x86/sad4d_avx2.c
index 793658f9ea937098aa17394e0de65d0ce7485ce9..962b8fb11a423dacbb23b130285deb915027f332 100644
--- a/vpx_dsp/x86/sad4d_avx2.c
+++ b/vpx_dsp/x86/sad4d_avx2.c
@@ -11,10 +11,8 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
 
-void vpx_sad32x32x4d_avx2(const uint8_t *src,
-                          int src_stride,
-                          const uint8_t *const ref[4],
-                          int ref_stride,
+void vpx_sad32x32x4d_avx2(const uint8_t *src, int src_stride,
+                          const uint8_t *const ref[4], int ref_stride,
                           uint32_t res[4]) {
   __m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
   __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
@@ -30,7 +28,7 @@ void vpx_sad32x32x4d_avx2(const uint8_t *src,
   sum_ref1 = _mm256_set1_epi16(0);
   sum_ref2 = _mm256_set1_epi16(0);
   sum_ref3 = _mm256_set1_epi16(0);
-  for (i = 0; i < 32 ; i++) {
+  for (i = 0; i < 32; i++) {
     // load src and all refs
     src_reg = _mm256_loadu_si256((const __m256i *)src);
     ref0_reg = _mm256_loadu_si256((const __m256i *)ref0);
@@ -48,11 +46,11 @@ void vpx_sad32x32x4d_avx2(const uint8_t *src,
     sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
     sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
 
-    src+= src_stride;
-    ref0+= ref_stride;
-    ref1+= ref_stride;
-    ref2+= ref_stride;
-    ref3+= ref_stride;
+    src += src_stride;
+    ref0 += ref_stride;
+    ref1 += ref_stride;
+    ref2 += ref_stride;
+    ref3 += ref_stride;
   }
   {
     __m128i sum;
@@ -81,10 +79,8 @@ void vpx_sad32x32x4d_avx2(const uint8_t *src,
   }
 }
 
-void vpx_sad64x64x4d_avx2(const uint8_t *src,
-                          int src_stride,
-                          const uint8_t *const ref[4],
-                          int ref_stride,
+void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
+                          const uint8_t *const ref[4], int ref_stride,
                           uint32_t res[4]) {
   __m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg;
   __m256i ref1_reg, ref1next_reg, ref2_reg, ref2next_reg;
@@ -102,7 +98,7 @@ void vpx_sad64x64x4d_avx2(const uint8_t *src,
   sum_ref1 = _mm256_set1_epi16(0);
   sum_ref2 = _mm256_set1_epi16(0);
   sum_ref3 = _mm256_set1_epi16(0);
-  for (i = 0; i < 64 ; i++) {
+  for (i = 0; i < 64; i++) {
     // load 64 bytes from src and all refs
     src_reg = _mm256_loadu_si256((const __m256i *)src);
     srcnext_reg = _mm256_loadu_si256((const __m256i *)(src + 32));
@@ -133,11 +129,11 @@ void vpx_sad64x64x4d_avx2(const uint8_t *src,
     sum_ref1 = _mm256_add_epi32(sum_ref1, ref1next_reg);
     sum_ref2 = _mm256_add_epi32(sum_ref2, ref2next_reg);
     sum_ref3 = _mm256_add_epi32(sum_ref3, ref3next_reg);
-    src+= src_stride;
-    ref0+= ref_stride;
-    ref1+= ref_stride;
-    ref2+= ref_stride;
-    ref3+= ref_stride;
+    src += src_stride;
+    ref0 += ref_stride;
+    ref1 += ref_stride;
+    ref2 += ref_stride;
+    ref3 += ref_stride;
   }
   {
     __m128i sum;
diff --git a/vpx_dsp/x86/sad_avx2.c b/vpx_dsp/x86/sad_avx2.c
index ce9ad8f780c244d2e2f52fe7b90dcf396f9836ec..0d092e11aa9be336aa5722c2d5a76e81953dc15e 100644
--- a/vpx_dsp/x86/sad_avx2.c
+++ b/vpx_dsp/x86/sad_avx2.c
@@ -11,169 +11,170 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_ports/mem.h"
 
-#define FSAD64_H(h) \
-unsigned int vpx_sad64x##h##_avx2(const uint8_t *src_ptr, \
-                                  int src_stride, \
-                                  const uint8_t *ref_ptr, \
-                                  int ref_stride) { \
-  int i, res; \
-  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
-  __m256i sum_sad = _mm256_setzero_si256(); \
-  __m256i sum_sad_h; \
-  __m128i sum_sad128; \
-  for (i = 0 ; i < h ; i++) { \
-    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
-    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \
-    sad1_reg = _mm256_sad_epu8(ref1_reg, \
-               _mm256_loadu_si256((__m256i const *)src_ptr)); \
-    sad2_reg = _mm256_sad_epu8(ref2_reg, \
-               _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \
-    sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \
-    ref_ptr+= ref_stride; \
-    src_ptr+= src_stride; \
-  } \
-  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
-  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
-  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
-  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
-  res = _mm_cvtsi128_si32(sum_sad128); \
-  return res; \
-}
+#define FSAD64_H(h)                                                           \
+  unsigned int vpx_sad64x##h##_avx2(const uint8_t *src_ptr, int src_stride,   \
+                                    const uint8_t *ref_ptr, int ref_stride) { \
+    int i, res;                                                               \
+    __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg;                           \
+    __m256i sum_sad = _mm256_setzero_si256();                                 \
+    __m256i sum_sad_h;                                                        \
+    __m128i sum_sad128;                                                       \
+    for (i = 0; i < h; i++) {                                                 \
+      ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr);                \
+      ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32));         \
+      sad1_reg = _mm256_sad_epu8(                                             \
+          ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr));            \
+      sad2_reg = _mm256_sad_epu8(                                             \
+          ref2_reg, _mm256_loadu_si256((__m256i const *)(src_ptr + 32)));     \
+      sum_sad =                                                               \
+          _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg));    \
+      ref_ptr += ref_stride;                                                  \
+      src_ptr += src_stride;                                                  \
+    }                                                                         \
+    sum_sad_h = _mm256_srli_si256(sum_sad, 8);                                \
+    sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h);                           \
+    sum_sad128 = _mm256_extracti128_si256(sum_sad, 1);                        \
+    sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128);  \
+    res = _mm_cvtsi128_si32(sum_sad128);                                      \
+    return res;                                                               \
+  }
 
-#define FSAD32_H(h) \
-unsigned int vpx_sad32x##h##_avx2(const uint8_t *src_ptr, \
-                                  int src_stride, \
-                                  const uint8_t *ref_ptr, \
-                                  int ref_stride) { \
-  int i, res; \
-  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
-  __m256i sum_sad = _mm256_setzero_si256(); \
-  __m256i sum_sad_h; \
-  __m128i sum_sad128; \
-  int ref2_stride = ref_stride << 1; \
-  int src2_stride = src_stride << 1; \
-  int max = h >> 1; \
-  for (i = 0 ; i < max ; i++) { \
-    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
-    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
-    sad1_reg = _mm256_sad_epu8(ref1_reg, \
-               _mm256_loadu_si256((__m256i const *)src_ptr)); \
-    sad2_reg = _mm256_sad_epu8(ref2_reg, \
-               _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \
-    sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \
-    ref_ptr+= ref2_stride; \
-    src_ptr+= src2_stride; \
-  } \
-  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
-  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
-  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
-  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
-  res = _mm_cvtsi128_si32(sum_sad128); \
-  return res; \
-}
+#define FSAD32_H(h)                                                           \
+  unsigned int vpx_sad32x##h##_avx2(const uint8_t *src_ptr, int src_stride,   \
+                                    const uint8_t *ref_ptr, int ref_stride) { \
+    int i, res;                                                               \
+    __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg;                           \
+    __m256i sum_sad = _mm256_setzero_si256();                                 \
+    __m256i sum_sad_h;                                                        \
+    __m128i sum_sad128;                                                       \
+    int ref2_stride = ref_stride << 1;                                        \
+    int src2_stride = src_stride << 1;                                        \
+    int max = h >> 1;                                                         \
+    for (i = 0; i < max; i++) {                                               \
+      ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr);                \
+      ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
+      sad1_reg = _mm256_sad_epu8(                                             \
+          ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr));            \
+      sad2_reg = _mm256_sad_epu8(                                             \
+          ref2_reg,                                                           \
+          _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride)));       \
+      sum_sad =                                                               \
+          _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg));    \
+      ref_ptr += ref2_stride;                                                 \
+      src_ptr += src2_stride;                                                 \
+    }                                                                         \
+    sum_sad_h = _mm256_srli_si256(sum_sad, 8);                                \
+    sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h);                           \
+    sum_sad128 = _mm256_extracti128_si256(sum_sad, 1);                        \
+    sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128);  \
+    res = _mm_cvtsi128_si32(sum_sad128);                                      \
+    return res;                                                               \
+  }
 
-#define FSAD64 \
-FSAD64_H(64); \
-FSAD64_H(32);
+#define FSAD64  \
+  FSAD64_H(64); \
+  FSAD64_H(32);
 
-#define FSAD32 \
-FSAD32_H(64); \
-FSAD32_H(32); \
-FSAD32_H(16);
+#define FSAD32  \
+  FSAD32_H(64); \
+  FSAD32_H(32); \
+  FSAD32_H(16);
 
-FSAD64;
-FSAD32;
+/* clang-format off */
+FSAD64
+FSAD32
+/* clang-format on */
 
 #undef FSAD64
 #undef FSAD32
 #undef FSAD64_H
 #undef FSAD32_H
 
-#define FSADAVG64_H(h) \
-unsigned int vpx_sad64x##h##_avg_avx2(const uint8_t *src_ptr, \
-                                      int src_stride, \
-                                      const uint8_t *ref_ptr, \
-                                      int  ref_stride, \
-                                      const uint8_t *second_pred) { \
-  int i, res; \
-  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
-  __m256i sum_sad = _mm256_setzero_si256(); \
-  __m256i sum_sad_h; \
-  __m128i sum_sad128; \
-  for (i = 0 ; i < h ; i++) { \
-    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
-    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \
-    ref1_reg = _mm256_avg_epu8(ref1_reg, \
-               _mm256_loadu_si256((__m256i const *)second_pred)); \
-    ref2_reg = _mm256_avg_epu8(ref2_reg, \
-               _mm256_loadu_si256((__m256i const *)(second_pred +32))); \
-    sad1_reg = _mm256_sad_epu8(ref1_reg, \
-               _mm256_loadu_si256((__m256i const *)src_ptr)); \
-    sad2_reg = _mm256_sad_epu8(ref2_reg, \
-               _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \
-    sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \
-    ref_ptr+= ref_stride; \
-    src_ptr+= src_stride; \
-    second_pred+= 64; \
-  } \
-  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
-  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
-  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
-  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
-  res = _mm_cvtsi128_si32(sum_sad128); \
-  return res; \
-}
+#define FSADAVG64_H(h)                                                        \
+  unsigned int vpx_sad64x##h##_avg_avx2(                                      \
+      const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,         \
+      int ref_stride, const uint8_t *second_pred) {                           \
+    int i, res;                                                               \
+    __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg;                           \
+    __m256i sum_sad = _mm256_setzero_si256();                                 \
+    __m256i sum_sad_h;                                                        \
+    __m128i sum_sad128;                                                       \
+    for (i = 0; i < h; i++) {                                                 \
+      ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr);                \
+      ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32));         \
+      ref1_reg = _mm256_avg_epu8(                                             \
+          ref1_reg, _mm256_loadu_si256((__m256i const *)second_pred));        \
+      ref2_reg = _mm256_avg_epu8(                                             \
+          ref2_reg, _mm256_loadu_si256((__m256i const *)(second_pred + 32))); \
+      sad1_reg = _mm256_sad_epu8(                                             \
+          ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr));            \
+      sad2_reg = _mm256_sad_epu8(                                             \
+          ref2_reg, _mm256_loadu_si256((__m256i const *)(src_ptr + 32)));     \
+      sum_sad =                                                               \
+          _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg));    \
+      ref_ptr += ref_stride;                                                  \
+      src_ptr += src_stride;                                                  \
+      second_pred += 64;                                                      \
+    }                                                                         \
+    sum_sad_h = _mm256_srli_si256(sum_sad, 8);                                \
+    sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h);                           \
+    sum_sad128 = _mm256_extracti128_si256(sum_sad, 1);                        \
+    sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128);  \
+    res = _mm_cvtsi128_si32(sum_sad128);                                      \
+    return res;                                                               \
+  }
 
-#define FSADAVG32_H(h) \
-unsigned int vpx_sad32x##h##_avg_avx2(const uint8_t *src_ptr, \
-                                      int src_stride, \
-                                      const uint8_t *ref_ptr, \
-                                      int  ref_stride, \
-                                      const uint8_t *second_pred) { \
-  int i, res; \
-  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
-  __m256i sum_sad = _mm256_setzero_si256(); \
-  __m256i sum_sad_h; \
-  __m128i sum_sad128; \
-  int ref2_stride = ref_stride << 1; \
-  int src2_stride = src_stride << 1; \
-  int max = h >> 1; \
-  for (i = 0 ; i < max ; i++) { \
-    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
-    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
-    ref1_reg = _mm256_avg_epu8(ref1_reg, \
-               _mm256_loadu_si256((__m256i const *)second_pred)); \
-    ref2_reg = _mm256_avg_epu8(ref2_reg, \
-               _mm256_loadu_si256((__m256i const *)(second_pred +32))); \
-    sad1_reg = _mm256_sad_epu8(ref1_reg, \
-               _mm256_loadu_si256((__m256i const *)src_ptr)); \
-    sad2_reg = _mm256_sad_epu8(ref2_reg, \
-               _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \
-    sum_sad = _mm256_add_epi32(sum_sad, \
-              _mm256_add_epi32(sad1_reg, sad2_reg)); \
-    ref_ptr+= ref2_stride; \
-    src_ptr+= src2_stride; \
-    second_pred+= 64; \
-  } \
-  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
-  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
-  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
-  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
-  res = _mm_cvtsi128_si32(sum_sad128); \
-  return res; \
-}
+#define FSADAVG32_H(h)                                                        \
+  unsigned int vpx_sad32x##h##_avg_avx2(                                      \
+      const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,         \
+      int ref_stride, const uint8_t *second_pred) {                           \
+    int i, res;                                                               \
+    __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg;                           \
+    __m256i sum_sad = _mm256_setzero_si256();                                 \
+    __m256i sum_sad_h;                                                        \
+    __m128i sum_sad128;                                                       \
+    int ref2_stride = ref_stride << 1;                                        \
+    int src2_stride = src_stride << 1;                                        \
+    int max = h >> 1;                                                         \
+    for (i = 0; i < max; i++) {                                               \
+      ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr);                \
+      ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \
+      ref1_reg = _mm256_avg_epu8(                                             \
+          ref1_reg, _mm256_loadu_si256((__m256i const *)second_pred));        \
+      ref2_reg = _mm256_avg_epu8(                                             \
+          ref2_reg, _mm256_loadu_si256((__m256i const *)(second_pred + 32))); \
+      sad1_reg = _mm256_sad_epu8(                                             \
+          ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr));            \
+      sad2_reg = _mm256_sad_epu8(                                             \
+          ref2_reg,                                                           \
+          _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride)));       \
+      sum_sad =                                                               \
+          _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg));    \
+      ref_ptr += ref2_stride;                                                 \
+      src_ptr += src2_stride;                                                 \
+      second_pred += 64;                                                      \
+    }                                                                         \
+    sum_sad_h = _mm256_srli_si256(sum_sad, 8);                                \
+    sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h);                           \
+    sum_sad128 = _mm256_extracti128_si256(sum_sad, 1);                        \
+    sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128);  \
+    res = _mm_cvtsi128_si32(sum_sad128);                                      \
+    return res;                                                               \
+  }
 
-#define FSADAVG64 \
-FSADAVG64_H(64); \
-FSADAVG64_H(32);
+#define FSADAVG64  \
+  FSADAVG64_H(64); \
+  FSADAVG64_H(32);
 
-#define FSADAVG32 \
-FSADAVG32_H(64); \
-FSADAVG32_H(32); \
-FSADAVG32_H(16);
+#define FSADAVG32  \
+  FSADAVG32_H(64); \
+  FSADAVG32_H(32); \
+  FSADAVG32_H(16);
 
-FSADAVG64;
-FSADAVG32;
+/* clang-format off */
+FSADAVG64
+FSADAVG32
+/* clang-format on */
 
 #undef FSADAVG64
 #undef FSADAVG32
diff --git a/vpx_dsp/x86/txfm_common_sse2.h b/vpx_dsp/x86/txfm_common_sse2.h
index 536b206876c28ee1b658a1942ceb0157e8208eb9..f8edb1b7874c61bf44f54a01a479617a8a8e5152 100644
--- a/vpx_dsp/x86/txfm_common_sse2.h
+++ b/vpx_dsp/x86/txfm_common_sse2.h
@@ -14,15 +14,15 @@
 #include <emmintrin.h>
 #include "vpx/vpx_integer.h"
 
-#define pair_set_epi16(a, b) \
+#define pair_set_epi16(a, b)                                            \
   _mm_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                 (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a))
 
-#define dual_set_epi16(a, b) \
+#define dual_set_epi16(a, b)                                            \
   _mm_set_epi16((int16_t)(b), (int16_t)(b), (int16_t)(b), (int16_t)(b), \
                 (int16_t)(a), (int16_t)(a), (int16_t)(a), (int16_t)(a))
 
-#define octa_set_epi16(a, b, c, d, e, f, g, h) \
+#define octa_set_epi16(a, b, c, d, e, f, g, h)                           \
   _mm_setr_epi16((int16_t)(a), (int16_t)(b), (int16_t)(c), (int16_t)(d), \
                  (int16_t)(e), (int16_t)(f), (int16_t)(g), (int16_t)(h))
 
diff --git a/vpx_dsp/x86/variance_avx2.c b/vpx_dsp/x86/variance_avx2.c
index 7851a98b140949ee33c1495400c2ff625792e0a0..c67fe0b31d8611703d27025f1709710000080d89 100644
--- a/vpx_dsp/x86/variance_avx2.c
+++ b/vpx_dsp/x86/variance_avx2.c
@@ -14,13 +14,13 @@ typedef void (*get_var_avx2)(const uint8_t *src, int src_stride,
                              unsigned int *sse, int *sum);
 
 void vpx_get32x32var_avx2(const uint8_t *src, int src_stride,
-                          const uint8_t *ref, int ref_stride,
-                          unsigned int *sse, int *sum);
+                          const uint8_t *ref, int ref_stride, unsigned int *sse,
+                          int *sum);
 
 static void variance_avx2(const uint8_t *src, int src_stride,
-                          const uint8_t *ref, int  ref_stride,
-                          int w, int h, unsigned int *sse, int *sum,
-                          get_var_avx2 var_fn, int block_size) {
+                          const uint8_t *ref, int ref_stride, int w, int h,
+                          unsigned int *sse, int *sum, get_var_avx2 var_fn,
+                          int block_size) {
   int i, j;
 
   *sse = 0;
@@ -30,21 +30,20 @@ static void variance_avx2(const uint8_t *src, int src_stride,
     for (j = 0; j < w; j += block_size) {
       unsigned int sse0;
       int sum0;
-      var_fn(&src[src_stride * i + j], src_stride,
-             &ref[ref_stride * i + j], ref_stride, &sse0, &sum0);
+      var_fn(&src[src_stride * i + j], src_stride, &ref[ref_stride * i + j],
+             ref_stride, &sse0, &sum0);
       *sse += sse0;
       *sum += sum0;
     }
   }
 }
 
-
 unsigned int vpx_variance16x16_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_avx2(src, src_stride, ref, ref_stride, 16, 16,
-                sse, &sum, vpx_get16x16var_avx2, 16);
+  variance_avx2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
+                vpx_get16x16var_avx2, 16);
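+  // variance = sse - sum^2 / (16 * 16)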
   return *sse - (((unsigned int)sum * sum) >> 8);
 }
 
@@ -60,8 +59,8 @@ unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_avx2(src, src_stride, ref, ref_stride, 32, 16,
-                sse, &sum, vpx_get32x32var_avx2, 32);
+  variance_avx2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
+                vpx_get32x32var_avx2, 32);
   return *sse - (((int64_t)sum * sum) >> 9);
 }
 
@@ -69,8 +68,8 @@ unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_avx2(src, src_stride, ref, ref_stride, 32, 32,
-                sse, &sum, vpx_get32x32var_avx2, 32);
+  variance_avx2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
+                vpx_get32x32var_avx2, 32);
   return *sse - (((int64_t)sum * sum) >> 10);
 }
 
@@ -78,8 +77,8 @@ unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_avx2(src, src_stride, ref, ref_stride, 64, 64,
-                sse, &sum, vpx_get32x32var_avx2, 32);
+  variance_avx2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
+                vpx_get32x32var_avx2, 32);
   return *sse - (((int64_t)sum * sum) >> 12);
 }
 
@@ -87,79 +86,58 @@ unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_avx2(src, src_stride, ref, ref_stride, 64, 32,
-                sse, &sum, vpx_get32x32var_avx2, 32);
+  variance_avx2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
+                vpx_get32x32var_avx2, 32);
   return *sse - (((int64_t)sum * sum) >> 11);
 }
 
 unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
                                              int x_offset, int y_offset,
                                              const uint8_t *dst, int dst_stride,
-                                             int height,
-                                             unsigned int *sse);
-
-unsigned int vpx_sub_pixel_avg_variance32xh_avx2(const uint8_t *src,
-                                                 int src_stride,
-                                                 int x_offset,
-                                                 int y_offset,
-                                                 const uint8_t *dst,
-                                                 int dst_stride,
-                                                 const uint8_t *sec,
-                                                 int sec_stride,
-                                                 int height,
-                                                 unsigned int *sseptr);
+                                             int height, unsigned int *sse);
+
+unsigned int vpx_sub_pixel_avg_variance32xh_avx2(
+    const uint8_t *src, int src_stride, int x_offset, int y_offset,
+    const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride,
+    int height, unsigned int *sseptr);
 
 unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src,
-                                              int src_stride,
-                                              int x_offset,
-                                              int y_offset,
-                                              const uint8_t *dst,
+                                              int src_stride, int x_offset,
+                                              int y_offset, const uint8_t *dst,
                                               int dst_stride,
                                               unsigned int *sse) {
   unsigned int sse1;
-  const int se1 = vpx_sub_pixel_variance32xh_avx2(src, src_stride, x_offset,
-                                                  y_offset, dst, dst_stride,
-                                                  64, &sse1);
+  const int se1 = vpx_sub_pixel_variance32xh_avx2(
+      src, src_stride, x_offset, y_offset, dst, dst_stride, 64, &sse1);
   unsigned int sse2;
-  const int se2 = vpx_sub_pixel_variance32xh_avx2(src + 32, src_stride,
-                                                  x_offset, y_offset,
-                                                  dst + 32, dst_stride,
-                                                  64, &sse2);
+  const int se2 =
+      vpx_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset,
+                                      dst + 32, dst_stride, 64, &sse2);
   const int se = se1 + se2;
   *sse = sse1 + sse2;
   return *sse - (((int64_t)se * se) >> 12);
 }
 
 unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
-                                              int src_stride,
-                                              int x_offset,
-                                              int y_offset,
-                                              const uint8_t *dst,
+                                              int src_stride, int x_offset,
+                                              int y_offset, const uint8_t *dst,
                                               int dst_stride,
                                               unsigned int *sse) {
-  const int se = vpx_sub_pixel_variance32xh_avx2(src, src_stride, x_offset,
-                                                 y_offset, dst, dst_stride,
-                                                 32, sse);
+  const int se = vpx_sub_pixel_variance32xh_avx2(
+      src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse);
   return *sse - (((int64_t)se * se) >> 10);
 }
 
-unsigned int vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t *src,
-                                                  int src_stride,
-                                                  int x_offset,
-                                                  int y_offset,
-                                                  const uint8_t *dst,
-                                                  int dst_stride,
-                                                  unsigned int *sse,
-                                                  const uint8_t *sec) {
+unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
+    const uint8_t *src, int src_stride, int x_offset, int y_offset,
+    const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
   unsigned int sse1;
-  const int se1 = vpx_sub_pixel_avg_variance32xh_avx2(src, src_stride, x_offset,
-                                                      y_offset, dst, dst_stride,
-                                                      sec, 64, 64, &sse1);
+  const int se1 = vpx_sub_pixel_avg_variance32xh_avx2(
+      src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 64, 64, &sse1);
   unsigned int sse2;
-  const int se2 =
-    vpx_sub_pixel_avg_variance32xh_avx2(src + 32, src_stride, x_offset,
-                                        y_offset, dst + 32, dst_stride,
-                                        sec + 32, 64, 64, &sse2);
+  const int se2 = vpx_sub_pixel_avg_variance32xh_avx2(
+      src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, sec + 32,
+      64, 64, &sse2);
   const int se = se1 + se2;
 
   *sse = sse1 + sse2;
@@ -167,17 +145,11 @@ unsigned int vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t *src,
   return *sse - (((int64_t)se * se) >> 12);
 }
 
-unsigned int vpx_sub_pixel_avg_variance32x32_avx2(const uint8_t *src,
-                                                  int src_stride,
-                                                  int x_offset,
-                                                  int y_offset,
-                                                  const uint8_t *dst,
-                                                  int dst_stride,
-                                                  unsigned int *sse,
-                                                  const uint8_t *sec) {
+unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
+    const uint8_t *src, int src_stride, int x_offset, int y_offset,
+    const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
   // Process 32 elements in parallel.
-  const int se = vpx_sub_pixel_avg_variance32xh_avx2(src, src_stride, x_offset,
-                                                     y_offset, dst, dst_stride,
-                                                     sec, 32, 32, sse);
+  const int se = vpx_sub_pixel_avg_variance32xh_avx2(
+      src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse);
   return *sse - (((int64_t)se * se) >> 10);
 }
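
Two details of the 64-wide sub-pixel wrappers above are worth noting. The
64x64 block is processed as two independent 32-wide halves (src and
src + 32); the halves cover disjoint columns, so their sums and SSEs add
directly. And the (int64_t) cast before squaring is load-bearing: a quick
worst-case check (a sketch, not part of the library) shows a 32-bit product
would overflow.

    #include <stdint.h>

    /* Largest possible |sum| for a 64x64 block of 8-bit samples. */
    #define MAX_SUM_64X64 ((int64_t)64 * 64 * 255) /* 1044480 */

    /* Its square is ~1.09e12 (about 41 bits), so the product must be
       formed in 64-bit arithmetic before the >> 12. */
    typedef char assert_needs_64bit[
        (MAX_SUM_64X64 * MAX_SUM_64X64 > (int64_t)INT32_MAX) ? 1 : -1];
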
diff --git a/vpx_dsp/x86/variance_impl_avx2.c b/vpx_dsp/x86/variance_impl_avx2.c
index b289e9a0c74840d652aa74c38cf0ca0c0d2cf05a..5a1f125e5db32bb73e95d229d470164f5aee4181 100644
--- a/vpx_dsp/x86/variance_impl_avx2.c
+++ b/vpx_dsp/x86/variance_impl_avx2.c
@@ -14,306 +14,289 @@
 #include "vpx_ports/mem.h"
 
 DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = {
-  16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0,
-  16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0,
-  14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2,
-  14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2,
-  12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4,
-  12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4,
-  10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6,
-  10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-  6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10,
-  6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10,
-  4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12,
-  4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12,
-  2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14,
-  2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14,
+  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16,
+  0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  14, 2,  14, 2,  14, 2,
+  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14,
+  2,  14, 2,  14, 2,  14, 2,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,
+  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12,
+  4,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,
+  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  8,  8,  8,  8,  8,
+  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
+  8,  8,  8,  8,  8,  8,  8,  8,  6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,
+  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10,
+  6,  10, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,
+  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 2,  14, 2,  14,
+  2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,
+  14, 2,  14, 2,  14, 2,  14, 2,  14,
 };
 
+void vpx_get16x16var_avx2(const unsigned char *src_ptr, int source_stride,
+                          const unsigned char *ref_ptr, int recon_stride,
+                          unsigned int *SSE, int *Sum) {
+  __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
+  __m256i ref_expand_high, madd_low, madd_high;
+  unsigned int i, src_2strides, ref_2strides;
+  __m256i zero_reg = _mm256_set1_epi16(0);
+  __m256i sum_ref_src = _mm256_set1_epi16(0);
+  __m256i madd_ref_src = _mm256_set1_epi16(0);
+
+  // process two strides in one 256-bit register, halving the number of
+  // loop iterations relative to the sse2 code
+  src_2strides = source_stride << 1;
+  ref_2strides = recon_stride << 1;
+  for (i = 0; i < 8; i++) {
+    src = _mm256_castsi128_si256(_mm_loadu_si128((__m128i const *)(src_ptr)));
+    src = _mm256_inserti128_si256(
+        src, _mm_loadu_si128((__m128i const *)(src_ptr + source_stride)), 1);
+
+    ref = _mm256_castsi128_si256(_mm_loadu_si128((__m128i const *)(ref_ptr)));
+    ref = _mm256_inserti128_si256(
+        ref, _mm_loadu_si128((__m128i const *)(ref_ptr + recon_stride)), 1);
+
+    // expanding to 16 bit each lane
+    src_expand_low = _mm256_unpacklo_epi8(src, zero_reg);
+    src_expand_high = _mm256_unpackhi_epi8(src, zero_reg);
+
+    ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg);
+    ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg);
+
+    // src-ref
+    src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low);
+    src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high);
+
+    // madd low (src - ref)
+    madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low);
+
+    // add high to low
+    src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high);
+
+    // madd high (src - ref)
+    madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high);
+
+    sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low);
+
+    // add high to low
+    madd_ref_src =
+        _mm256_add_epi32(madd_ref_src, _mm256_add_epi32(madd_low, madd_high));
+
+    src_ptr += src_2strides;
+    ref_ptr += ref_2strides;
+  }
 
-void vpx_get16x16var_avx2(const unsigned char *src_ptr,
-                          int source_stride,
-                          const unsigned char *ref_ptr,
-                          int recon_stride,
-                          unsigned int *SSE,
-                          int *Sum) {
-    __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
-    __m256i ref_expand_high, madd_low, madd_high;
-    unsigned int i, src_2strides, ref_2strides;
-    __m256i zero_reg = _mm256_set1_epi16(0);
-    __m256i sum_ref_src = _mm256_set1_epi16(0);
-    __m256i madd_ref_src = _mm256_set1_epi16(0);
-
-    // processing two strides in a 256 bit register reducing the number
-    // of loop stride by half (comparing to the sse2 code)
-    src_2strides = source_stride << 1;
-    ref_2strides = recon_stride << 1;
-    for (i = 0; i < 8; i++) {
-        src = _mm256_castsi128_si256(
-              _mm_loadu_si128((__m128i const *) (src_ptr)));
-        src = _mm256_inserti128_si256(src,
-              _mm_loadu_si128((__m128i const *)(src_ptr+source_stride)), 1);
-
-        ref =_mm256_castsi128_si256(
-             _mm_loadu_si128((__m128i const *) (ref_ptr)));
-        ref = _mm256_inserti128_si256(ref,
-              _mm_loadu_si128((__m128i const *)(ref_ptr+recon_stride)), 1);
-
-        // expanding to 16 bit each lane
-        src_expand_low = _mm256_unpacklo_epi8(src, zero_reg);
-        src_expand_high = _mm256_unpackhi_epi8(src, zero_reg);
-
-        ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg);
-        ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg);
-
-        // src-ref
-        src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low);
-        src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high);
-
-        // madd low (src - ref)
-        madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low);
-
-        // add high to low
-        src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high);
-
-        // madd high (src - ref)
-        madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high);
-
-        sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low);
-
-        // add high to low
-        madd_ref_src = _mm256_add_epi32(madd_ref_src,
-                       _mm256_add_epi32(madd_low, madd_high));
-
-        src_ptr+= src_2strides;
-        ref_ptr+= ref_2strides;
-    }
-
-    {
-        __m128i sum_res, madd_res;
-        __m128i expand_sum_low, expand_sum_high, expand_sum;
-        __m128i expand_madd_low, expand_madd_high, expand_madd;
-        __m128i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum;
+  {
+    __m128i sum_res, madd_res;
+    __m128i expand_sum_low, expand_sum_high, expand_sum;
+    __m128i expand_madd_low, expand_madd_high, expand_madd;
+    __m128i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum;
 
-        // extract the low lane and add it to the high lane
-        sum_res = _mm_add_epi16(_mm256_castsi256_si128(sum_ref_src),
-                                _mm256_extractf128_si256(sum_ref_src, 1));
+    // extract the low lane and add it to the high lane
+    sum_res = _mm_add_epi16(_mm256_castsi256_si128(sum_ref_src),
+                            _mm256_extractf128_si256(sum_ref_src, 1));
 
-        madd_res = _mm_add_epi32(_mm256_castsi256_si128(madd_ref_src),
-                                 _mm256_extractf128_si256(madd_ref_src, 1));
+    madd_res = _mm_add_epi32(_mm256_castsi256_si128(madd_ref_src),
+                             _mm256_extractf128_si256(madd_ref_src, 1));
 
-        // padding each 2 bytes with another 2 zeroed bytes
-        expand_sum_low = _mm_unpacklo_epi16(_mm256_castsi256_si128(zero_reg),
-                                            sum_res);
-        expand_sum_high = _mm_unpackhi_epi16(_mm256_castsi256_si128(zero_reg),
-                                             sum_res);
+    // padding each 2 bytes with another 2 zeroed bytes
+    expand_sum_low =
+        _mm_unpacklo_epi16(_mm256_castsi256_si128(zero_reg), sum_res);
+    expand_sum_high =
+        _mm_unpackhi_epi16(_mm256_castsi256_si128(zero_reg), sum_res);
 
-        // shifting the sign 16 bits right
-        expand_sum_low = _mm_srai_epi32(expand_sum_low, 16);
-        expand_sum_high = _mm_srai_epi32(expand_sum_high, 16);
+    // arithmetic shift right by 16 to sign-extend
+    expand_sum_low = _mm_srai_epi32(expand_sum_low, 16);
+    expand_sum_high = _mm_srai_epi32(expand_sum_high, 16);
 
-        expand_sum = _mm_add_epi32(expand_sum_low, expand_sum_high);
+    expand_sum = _mm_add_epi32(expand_sum_low, expand_sum_high);
 
-        // expand each 32 bits of the madd result to 64 bits
-        expand_madd_low = _mm_unpacklo_epi32(madd_res,
-                          _mm256_castsi256_si128(zero_reg));
-        expand_madd_high = _mm_unpackhi_epi32(madd_res,
-                           _mm256_castsi256_si128(zero_reg));
+    // expand each 32 bits of the madd result to 64 bits
+    expand_madd_low =
+        _mm_unpacklo_epi32(madd_res, _mm256_castsi256_si128(zero_reg));
+    expand_madd_high =
+        _mm_unpackhi_epi32(madd_res, _mm256_castsi256_si128(zero_reg));
 
-        expand_madd = _mm_add_epi32(expand_madd_low, expand_madd_high);
+    expand_madd = _mm_add_epi32(expand_madd_low, expand_madd_high);
 
-        ex_expand_sum_low = _mm_unpacklo_epi32(expand_sum,
-                            _mm256_castsi256_si128(zero_reg));
-        ex_expand_sum_high = _mm_unpackhi_epi32(expand_sum,
-                             _mm256_castsi256_si128(zero_reg));
+    ex_expand_sum_low =
+        _mm_unpacklo_epi32(expand_sum, _mm256_castsi256_si128(zero_reg));
+    ex_expand_sum_high =
+        _mm_unpackhi_epi32(expand_sum, _mm256_castsi256_si128(zero_reg));
 
-        ex_expand_sum = _mm_add_epi32(ex_expand_sum_low, ex_expand_sum_high);
+    ex_expand_sum = _mm_add_epi32(ex_expand_sum_low, ex_expand_sum_high);
 
-        // shift 8 bytes eight
-        madd_res = _mm_srli_si128(expand_madd, 8);
-        sum_res = _mm_srli_si128(ex_expand_sum, 8);
+    // shift 8 bytes right
+    madd_res = _mm_srli_si128(expand_madd, 8);
+    sum_res = _mm_srli_si128(ex_expand_sum, 8);
 
-        madd_res = _mm_add_epi32(madd_res, expand_madd);
-        sum_res = _mm_add_epi32(sum_res, ex_expand_sum);
+    madd_res = _mm_add_epi32(madd_res, expand_madd);
+    sum_res = _mm_add_epi32(sum_res, ex_expand_sum);
 
-        *((int*)SSE)= _mm_cvtsi128_si32(madd_res);
+    *((int *)SSE) = _mm_cvtsi128_si32(madd_res);
 
-        *((int*)Sum)= _mm_cvtsi128_si32(sum_res);
-    }
+    *((int *)Sum) = _mm_cvtsi128_si32(sum_res);
+  }
 }
 
-void vpx_get32x32var_avx2(const unsigned char *src_ptr,
-                          int source_stride,
-                          const unsigned char *ref_ptr,
-                          int recon_stride,
-                          unsigned int *SSE,
-                          int *Sum) {
-    __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
-    __m256i ref_expand_high, madd_low, madd_high;
-    unsigned int i;
-    __m256i zero_reg = _mm256_set1_epi16(0);
-    __m256i sum_ref_src = _mm256_set1_epi16(0);
-    __m256i madd_ref_src = _mm256_set1_epi16(0);
+void vpx_get32x32var_avx2(const unsigned char *src_ptr, int source_stride,
+                          const unsigned char *ref_ptr, int recon_stride,
+                          unsigned int *SSE, int *Sum) {
+  __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
+  __m256i ref_expand_high, madd_low, madd_high;
+  unsigned int i;
+  __m256i zero_reg = _mm256_set1_epi16(0);
+  __m256i sum_ref_src = _mm256_set1_epi16(0);
+  __m256i madd_ref_src = _mm256_set1_epi16(0);
 
-    // processing 32 elements in parallel
-    for (i = 0; i < 16; i++) {
-       src = _mm256_loadu_si256((__m256i const *) (src_ptr));
+  // processing 32 elements in parallel
+  for (i = 0; i < 16; i++) {
+    src = _mm256_loadu_si256((__m256i const *)(src_ptr));
 
-       ref = _mm256_loadu_si256((__m256i const *) (ref_ptr));
+    ref = _mm256_loadu_si256((__m256i const *)(ref_ptr));
 
-       // expanding to 16 bit each lane
-       src_expand_low = _mm256_unpacklo_epi8(src, zero_reg);
-       src_expand_high = _mm256_unpackhi_epi8(src, zero_reg);
+    // expanding to 16 bit each lane
+    src_expand_low = _mm256_unpacklo_epi8(src, zero_reg);
+    src_expand_high = _mm256_unpackhi_epi8(src, zero_reg);
 
-       ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg);
-       ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg);
+    ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg);
+    ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg);
 
-       // src-ref
-       src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low);
-       src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high);
+    // src-ref
+    src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low);
+    src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high);
 
-       // madd low (src - ref)
-       madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low);
+    // madd low (src - ref)
+    madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low);
 
-       // add high to low
-       src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high);
+    // add high to low
+    src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high);
 
-       // madd high (src - ref)
-       madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high);
+    // madd high (src - ref)
+    madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high);
 
-       sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low);
+    sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low);
 
-       // add high to low
-       madd_ref_src = _mm256_add_epi32(madd_ref_src,
-                      _mm256_add_epi32(madd_low, madd_high));
+    // add high to low
+    madd_ref_src =
+        _mm256_add_epi32(madd_ref_src, _mm256_add_epi32(madd_low, madd_high));
 
-       src_ptr+= source_stride;
-       ref_ptr+= recon_stride;
-    }
+    src_ptr += source_stride;
+    ref_ptr += recon_stride;
+  }
 
-    {
-      __m256i expand_sum_low, expand_sum_high, expand_sum;
-      __m256i expand_madd_low, expand_madd_high, expand_madd;
-      __m256i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum;
+  {
+    __m256i expand_sum_low, expand_sum_high, expand_sum;
+    __m256i expand_madd_low, expand_madd_high, expand_madd;
+    __m256i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum;
 
-      // padding each 2 bytes with another 2 zeroed bytes
-      expand_sum_low = _mm256_unpacklo_epi16(zero_reg, sum_ref_src);
-      expand_sum_high = _mm256_unpackhi_epi16(zero_reg, sum_ref_src);
+    // padding each 2 bytes with another 2 zeroed bytes
+    expand_sum_low = _mm256_unpacklo_epi16(zero_reg, sum_ref_src);
+    expand_sum_high = _mm256_unpackhi_epi16(zero_reg, sum_ref_src);
 
-      // shifting the sign 16 bits right
-      expand_sum_low = _mm256_srai_epi32(expand_sum_low, 16);
-      expand_sum_high = _mm256_srai_epi32(expand_sum_high, 16);
+    // arithmetic shift right by 16 to sign-extend
+    expand_sum_low = _mm256_srai_epi32(expand_sum_low, 16);
+    expand_sum_high = _mm256_srai_epi32(expand_sum_high, 16);
 
-      expand_sum = _mm256_add_epi32(expand_sum_low, expand_sum_high);
+    expand_sum = _mm256_add_epi32(expand_sum_low, expand_sum_high);
 
-      // expand each 32 bits of the madd result to 64 bits
-      expand_madd_low = _mm256_unpacklo_epi32(madd_ref_src, zero_reg);
-      expand_madd_high = _mm256_unpackhi_epi32(madd_ref_src, zero_reg);
+    // expand each 32 bits of the madd result to 64 bits
+    expand_madd_low = _mm256_unpacklo_epi32(madd_ref_src, zero_reg);
+    expand_madd_high = _mm256_unpackhi_epi32(madd_ref_src, zero_reg);
 
-      expand_madd = _mm256_add_epi32(expand_madd_low, expand_madd_high);
+    expand_madd = _mm256_add_epi32(expand_madd_low, expand_madd_high);
 
-      ex_expand_sum_low = _mm256_unpacklo_epi32(expand_sum, zero_reg);
-      ex_expand_sum_high = _mm256_unpackhi_epi32(expand_sum, zero_reg);
+    ex_expand_sum_low = _mm256_unpacklo_epi32(expand_sum, zero_reg);
+    ex_expand_sum_high = _mm256_unpackhi_epi32(expand_sum, zero_reg);
 
-      ex_expand_sum = _mm256_add_epi32(ex_expand_sum_low, ex_expand_sum_high);
+    ex_expand_sum = _mm256_add_epi32(ex_expand_sum_low, ex_expand_sum_high);
 
-      // shift 8 bytes eight
-      madd_ref_src = _mm256_srli_si256(expand_madd, 8);
-      sum_ref_src = _mm256_srli_si256(ex_expand_sum, 8);
+    // shift 8 bytes right
+    madd_ref_src = _mm256_srli_si256(expand_madd, 8);
+    sum_ref_src = _mm256_srli_si256(ex_expand_sum, 8);
 
-      madd_ref_src = _mm256_add_epi32(madd_ref_src, expand_madd);
-      sum_ref_src = _mm256_add_epi32(sum_ref_src, ex_expand_sum);
+    madd_ref_src = _mm256_add_epi32(madd_ref_src, expand_madd);
+    sum_ref_src = _mm256_add_epi32(sum_ref_src, ex_expand_sum);
 
-      // extract the low lane and the high lane and add the results
-      *((int*)SSE)= _mm_cvtsi128_si32(_mm256_castsi256_si128(madd_ref_src)) +
-      _mm_cvtsi128_si32(_mm256_extractf128_si256(madd_ref_src, 1));
+    // extract the low lane and the high lane and add the results
+    *((int *)SSE) =
+        _mm_cvtsi128_si32(_mm256_castsi256_si128(madd_ref_src)) +
+        _mm_cvtsi128_si32(_mm256_extractf128_si256(madd_ref_src, 1));
 
-      *((int*)Sum)= _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_ref_src)) +
-      _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_ref_src, 1));
-    }
+    *((int *)Sum) = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_ref_src)) +
+                    _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_ref_src, 1));
+  }
 }
 
-#define FILTER_SRC(filter) \
-  /* filter the source */ \
+#define FILTER_SRC(filter)                               \
+  /* filter the source */                                \
   exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter); \
   exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter); \
-  \
-  /* add 8 to source */ \
-  exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8); \
-  exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8); \
-  \
-  /* divide source by 16 */ \
-  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \
+                                                         \
+  /* add 8 to source */                                  \
+  exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);        \
+  exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);        \
+                                                         \
+  /* divide source by 16 */                              \
+  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);         \
   exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
 
-#define MERGE_WITH_SRC(src_reg, reg) \
+#define MERGE_WITH_SRC(src_reg, reg)               \
   exp_src_lo = _mm256_unpacklo_epi8(src_reg, reg); \
   exp_src_hi = _mm256_unpackhi_epi8(src_reg, reg);
 
-#define LOAD_SRC_DST \
-  /* load source and destination */ \
-  src_reg = _mm256_loadu_si256((__m256i const *) (src)); \
-  dst_reg = _mm256_loadu_si256((__m256i const *) (dst));
+#define LOAD_SRC_DST                                    \
+  /* load source and destination */                     \
+  src_reg = _mm256_loadu_si256((__m256i const *)(src)); \
+  dst_reg = _mm256_loadu_si256((__m256i const *)(dst));
 
-#define AVG_NEXT_SRC(src_reg, size_stride) \
-  src_next_reg = _mm256_loadu_si256((__m256i const *) \
-                                   (src + size_stride)); \
-  /* average between current and next stride source */ \
+#define AVG_NEXT_SRC(src_reg, size_stride)                                 \
+  src_next_reg = _mm256_loadu_si256((__m256i const *)(src + size_stride)); \
+  /* average between current and next stride source */                     \
   src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
 
-#define MERGE_NEXT_SRC(src_reg, size_stride) \
-  src_next_reg = _mm256_loadu_si256((__m256i const *) \
-                                   (src + size_stride)); \
+#define MERGE_NEXT_SRC(src_reg, size_stride)                               \
+  src_next_reg = _mm256_loadu_si256((__m256i const *)(src + size_stride)); \
   MERGE_WITH_SRC(src_reg, src_next_reg)
 
-#define CALC_SUM_SSE_INSIDE_LOOP \
-  /* expand each byte to 2 bytes */ \
-  exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg); \
-  exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg); \
-  /* source - dest */ \
-  exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo); \
-  exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi); \
-  /* caculate sum */ \
-  sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo); \
+#define CALC_SUM_SSE_INSIDE_LOOP                          \
+  /* expand each byte to 2 bytes */                       \
+  exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);   \
+  exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);   \
+  /* source - dest */                                     \
+  exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);  \
+  exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);  \
+  /* calculate sum */                                     \
+  sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo);        \
   exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo); \
-  sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi); \
+  sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi);        \
   exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi); \
-  /* calculate sse */ \
-  sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo); \
+  /* calculate sse */                                     \
+  sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo);        \
   sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
 
 // final calculation to sum and sse
-#define CALC_SUM_AND_SSE \
-  res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg); \
-  sse_reg_hi = _mm256_srli_si256(sse_reg, 8); \
-  sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp); \
-  sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp); \
-  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
-  sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi); \
-  \
-  sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \
-  sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \
-  \
-  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
-  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
-  *((int*)sse)= _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) + \
-                _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \
-  sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \
-  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
-  sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) + \
+#define CALC_SUM_AND_SSE                                                   \
+  res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg);                         \
+  sse_reg_hi = _mm256_srli_si256(sse_reg, 8);                              \
+  sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp);                    \
+  sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp);                    \
+  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);                         \
+  sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi);                      \
+                                                                           \
+  sse_reg_hi = _mm256_srli_si256(sse_reg, 4);                              \
+  sum_reg_hi = _mm256_srli_si256(sum_reg, 8);                              \
+                                                                           \
+  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);                         \
+  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);                         \
+  *((int *)sse) = _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) +     \
+                  _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \
+  sum_reg_hi = _mm256_srli_si256(sum_reg, 4);                              \
+  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);                         \
+  sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) +               \
         _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));
 
-
-unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src,
-                                             int src_stride,
-                                             int x_offset,
-                                             int y_offset,
-                                             const uint8_t *dst,
-                                             int dst_stride,
-                                             int height,
-                                             unsigned int *sse) {
+unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
+                                             int x_offset, int y_offset,
+                                             const uint8_t *dst, int dst_stride,
+                                             int height, unsigned int *sse) {
   __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
   __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
   __m256i zero_reg;
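
A note before the per-offset cases in the hunks below: the x/y offsets come
in pre-scaled so that 0 means sample-aligned and 8 means the exact half-pel
position; any other value selects a 32-byte row of bilinear_filters_avx2
(offset << 5 is a byte offset) holding interleaved two-tap pairs (16 - k, k)
that sum to 16. MERGE_NEXT_SRC interleaves each source byte with its
neighbor so that the _mm256_maddubs_epi16 in FILTER_SRC yields
a * (16 - k) + b * k per 16-bit lane; adding pw8 (= 8) and shifting right by
4 rounds back to 8-bit range. A scalar sketch of one tap (illustrative
only):

    #include <stdint.h>

    static uint8_t bilinear_tap(uint8_t a, uint8_t b, int k) {
      return (uint8_t)((a * (16 - k) + b * k + 8) >> 4);
    }

At k == 8 this reduces to (a + b + 1) >> 1, exactly the rounding average
computed by _mm256_avg_epu8, which is why the offset == 8 branches use
AVG_NEXT_SRC instead of running the filter.
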
@@ -325,66 +308,66 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src,
   // x_offset = 0 and y_offset = 0
   if (x_offset == 0) {
     if (y_offset == 0) {
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         // expend each byte to 2 bytes
         MERGE_WITH_SRC(src_reg, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
-    // x_offset = 0 and y_offset = 8
+      // x_offset = 0 and y_offset = 8
     } else if (y_offset == 8) {
       __m256i src_next_reg;
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         AVG_NEXT_SRC(src_reg, src_stride)
         // expend each byte to 2 bytes
         MERGE_WITH_SRC(src_reg, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
-    // x_offset = 0 and y_offset = bilin interpolation
+      // x_offset = 0 and y_offset = bilin interpolation
     } else {
       __m256i filter, pw8, src_next_reg;
 
       y_offset <<= 5;
-      filter = _mm256_load_si256((__m256i const *)
-               (bilinear_filters_avx2 + y_offset));
+      filter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + y_offset));
       pw8 = _mm256_set1_epi16(8);
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         MERGE_NEXT_SRC(src_reg, src_stride)
         FILTER_SRC(filter)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
     }
-  // x_offset = 8  and y_offset = 0
+    // x_offset = 8  and y_offset = 0
   } else if (x_offset == 8) {
     if (y_offset == 0) {
       __m256i src_next_reg;
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         AVG_NEXT_SRC(src_reg, 1)
         // expand each byte to 2 bytes
         MERGE_WITH_SRC(src_reg, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
-    // x_offset = 8  and y_offset = 8
+      // x_offset = 8  and y_offset = 8
     } else if (y_offset == 8) {
       __m256i src_next_reg, src_avg;
       // load source and another source starting from the next
       // following byte
-      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      src_reg = _mm256_loadu_si256((__m256i const *)(src));
       AVG_NEXT_SRC(src_reg, 1)
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         src_avg = src_reg;
-        src+= src_stride;
+        src += src_stride;
         LOAD_SRC_DST
         AVG_NEXT_SRC(src_reg, 1)
         // average between previous average to current average
@@ -393,92 +376,92 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src,
         MERGE_WITH_SRC(src_avg, zero_reg)
         // save current source average
         CALC_SUM_SSE_INSIDE_LOOP
-        dst+= dst_stride;
+        dst += dst_stride;
       }
-    // x_offset = 8  and y_offset = bilin interpolation
+      // x_offset = 8  and y_offset = bilin interpolation
     } else {
       __m256i filter, pw8, src_next_reg, src_avg;
       y_offset <<= 5;
-      filter = _mm256_load_si256((__m256i const *)
-               (bilinear_filters_avx2 + y_offset));
+      filter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + y_offset));
       pw8 = _mm256_set1_epi16(8);
       // load source and another source starting from the next
       // following byte
-      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      src_reg = _mm256_loadu_si256((__m256i const *)(src));
       AVG_NEXT_SRC(src_reg, 1)
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         // save current source average
         src_avg = src_reg;
-        src+= src_stride;
+        src += src_stride;
         LOAD_SRC_DST
         AVG_NEXT_SRC(src_reg, 1)
         MERGE_WITH_SRC(src_avg, src_reg)
         FILTER_SRC(filter)
         CALC_SUM_SSE_INSIDE_LOOP
-        dst+= dst_stride;
+        dst += dst_stride;
       }
     }
-  // x_offset = bilin interpolation and y_offset = 0
+    // x_offset = bilin interpolation and y_offset = 0
   } else {
     if (y_offset == 0) {
       __m256i filter, pw8, src_next_reg;
       x_offset <<= 5;
-      filter = _mm256_load_si256((__m256i const *)
-               (bilinear_filters_avx2 + x_offset));
+      filter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + x_offset));
       pw8 = _mm256_set1_epi16(8);
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         MERGE_NEXT_SRC(src_reg, 1)
         FILTER_SRC(filter)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
-    // x_offset = bilin interpolation and y_offset = 8
+      // x_offset = bilin interpolation and y_offset = 8
     } else if (y_offset == 8) {
       __m256i filter, pw8, src_next_reg, src_pack;
       x_offset <<= 5;
-      filter = _mm256_load_si256((__m256i const *)
-               (bilinear_filters_avx2 + x_offset));
+      filter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + x_offset));
       pw8 = _mm256_set1_epi16(8);
-      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      src_reg = _mm256_loadu_si256((__m256i const *)(src));
       MERGE_NEXT_SRC(src_reg, 1)
       FILTER_SRC(filter)
       // convert each 16 bit to 8 bit to each low and high lane source
-      src_pack =  _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-      for (i = 0; i < height ; i++) {
-        src+= src_stride;
+      src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+      for (i = 0; i < height; i++) {
+        src += src_stride;
         LOAD_SRC_DST
         MERGE_NEXT_SRC(src_reg, 1)
         FILTER_SRC(filter)
-        src_reg =  _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+        src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
         // average between previous pack to the current
         src_pack = _mm256_avg_epu8(src_pack, src_reg);
         MERGE_WITH_SRC(src_pack, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
         src_pack = src_reg;
-        dst+= dst_stride;
+        dst += dst_stride;
       }
-    // x_offset = bilin interpolation and y_offset = bilin interpolation
+      // x_offset = bilin interpolation and y_offset = bilin interpolation
     } else {
       __m256i xfilter, yfilter, pw8, src_next_reg, src_pack;
       x_offset <<= 5;
-      xfilter = _mm256_load_si256((__m256i const *)
-                (bilinear_filters_avx2 + x_offset));
+      xfilter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + x_offset));
       y_offset <<= 5;
-      yfilter = _mm256_load_si256((__m256i const *)
-                (bilinear_filters_avx2 + y_offset));
+      yfilter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + y_offset));
       pw8 = _mm256_set1_epi16(8);
       // load source and another source starting from the next
       // following byte
-      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      src_reg = _mm256_loadu_si256((__m256i const *)(src));
       MERGE_NEXT_SRC(src_reg, 1)
 
       FILTER_SRC(xfilter)
       // convert each 16 bit to 8 bit to each low and high lane source
       src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-      for (i = 0; i < height ; i++) {
-        src+= src_stride;
+      for (i = 0; i < height; i++) {
+        src += src_stride;
         LOAD_SRC_DST
         MERGE_NEXT_SRC(src_reg, 1)
         FILTER_SRC(xfilter)
@@ -489,7 +472,7 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src,
         FILTER_SRC(yfilter)
         src_pack = src_reg;
         CALC_SUM_SSE_INSIDE_LOOP
-        dst+= dst_stride;
+        dst += dst_stride;
       }
     }
   }
@@ -497,16 +480,10 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src,
   return sum;
 }
 
-unsigned int vpx_sub_pixel_avg_variance32xh_avx2(const uint8_t *src,
-                                             int src_stride,
-                                             int x_offset,
-                                             int y_offset,
-                                             const uint8_t *dst,
-                                             int dst_stride,
-                                             const uint8_t *sec,
-                                             int sec_stride,
-                                             int height,
-                                             unsigned int *sse) {
+unsigned int vpx_sub_pixel_avg_variance32xh_avx2(
+    const uint8_t *src, int src_stride, int x_offset, int y_offset,
+    const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride,
+    int height, unsigned int *sse) {
   __m256i sec_reg;
   __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
   __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
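
The avg variant below walks the same nine (x_offset, y_offset) cases as
vpx_sub_pixel_variance32xh_avx2, but before differencing against dst it
averages the (possibly filtered) prediction with a second predictor sec,
one 32-byte row per iteration, as used for compound prediction. The extra
step in scalar form (a sketch, names hypothetical):

    #include <stdint.h>

    static uint8_t comp_avg(uint8_t pred, uint8_t second) {
      /* same round-half-up rule as _mm256_avg_epu8 */
      return (uint8_t)((pred + second + 1) >> 1);
    }
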
@@ -519,190 +496,190 @@ unsigned int vpx_sub_pixel_avg_variance32xh_avx2(const uint8_t *src,
   // x_offset = 0 and y_offset = 0
   if (x_offset == 0) {
     if (y_offset == 0) {
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_reg = _mm256_avg_epu8(src_reg, sec_reg);
-        sec+= sec_stride;
+        sec += sec_stride;
         // expend each byte to 2 bytes
         MERGE_WITH_SRC(src_reg, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
     } else if (y_offset == 8) {
       __m256i src_next_reg;
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         AVG_NEXT_SRC(src_reg, src_stride)
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_reg = _mm256_avg_epu8(src_reg, sec_reg);
-        sec+= sec_stride;
+        sec += sec_stride;
         // expend each byte to 2 bytes
         MERGE_WITH_SRC(src_reg, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
-    // x_offset = 0 and y_offset = bilin interpolation
+      // x_offset = 0 and y_offset = bilin interpolation
     } else {
       __m256i filter, pw8, src_next_reg;
 
       y_offset <<= 5;
-      filter = _mm256_load_si256((__m256i const *)
-                 (bilinear_filters_avx2 + y_offset));
+      filter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + y_offset));
       pw8 = _mm256_set1_epi16(8);
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         MERGE_NEXT_SRC(src_reg, src_stride)
         FILTER_SRC(filter)
         src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_reg = _mm256_avg_epu8(src_reg, sec_reg);
-        sec+= sec_stride;
+        sec += sec_stride;
         MERGE_WITH_SRC(src_reg, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
     }
-  // x_offset = 8  and y_offset = 0
+    // x_offset = 8  and y_offset = 0
   } else if (x_offset == 8) {
     if (y_offset == 0) {
       __m256i src_next_reg;
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         AVG_NEXT_SRC(src_reg, 1)
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_reg = _mm256_avg_epu8(src_reg, sec_reg);
-        sec+= sec_stride;
+        sec += sec_stride;
         // expand each byte to 2 bytes
         MERGE_WITH_SRC(src_reg, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
-    // x_offset = 8  and y_offset = 8
+      // x_offset = 8  and y_offset = 8
     } else if (y_offset == 8) {
       __m256i src_next_reg, src_avg;
       // load source and another source starting from the next
       // following byte
-      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      src_reg = _mm256_loadu_si256((__m256i const *)(src));
       AVG_NEXT_SRC(src_reg, 1)
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         // save current source average
         src_avg = src_reg;
-        src+= src_stride;
+        src += src_stride;
         LOAD_SRC_DST
         AVG_NEXT_SRC(src_reg, 1)
         // average between previous average to current average
         src_avg = _mm256_avg_epu8(src_avg, src_reg);
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_avg = _mm256_avg_epu8(src_avg, sec_reg);
-        sec+= sec_stride;
+        sec += sec_stride;
         // expand each byte to 2 bytes
         MERGE_WITH_SRC(src_avg, zero_reg)
         CALC_SUM_SSE_INSIDE_LOOP
-        dst+= dst_stride;
+        dst += dst_stride;
       }
-    // x_offset = 8  and y_offset = bilin interpolation
+      // x_offset = 8  and y_offset = bilin interpolation
     } else {
       __m256i filter, pw8, src_next_reg, src_avg;
       y_offset <<= 5;
-      filter = _mm256_load_si256((__m256i const *)
-               (bilinear_filters_avx2 + y_offset));
+      filter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + y_offset));
       pw8 = _mm256_set1_epi16(8);
       // load source and another source starting from the next
       // following byte
-      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      src_reg = _mm256_loadu_si256((__m256i const *)(src));
       AVG_NEXT_SRC(src_reg, 1)
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         // save current source average
         src_avg = src_reg;
-        src+= src_stride;
+        src += src_stride;
         LOAD_SRC_DST
         AVG_NEXT_SRC(src_reg, 1)
         MERGE_WITH_SRC(src_avg, src_reg)
         FILTER_SRC(filter)
         src_avg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_avg = _mm256_avg_epu8(src_avg, sec_reg);
         // expand each byte to 2 bytes
         MERGE_WITH_SRC(src_avg, zero_reg)
-        sec+= sec_stride;
+        sec += sec_stride;
         CALC_SUM_SSE_INSIDE_LOOP
-        dst+= dst_stride;
+        dst += dst_stride;
       }
     }
-  // x_offset = bilin interpolation and y_offset = 0
+    // x_offset = bilin interpolation and y_offset = 0
   } else {
     if (y_offset == 0) {
       __m256i filter, pw8, src_next_reg;
       x_offset <<= 5;
-      filter = _mm256_load_si256((__m256i const *)
-               (bilinear_filters_avx2 + x_offset));
+      filter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + x_offset));
       pw8 = _mm256_set1_epi16(8);
-      for (i = 0; i < height ; i++) {
+      for (i = 0; i < height; i++) {
         LOAD_SRC_DST
         MERGE_NEXT_SRC(src_reg, 1)
         FILTER_SRC(filter)
         src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_reg = _mm256_avg_epu8(src_reg, sec_reg);
         MERGE_WITH_SRC(src_reg, zero_reg)
-        sec+= sec_stride;
+        sec += sec_stride;
         CALC_SUM_SSE_INSIDE_LOOP
-        src+= src_stride;
-        dst+= dst_stride;
+        src += src_stride;
+        dst += dst_stride;
       }
-    // x_offset = bilin interpolation and y_offset = 8
+      // x_offset = bilin interpolation and y_offset = 8
     } else if (y_offset == 8) {
       __m256i filter, pw8, src_next_reg, src_pack;
       x_offset <<= 5;
-      filter = _mm256_load_si256((__m256i const *)
-               (bilinear_filters_avx2 + x_offset));
+      filter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + x_offset));
       pw8 = _mm256_set1_epi16(8);
-      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      src_reg = _mm256_loadu_si256((__m256i const *)(src));
       MERGE_NEXT_SRC(src_reg, 1)
       FILTER_SRC(filter)
       // convert each 16 bit to 8 bit to each low and high lane source
-      src_pack =  _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-      for (i = 0; i < height ; i++) {
-        src+= src_stride;
+      src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+      for (i = 0; i < height; i++) {
+        src += src_stride;
         LOAD_SRC_DST
         MERGE_NEXT_SRC(src_reg, 1)
         FILTER_SRC(filter)
-        src_reg =  _mm256_packus_epi16(exp_src_lo, exp_src_hi);
+        src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
         // average between previous pack to the current
         src_pack = _mm256_avg_epu8(src_pack, src_reg);
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_pack = _mm256_avg_epu8(src_pack, sec_reg);
-        sec+= sec_stride;
+        sec += sec_stride;
         MERGE_WITH_SRC(src_pack, zero_reg)
         src_pack = src_reg;
         CALC_SUM_SSE_INSIDE_LOOP
-        dst+= dst_stride;
+        dst += dst_stride;
       }
-    // x_offset = bilin interpolation and y_offset = bilin interpolation
+      // x_offset = bilin interpolation and y_offset = bilin interpolation
     } else {
       __m256i xfilter, yfilter, pw8, src_next_reg, src_pack;
       x_offset <<= 5;
-      xfilter = _mm256_load_si256((__m256i const *)
-                (bilinear_filters_avx2 + x_offset));
+      xfilter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + x_offset));
       y_offset <<= 5;
-      yfilter = _mm256_load_si256((__m256i const *)
-                (bilinear_filters_avx2 + y_offset));
+      yfilter = _mm256_load_si256(
+          (__m256i const *)(bilinear_filters_avx2 + y_offset));
       pw8 = _mm256_set1_epi16(8);
       // load source and another source starting from the next
       // following byte
-      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      src_reg = _mm256_loadu_si256((__m256i const *)(src));
       MERGE_NEXT_SRC(src_reg, 1)
 
       FILTER_SRC(xfilter)
       // convert each 16 bit to 8 bit to each low and high lane source
       src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-      for (i = 0; i < height ; i++) {
-        src+= src_stride;
+      for (i = 0; i < height; i++) {
+        src += src_stride;
         LOAD_SRC_DST
         MERGE_NEXT_SRC(src_reg, 1)
         FILTER_SRC(xfilter)
@@ -712,13 +689,13 @@ unsigned int vpx_sub_pixel_avg_variance32xh_avx2(const uint8_t *src,
         // filter the source
         FILTER_SRC(yfilter)
         src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
-        sec_reg = _mm256_loadu_si256((__m256i const *) (sec));
+        sec_reg = _mm256_loadu_si256((__m256i const *)(sec));
         src_pack = _mm256_avg_epu8(src_pack, sec_reg);
         MERGE_WITH_SRC(src_pack, zero_reg)
         src_pack = src_reg;
-        sec+= sec_stride;
+        sec += sec_stride;
         CALC_SUM_SSE_INSIDE_LOOP
-        dst+= dst_stride;
+        dst += dst_stride;
       }
     }
   }
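
One non-obvious step in the CALC_SUM_AND_SSE reduction used throughout this
file: sum_reg holds signed 16-bit partial sums, and
_mm256_cmpgt_epi16(zero_reg, sum_reg) yields 0xFFFF exactly in the negative
lanes, so interleaving the sums with that mask widens each lane to a
correctly sign-extended 32-bit value before the horizontal adds. The same
trick per lane, in scalar form (a sketch):

    #include <stdint.h>

    static int32_t widen_s16(int16_t v) {
      const uint16_t mask = (0 > v) ? 0xFFFFu : 0u; /* cmpgt(zero, v) */
      const uint32_t u = ((uint32_t)mask << 16) | (uint16_t)v;
      return (int32_t)u; /* two's-complement reinterpretation */
    }
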
diff --git a/vpx_dsp/x86/variance_mmx.c b/vpx_dsp/x86/variance_mmx.c
index f04f4e2c8f4758f10c6bc5a9c91fe779f037d224..da8c81819547211c5a1c17a94c21c394f1dc2b43 100644
--- a/vpx_dsp/x86/variance_mmx.c
+++ b/vpx_dsp/x86/variance_mmx.c
@@ -13,221 +13,202 @@
 #include "vpx_ports/mem.h"
 
 DECLARE_ALIGNED(16, static const int16_t, bilinear_filters_mmx[8][8]) = {
-  { 128, 128, 128, 128,   0,   0,   0,   0 },
-  { 112, 112, 112, 112,  16,  16,  16,  16 },
-  {  96,  96,  96,  96,  32,  32,  32,  32 },
-  {  80,  80,  80,  80,  48,  48,  48,  48 },
-  {  64,  64,  64,  64,  64,  64,  64,  64 },
-  {  48,  48,  48,  48,  80,  80,  80,  80 },
-  {  32,  32,  32,  32,  96,  96,  96,  96 },
-  {  16,  16,  16,  16, 112, 112, 112, 112 }
+  { 128, 128, 128, 128, 0, 0, 0, 0 }, { 112, 112, 112, 112, 16, 16, 16, 16 },
+  { 96, 96, 96, 96, 32, 32, 32, 32 }, { 80, 80, 80, 80, 48, 48, 48, 48 },
+  { 64, 64, 64, 64, 64, 64, 64, 64 }, { 48, 48, 48, 48, 80, 80, 80, 80 },
+  { 32, 32, 32, 32, 96, 96, 96, 96 }, { 16, 16, 16, 16, 112, 112, 112, 112 }
 };
 
-extern void vpx_get4x4var_mmx(const uint8_t *a, int a_stride,
-                              const uint8_t *b, int b_stride,
-                              unsigned int *sse, int *sum);
+extern void vpx_get4x4var_mmx(const uint8_t *a, int a_stride, const uint8_t *b,
+                              int b_stride, unsigned int *sse, int *sum);
 
 extern void vpx_filter_block2d_bil4x4_var_mmx(const unsigned char *ref_ptr,
                                               int ref_pixels_per_line,
                                               const unsigned char *src_ptr,
                                               int src_pixels_per_line,
                                               const int16_t *HFilter,
-                                              const int16_t *VFilter,
-                                              int *sum,
+                                              const int16_t *VFilter, int *sum,
                                               unsigned int *sumsquared);
 
-extern void vpx_filter_block2d_bil_var_mmx(const unsigned char *ref_ptr,
-                                           int ref_pixels_per_line,
-                                           const unsigned char *src_ptr,
-                                           int src_pixels_per_line,
-                                           unsigned int Height,
-                                           const int16_t *HFilter,
-                                           const int16_t *VFilter,
-                                           int *sum,
-                                           unsigned int *sumsquared);
-
+extern void vpx_filter_block2d_bil_var_mmx(
+    const unsigned char *ref_ptr, int ref_pixels_per_line,
+    const unsigned char *src_ptr, int src_pixels_per_line, unsigned int Height,
+    const int16_t *HFilter, const int16_t *VFilter, int *sum,
+    unsigned int *sumsquared);
 
 unsigned int vpx_variance4x4_mmx(const unsigned char *a, int a_stride,
                                  const unsigned char *b, int b_stride,
                                  unsigned int *sse) {
-    unsigned int var;
-    int avg;
+  unsigned int var;
+  int avg;
 
-    vpx_get4x4var_mmx(a, a_stride, b, b_stride, &var, &avg);
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 4));
+  vpx_get4x4var_mmx(a, a_stride, b, b_stride, &var, &avg);
+  *sse = var;
+  return (var - (((unsigned int)avg * avg) >> 4));
 }
 
 unsigned int vpx_variance8x8_mmx(const unsigned char *a, int a_stride,
                                  const unsigned char *b, int b_stride,
                                  unsigned int *sse) {
-    unsigned int var;
-    int avg;
+  unsigned int var;
+  int avg;
 
-    vpx_get8x8var_mmx(a, a_stride, b, b_stride, &var, &avg);
-    *sse = var;
+  vpx_get8x8var_mmx(a, a_stride, b, b_stride, &var, &avg);
+  *sse = var;
 
-    return (var - (((unsigned int)avg * avg) >> 6));
+  return (var - (((unsigned int)avg * avg) >> 6));
 }
 
 unsigned int vpx_mse16x16_mmx(const unsigned char *a, int a_stride,
                               const unsigned char *b, int b_stride,
                               unsigned int *sse) {
-    unsigned int sse0, sse1, sse2, sse3, var;
-    int sum0, sum1, sum2, sum3;
-
-    vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0);
-    vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1);
-    vpx_get8x8var_mmx(a + 8 * a_stride, a_stride,
-                      b + 8 * b_stride, b_stride, &sse2, &sum2);
-    vpx_get8x8var_mmx(a + 8 * a_stride + 8, a_stride,
-                      b + 8 * b_stride + 8, b_stride, &sse3, &sum3);
-
-    var = sse0 + sse1 + sse2 + sse3;
-    *sse = var;
-    return var;
+  unsigned int sse0, sse1, sse2, sse3, var;
+  int sum0, sum1, sum2, sum3;
+
+  vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0);
+  vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1);
+  vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride,
+                    &sse2, &sum2);
+  vpx_get8x8var_mmx(a + 8 * a_stride + 8, a_stride, b + 8 * b_stride + 8,
+                    b_stride, &sse3, &sum3);
+
+  var = sse0 + sse1 + sse2 + sse3;
+  *sse = var;
+  return var;
 }
 
 unsigned int vpx_variance16x16_mmx(const unsigned char *a, int a_stride,
                                    const unsigned char *b, int b_stride,
                                    unsigned int *sse) {
-    unsigned int sse0, sse1, sse2, sse3, var;
-    int sum0, sum1, sum2, sum3, avg;
-
-    vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0);
-    vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1);
-    vpx_get8x8var_mmx(a + 8 * a_stride, a_stride,
-                      b + 8 * b_stride, b_stride, &sse2, &sum2);
-    vpx_get8x8var_mmx(a + 8 * a_stride + 8, a_stride,
-                      b + 8 * b_stride + 8, b_stride, &sse3, &sum3);
-
-    var = sse0 + sse1 + sse2 + sse3;
-    avg = sum0 + sum1 + sum2 + sum3;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 8));
+  unsigned int sse0, sse1, sse2, sse3, var;
+  int sum0, sum1, sum2, sum3, avg;
+
+  vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0);
+  vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1);
+  vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride,
+                    &sse2, &sum2);
+  vpx_get8x8var_mmx(a + 8 * a_stride + 8, a_stride, b + 8 * b_stride + 8,
+                    b_stride, &sse3, &sum3);
+
+  var = sse0 + sse1 + sse2 + sse3;
+  avg = sum0 + sum1 + sum2 + sum3;
+  *sse = var;
+  return (var - (((unsigned int)avg * avg) >> 8));
 }
 
 unsigned int vpx_variance16x8_mmx(const unsigned char *a, int a_stride,
                                   const unsigned char *b, int b_stride,
                                   unsigned int *sse) {
-    unsigned int sse0, sse1, var;
-    int sum0, sum1, avg;
+  unsigned int sse0, sse1, var;
+  int sum0, sum1, avg;
 
-    vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0);
-    vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1);
+  vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0);
+  vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1);
 
-    var = sse0 + sse1;
-    avg = sum0 + sum1;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 7));
+  var = sse0 + sse1;
+  avg = sum0 + sum1;
+  *sse = var;
+  return (var - (((unsigned int)avg * avg) >> 7));
 }
 
 unsigned int vpx_variance8x16_mmx(const unsigned char *a, int a_stride,
                                   const unsigned char *b, int b_stride,
                                   unsigned int *sse) {
-    unsigned int sse0, sse1, var;
-    int sum0, sum1, avg;
+  unsigned int sse0, sse1, var;
+  int sum0, sum1, avg;
 
-    vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0);
-    vpx_get8x8var_mmx(a + 8 * a_stride, a_stride,
-                      b + 8 * b_stride, b_stride, &sse1, &sum1);
+  vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0);
+  vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride,
+                    &sse1, &sum1);
 
-    var = sse0 + sse1;
-    avg = sum0 + sum1;
-    *sse = var;
+  var = sse0 + sse1;
+  avg = sum0 + sum1;
+  *sse = var;
 
-    return (var - (((unsigned int)avg * avg) >> 7));
+  return (var - (((unsigned int)avg * avg) >> 7));
 }
 
 uint32_t vpx_sub_pixel_variance4x4_mmx(const uint8_t *a, int a_stride,
                                        int xoffset, int yoffset,
                                        const uint8_t *b, int b_stride,
                                        uint32_t *sse) {
-    int xsum;
-    unsigned int xxsum;
-    vpx_filter_block2d_bil4x4_var_mmx(a, a_stride, b, b_stride,
-                                      bilinear_filters_mmx[xoffset],
-                                      bilinear_filters_mmx[yoffset],
-                                      &xsum, &xxsum);
-    *sse = xxsum;
-    return (xxsum - (((unsigned int)xsum * xsum) >> 4));
+  int xsum;
+  unsigned int xxsum;
+  vpx_filter_block2d_bil4x4_var_mmx(
+      a, a_stride, b, b_stride, bilinear_filters_mmx[xoffset],
+      bilinear_filters_mmx[yoffset], &xsum, &xxsum);
+  *sse = xxsum;
+  return (xxsum - (((unsigned int)xsum * xsum) >> 4));
 }
 
-
 uint32_t vpx_sub_pixel_variance8x8_mmx(const uint8_t *a, int a_stride,
                                        int xoffset, int yoffset,
                                        const uint8_t *b, int b_stride,
                                        uint32_t *sse) {
-    int xsum;
-    uint32_t xxsum;
-    vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8,
-                                   bilinear_filters_mmx[xoffset],
-                                   bilinear_filters_mmx[yoffset],
-                                   &xsum, &xxsum);
-    *sse = xxsum;
-    return (xxsum - (((uint32_t)xsum * xsum) >> 6));
+  int xsum;
+  uint32_t xxsum;
+  vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8,
+                                 bilinear_filters_mmx[xoffset],
+                                 bilinear_filters_mmx[yoffset], &xsum, &xxsum);
+  *sse = xxsum;
+  return (xxsum - (((uint32_t)xsum * xsum) >> 6));
 }
 
 uint32_t vpx_sub_pixel_variance16x16_mmx(const uint8_t *a, int a_stride,
                                          int xoffset, int yoffset,
                                          const uint8_t *b, int b_stride,
                                          uint32_t *sse) {
-    int xsum0, xsum1;
-    unsigned int xxsum0, xxsum1;
+  int xsum0, xsum1;
+  unsigned int xxsum0, xxsum1;
 
-    vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16,
-                                   bilinear_filters_mmx[xoffset],
-                                   bilinear_filters_mmx[yoffset],
-                                   &xsum0, &xxsum0);
+  vpx_filter_block2d_bil_var_mmx(
+      a, a_stride, b, b_stride, 16, bilinear_filters_mmx[xoffset],
+      bilinear_filters_mmx[yoffset], &xsum0, &xxsum0);
 
-    vpx_filter_block2d_bil_var_mmx(a + 8, a_stride, b + 8, b_stride, 16,
-                                   bilinear_filters_mmx[xoffset],
-                                   bilinear_filters_mmx[yoffset],
-                                   &xsum1, &xxsum1);
+  vpx_filter_block2d_bil_var_mmx(
+      a + 8, a_stride, b + 8, b_stride, 16, bilinear_filters_mmx[xoffset],
+      bilinear_filters_mmx[yoffset], &xsum1, &xxsum1);
 
-    xsum0 += xsum1;
-    xxsum0 += xxsum1;
+  xsum0 += xsum1;
+  xxsum0 += xxsum1;
 
-    *sse = xxsum0;
-    return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8));
+  *sse = xxsum0;
+  return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8));
 }
 
 uint32_t vpx_sub_pixel_variance16x8_mmx(const uint8_t *a, int a_stride,
                                         int xoffset, int yoffset,
                                         const uint8_t *b, int b_stride,
                                         uint32_t *sse) {
-    int xsum0, xsum1;
-    unsigned int xxsum0, xxsum1;
+  int xsum0, xsum1;
+  unsigned int xxsum0, xxsum1;
 
-    vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8,
-                                   bilinear_filters_mmx[xoffset],
-                                   bilinear_filters_mmx[yoffset],
-                                   &xsum0, &xxsum0);
+  vpx_filter_block2d_bil_var_mmx(
+      a, a_stride, b, b_stride, 8, bilinear_filters_mmx[xoffset],
+      bilinear_filters_mmx[yoffset], &xsum0, &xxsum0);
 
-    vpx_filter_block2d_bil_var_mmx(a + 8, a_stride, b + 8, b_stride, 8,
-                                   bilinear_filters_mmx[xoffset],
-                                   bilinear_filters_mmx[yoffset],
-                                   &xsum1, &xxsum1);
+  vpx_filter_block2d_bil_var_mmx(
+      a + 8, a_stride, b + 8, b_stride, 8, bilinear_filters_mmx[xoffset],
+      bilinear_filters_mmx[yoffset], &xsum1, &xxsum1);
 
-    xsum0 += xsum1;
-    xxsum0 += xxsum1;
+  xsum0 += xsum1;
+  xxsum0 += xxsum1;
 
-    *sse = xxsum0;
-    return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 7));
+  *sse = xxsum0;
+  return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 7));
 }
 
 uint32_t vpx_sub_pixel_variance8x16_mmx(const uint8_t *a, int a_stride,
                                         int xoffset, int yoffset,
                                         const uint8_t *b, int b_stride,
                                         uint32_t *sse) {
-    int xsum;
-    unsigned int xxsum;
-    vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16,
-                                   bilinear_filters_mmx[xoffset],
-                                   bilinear_filters_mmx[yoffset],
-                                   &xsum, &xxsum);
-    *sse = xxsum;
-    return (xxsum - (((uint32_t)xsum * xsum) >> 7));
+  int xsum;
+  unsigned int xxsum;
+  vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16,
+                                 bilinear_filters_mmx[xoffset],
+                                 bilinear_filters_mmx[yoffset], &xsum, &xxsum);
+  *sse = xxsum;
+  return (xxsum - (((uint32_t)xsum * xsum) >> 7));
 }
 
 uint32_t vpx_variance_halfpixvar16x16_h_mmx(const uint8_t *a, int a_stride,
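Every variance kernel in this file computes the same quantity: variance = SSE - sum^2 / (w*h), with the division done as a shift because w*h is a power of two (>> 4 for 4x4, >> 6 for 8x8, >> 7 for 16x8 and 8x16, >> 8 for 16x16). A minimal scalar sketch of that computation follows; the function name is illustrative, not libvpx API.

    #include <stdint.h>

    /* Scalar reference for the NxM variance kernels above (sketch only). */
    static unsigned int block_variance_ref(const unsigned char *a, int a_stride,
                                           const unsigned char *b, int b_stride,
                                           int w, int h, unsigned int *sse) {
      int r, c, sum = 0;
      unsigned int sse_acc = 0;
      for (r = 0; r < h; ++r) {
        for (c = 0; c < w; ++c) {
          const int d = a[c] - b[c]; /* residual, in [-255, 255] */
          sum += d;
          sse_acc += (unsigned int)(d * d);
        }
        a += a_stride;
        b += b_stride;
      }
      *sse = sse_acc;
      /* w * h is a power of two, so this division is a shift in the real code. */
      return sse_acc - (unsigned int)(((int64_t)sum * sum) / (w * h));
    }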
diff --git a/vpx_dsp/x86/variance_sse2.c b/vpx_dsp/x86/variance_sse2.c
index e6c9365ab4d84e1de5a07c8c684f08bb687ba74b..65f6d52ba02f4715dd15535b41510b8b3a6d624b 100644
--- a/vpx_dsp/x86/variance_sse2.c
+++ b/vpx_dsp/x86/variance_sse2.c
@@ -15,9 +15,9 @@
 
 #include "vpx_ports/mem.h"
 
-typedef void (*getNxMvar_fn_t) (const unsigned char *src, int src_stride,
-                                const unsigned char *ref, int ref_stride,
-                                unsigned int *sse, int *sum);
+typedef void (*getNxMvar_fn_t)(const unsigned char *src, int src_stride,
+                               const unsigned char *ref, int ref_stride,
+                               unsigned int *sse, int *sum);
 
 unsigned int vpx_get_mb_ss_sse2(const int16_t *src) {
   __m128i vsum = _mm_setzero_si128();
@@ -31,11 +31,12 @@ unsigned int vpx_get_mb_ss_sse2(const int16_t *src) {
 
   vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8));
   vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4));
-  return  _mm_cvtsi128_si32(vsum);
+  return _mm_cvtsi128_si32(vsum);
 }
 
-#define READ64(p, stride, i) \
-  _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const uint32_t *)(p + i * stride)), \
+#define READ64(p, stride, i)                                  \
+  _mm_unpacklo_epi8(                                          \
+      _mm_cvtsi32_si128(*(const uint32_t *)(p + i * stride)), \
       _mm_cvtsi32_si128(*(const uint32_t *)(p + (i + 1) * stride)))
 
 static void get4x4var_sse2(const uint8_t *src, int src_stride,
@@ -57,32 +58,31 @@ static void get4x4var_sse2(const uint8_t *src, int src_stride,
   *sum = (int16_t)_mm_extract_epi16(vsum, 0);
 
   // sse
-  vsum = _mm_add_epi32(_mm_madd_epi16(diff0, diff0),
-                       _mm_madd_epi16(diff1, diff1));
+  vsum =
+      _mm_add_epi32(_mm_madd_epi16(diff0, diff0), _mm_madd_epi16(diff1, diff1));
   vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8));
   vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4));
   *sse = _mm_cvtsi128_si32(vsum);
 }
 
-void vpx_get8x8var_sse2(const uint8_t *src, int src_stride,
-                        const uint8_t *ref, int ref_stride,
-                        unsigned int *sse, int *sum) {
+void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref,
+                        int ref_stride, unsigned int *sse, int *sum) {
   const __m128i zero = _mm_setzero_si128();
   __m128i vsum = _mm_setzero_si128();
   __m128i vsse = _mm_setzero_si128();
   int i;
 
   for (i = 0; i < 8; i += 2) {
-    const __m128i src0 = _mm_unpacklo_epi8(_mm_loadl_epi64(
-        (const __m128i *)(src + i * src_stride)), zero);
-    const __m128i ref0 = _mm_unpacklo_epi8(_mm_loadl_epi64(
-        (const __m128i *)(ref + i * ref_stride)), zero);
+    const __m128i src0 = _mm_unpacklo_epi8(
+        _mm_loadl_epi64((const __m128i *)(src + i * src_stride)), zero);
+    const __m128i ref0 = _mm_unpacklo_epi8(
+        _mm_loadl_epi64((const __m128i *)(ref + i * ref_stride)), zero);
     const __m128i diff0 = _mm_sub_epi16(src0, ref0);
 
-    const __m128i src1 = _mm_unpacklo_epi8(_mm_loadl_epi64(
-        (const __m128i *)(src + (i + 1) * src_stride)), zero);
-    const __m128i ref1 = _mm_unpacklo_epi8(_mm_loadl_epi64(
-        (const __m128i *)(ref + (i + 1) * ref_stride)), zero);
+    const __m128i src1 = _mm_unpacklo_epi8(
+        _mm_loadl_epi64((const __m128i *)(src + (i + 1) * src_stride)), zero);
+    const __m128i ref1 = _mm_unpacklo_epi8(
+        _mm_loadl_epi64((const __m128i *)(ref + (i + 1) * ref_stride)), zero);
     const __m128i diff1 = _mm_sub_epi16(src1, ref1);
 
     vsum = _mm_add_epi16(vsum, diff0);
@@ -104,8 +104,8 @@ void vpx_get8x8var_sse2(const uint8_t *src, int src_stride,
 }
 
 void vpx_get16x16var_sse2(const uint8_t *src, int src_stride,
-                          const uint8_t *ref, int ref_stride,
-                          unsigned int *sse, int *sum) {
+                          const uint8_t *ref, int ref_stride, unsigned int *sse,
+                          int *sum) {
   const __m128i zero = _mm_setzero_si128();
   __m128i vsum = _mm_setzero_si128();
   __m128i vsse = _mm_setzero_si128();
@@ -135,8 +135,8 @@ void vpx_get16x16var_sse2(const uint8_t *src, int src_stride,
   // sum
   vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
   vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
-  *sum = (int16_t)_mm_extract_epi16(vsum, 0) +
-             (int16_t)_mm_extract_epi16(vsum, 1);
+  *sum =
+      (int16_t)_mm_extract_epi16(vsum, 0) + (int16_t)_mm_extract_epi16(vsum, 1);
 
   // sse
   vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8));
@@ -144,10 +144,9 @@ void vpx_get16x16var_sse2(const uint8_t *src, int src_stride,
   *sse = _mm_cvtsi128_si32(vsse);
 }
 
-
 static void variance_sse2(const unsigned char *src, int src_stride,
-                          const unsigned char *ref, int ref_stride,
-                          int w, int h, unsigned int *sse, int *sum,
+                          const unsigned char *ref, int ref_stride, int w,
+                          int h, unsigned int *sse, int *sum,
                           getNxMvar_fn_t var_fn, int block_size) {
   int i, j;
 
@@ -158,8 +157,8 @@ static void variance_sse2(const unsigned char *src, int src_stride,
     for (j = 0; j < w; j += block_size) {
       unsigned int sse0;
       int sum0;
-      var_fn(src + src_stride * i + j, src_stride,
-             ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
+      var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j,
+             ref_stride, &sse0, &sum0);
       *sse += sse0;
       *sum += sum0;
     }
@@ -178,8 +177,8 @@ unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 8, 4,
-                sse, &sum, get4x4var_sse2, 4);
+  variance_sse2(src, src_stride, ref, ref_stride, 8, 4, sse, &sum,
+                get4x4var_sse2, 4);
   return *sse - (((unsigned int)sum * sum) >> 5);
 }
 
@@ -187,8 +186,8 @@ unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 4, 8,
-                sse, &sum, get4x4var_sse2, 4);
+  variance_sse2(src, src_stride, ref, ref_stride, 4, 8, sse, &sum,
+                get4x4var_sse2, 4);
   return *sse - (((unsigned int)sum * sum) >> 5);
 }
 
@@ -204,8 +203,8 @@ unsigned int vpx_variance16x8_sse2(const unsigned char *src, int src_stride,
                                    const unsigned char *ref, int ref_stride,
                                    unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 16, 8,
-                sse, &sum, vpx_get8x8var_sse2, 8);
+  variance_sse2(src, src_stride, ref, ref_stride, 16, 8, sse, &sum,
+                vpx_get8x8var_sse2, 8);
   return *sse - (((unsigned int)sum * sum) >> 7);
 }
 
@@ -213,8 +212,8 @@ unsigned int vpx_variance8x16_sse2(const unsigned char *src, int src_stride,
                                    const unsigned char *ref, int ref_stride,
                                    unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 8, 16,
-                sse, &sum, vpx_get8x8var_sse2, 8);
+  variance_sse2(src, src_stride, ref, ref_stride, 8, 16, sse, &sum,
+                vpx_get8x8var_sse2, 8);
   return *sse - (((unsigned int)sum * sum) >> 7);
 }
 
@@ -230,8 +229,8 @@ unsigned int vpx_variance32x32_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 32, 32,
-                sse, &sum, vpx_get16x16var_sse2, 16);
+  variance_sse2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
+                vpx_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 10);
 }
 
@@ -239,8 +238,8 @@ unsigned int vpx_variance32x16_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 32, 16,
-                sse, &sum, vpx_get16x16var_sse2, 16);
+  variance_sse2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
+                vpx_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 9);
 }
 
@@ -248,8 +247,8 @@ unsigned int vpx_variance16x32_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 16, 32,
-                sse, &sum, vpx_get16x16var_sse2, 16);
+  variance_sse2(src, src_stride, ref, ref_stride, 16, 32, sse, &sum,
+                vpx_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 9);
 }
 
@@ -257,8 +256,8 @@ unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 64, 64,
-                sse, &sum, vpx_get16x16var_sse2, 16);
+  variance_sse2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
+                vpx_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 12);
 }
 
@@ -266,8 +265,8 @@ unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 64, 32,
-                sse, &sum, vpx_get16x16var_sse2, 16);
+  variance_sse2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
+                vpx_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 11);
 }
 
@@ -275,8 +274,8 @@ unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse) {
   int sum;
-  variance_sse2(src, src_stride, ref, ref_stride, 32, 64,
-                sse, &sum, vpx_get16x16var_sse2, 16);
+  variance_sse2(src, src_stride, ref, ref_stride, 32, 64, sse, &sum,
+                vpx_get16x16var_sse2, 16);
   return *sse - (((int64_t)sum * sum) >> 11);
 }
 
@@ -311,17 +310,14 @@ unsigned int vpx_mse16x16_sse2(const uint8_t *src, int src_stride,
 #if CONFIG_USE_X86INC
 // The 2 unused parameters are placeholders for the PIC-enabled build.
 // These definitions are for functions defined in subpel_variance.asm
-#define DECL(w, opt) \
-  int vpx_sub_pixel_variance##w##xh_##opt(const uint8_t *src, \
-                                          ptrdiff_t src_stride, \
-                                          int x_offset, int y_offset, \
-                                          const uint8_t *dst, \
-                                          ptrdiff_t dst_stride, \
-                                          int height, unsigned int *sse, \
-                                          void *unused0, void *unused)
+#define DECL(w, opt)                                                           \
+  int vpx_sub_pixel_variance##w##xh_##opt(                                     \
+      const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset,    \
+      const uint8_t *dst, ptrdiff_t dst_stride, int height, unsigned int *sse, \
+      void *unused0, void *unused)
 #define DECLS(opt1, opt2) \
-  DECL(4, opt2); \
-  DECL(8, opt1); \
+  DECL(4, opt2);          \
+  DECL(8, opt1);          \
   DECL(16, opt1)
 
 DECLS(sse2, sse);
@@ -329,59 +325,52 @@ DECLS(ssse3, ssse3);
 #undef DECLS
 #undef DECL
 
-#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
-unsigned int vpx_sub_pixel_variance##w##x##h##_##opt(const uint8_t *src, \
-                                                     int src_stride, \
-                                                     int x_offset, \
-                                                     int y_offset, \
-                                                     const uint8_t *dst, \
-                                                     int dst_stride, \
-                                                     unsigned int *sse_ptr) { \
-  unsigned int sse; \
-  int se = vpx_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \
-                                                y_offset, dst, dst_stride, \
-                                                h, &sse, NULL, NULL); \
-  if (w > wf) { \
-    unsigned int sse2; \
-    int se2 = vpx_sub_pixel_variance##wf##xh_##opt(src + 16, src_stride, \
-                                                   x_offset, y_offset, \
-                                                   dst + 16, dst_stride, \
-                                                   h, &sse2, NULL, NULL); \
-    se += se2; \
-    sse += sse2; \
-    if (w > wf * 2) { \
-      se2 = vpx_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \
-                                                 x_offset, y_offset, \
-                                                 dst + 32, dst_stride, \
-                                                 h, &sse2, NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-      se2 = vpx_sub_pixel_variance##wf##xh_##opt(src + 48, src_stride, \
-                                                 x_offset, y_offset, \
-                                                 dst + 48, dst_stride, \
-                                                 h, &sse2, NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-    } \
-  } \
-  *sse_ptr = sse; \
-  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
-}
+#define FN(w, h, wf, wlog2, hlog2, opt, cast)                                  \
+  unsigned int vpx_sub_pixel_variance##w##x##h##_##opt(                        \
+      const uint8_t *src, int src_stride, int x_offset, int y_offset,          \
+      const uint8_t *dst, int dst_stride, unsigned int *sse_ptr) {             \
+    unsigned int sse;                                                          \
+    int se = vpx_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset,   \
+                                                  y_offset, dst, dst_stride,   \
+                                                  h, &sse, NULL, NULL);        \
+    if (w > wf) {                                                              \
+      unsigned int sse2;                                                       \
+      int se2 = vpx_sub_pixel_variance##wf##xh_##opt(                          \
+          src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h,   \
+          &sse2, NULL, NULL);                                                  \
+      se += se2;                                                               \
+      sse += sse2;                                                             \
+      if (w > wf * 2) {                                                        \
+        se2 = vpx_sub_pixel_variance##wf##xh_##opt(                            \
+            src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
+            &sse2, NULL, NULL);                                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+        se2 = vpx_sub_pixel_variance##wf##xh_##opt(                            \
+            src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
+            &sse2, NULL, NULL);                                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+      }                                                                        \
+    }                                                                          \
+    *sse_ptr = sse;                                                            \
+    return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
+  }
 
-#define FNS(opt1, opt2) \
-FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
-FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
-FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
-FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
-FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
-FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
-FN(16, 16, 16, 4, 4, opt1, (uint32_t)); \
-FN(16,  8, 16, 4, 3, opt1, (uint32_t)); \
-FN(8,  16,  8, 3, 4, opt1, (uint32_t)); \
-FN(8,   8,  8, 3, 3, opt1, (uint32_t)); \
-FN(8,   4,  8, 3, 2, opt1, (uint32_t)); \
-FN(4,   8,  4, 2, 3, opt2, (uint32_t)); \
-FN(4,   4,  4, 2, 2, opt2, (uint32_t))
+#define FNS(opt1, opt2)                   \
+  FN(64, 64, 16, 6, 6, opt1, (int64_t));  \
+  FN(64, 32, 16, 6, 5, opt1, (int64_t));  \
+  FN(32, 64, 16, 5, 6, opt1, (int64_t));  \
+  FN(32, 32, 16, 5, 5, opt1, (int64_t));  \
+  FN(32, 16, 16, 5, 4, opt1, (int64_t));  \
+  FN(16, 32, 16, 4, 5, opt1, (int64_t));  \
+  FN(16, 16, 16, 4, 4, opt1, (uint32_t)); \
+  FN(16, 8, 16, 4, 3, opt1, (uint32_t));  \
+  FN(8, 16, 8, 3, 4, opt1, (uint32_t));   \
+  FN(8, 8, 8, 3, 3, opt1, (uint32_t));    \
+  FN(8, 4, 8, 3, 2, opt1, (uint32_t));    \
+  FN(4, 8, 4, 2, 3, opt2, (uint32_t));    \
+  FN(4, 4, 4, 2, 2, opt2, (uint32_t))
 
 FNS(sse2, sse);
 FNS(ssse3, ssse3);
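To make the FN macro concrete: FN(16, 8, 16, 4, 3, sse2, (uint32_t)) expands, once the w > wf branch (dead when w == wf) is folded away, to roughly the following (hand-expanded here for illustration):

    unsigned int vpx_sub_pixel_variance16x8_sse2(
        const uint8_t *src, int src_stride, int x_offset, int y_offset,
        const uint8_t *dst, int dst_stride, unsigned int *sse_ptr) {
      unsigned int sse;
      /* one 16-wide column, 8 rows; the NULLs are the unused PIC parameters */
      int se = vpx_sub_pixel_variance16xh_sse2(src, src_stride, x_offset,
                                               y_offset, dst, dst_stride, 8,
                                               &sse, NULL, NULL);
      *sse_ptr = sse;
      return sse - (((uint32_t)se * se) >> (4 + 3)); /* wlog2 + hlog2 */
    }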
@@ -390,84 +379,69 @@ FNS(ssse3, ssse3);
 #undef FN
 
 // The 2 unused parameters are placeholders for the PIC-enabled build.
-#define DECL(w, opt) \
-int vpx_sub_pixel_avg_variance##w##xh_##opt(const uint8_t *src, \
-                                            ptrdiff_t src_stride, \
-                                            int x_offset, int y_offset, \
-                                            const uint8_t *dst, \
-                                            ptrdiff_t dst_stride, \
-                                            const uint8_t *sec, \
-                                            ptrdiff_t sec_stride, \
-                                            int height, unsigned int *sse, \
-                                            void *unused0, void *unused)
+#define DECL(w, opt)                                                        \
+  int vpx_sub_pixel_avg_variance##w##xh_##opt(                              \
+      const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
+      const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec,         \
+      ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0,   \
+      void *unused)
 #define DECLS(opt1, opt2) \
-DECL(4, opt2); \
-DECL(8, opt1); \
-DECL(16, opt1)
+  DECL(4, opt2);          \
+  DECL(8, opt1);          \
+  DECL(16, opt1)
 
 DECLS(sse2, sse);
 DECLS(ssse3, ssse3);
 #undef DECL
 #undef DECLS
 
-#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
-unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt(const uint8_t *src, \
-                                                         int src_stride, \
-                                                         int x_offset, \
-                                                         int y_offset, \
-                                                         const uint8_t *dst, \
-                                                         int dst_stride, \
-                                                         unsigned int *sseptr, \
-                                                         const uint8_t *sec) { \
-  unsigned int sse; \
-  int se = vpx_sub_pixel_avg_variance##wf##xh_##opt(src, src_stride, x_offset, \
-                                                    y_offset, dst, dst_stride, \
-                                                    sec, w, h, &sse, NULL, \
-                                                    NULL); \
-  if (w > wf) { \
-    unsigned int sse2; \
-    int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(src + 16, src_stride, \
-                                                       x_offset, y_offset, \
-                                                       dst + 16, dst_stride, \
-                                                       sec + 16, w, h, &sse2, \
-                                                       NULL, NULL); \
-    se += se2; \
-    sse += sse2; \
-    if (w > wf * 2) { \
-      se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(src + 32, src_stride, \
-                                                     x_offset, y_offset, \
-                                                     dst + 32, dst_stride, \
-                                                     sec + 32, w, h, &sse2, \
-                                                     NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-      se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(src + 48, src_stride, \
-                                                     x_offset, y_offset, \
-                                                     dst + 48, dst_stride, \
-                                                     sec + 48, w, h, &sse2, \
-                                                     NULL, NULL); \
-      se += se2; \
-      sse += sse2; \
-    } \
-  } \
-  *sseptr = sse; \
-  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
-}
+#define FN(w, h, wf, wlog2, hlog2, opt, cast)                                  \
+  unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt(                    \
+      const uint8_t *src, int src_stride, int x_offset, int y_offset,          \
+      const uint8_t *dst, int dst_stride, unsigned int *sseptr,                \
+      const uint8_t *sec) {                                                    \
+    unsigned int sse;                                                          \
+    int se = vpx_sub_pixel_avg_variance##wf##xh_##opt(                         \
+        src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
+        NULL, NULL);                                                           \
+    if (w > wf) {                                                              \
+      unsigned int sse2;                                                       \
+      int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                      \
+          src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride,      \
+          sec + 16, w, h, &sse2, NULL, NULL);                                  \
+      se += se2;                                                               \
+      sse += sse2;                                                             \
+      if (w > wf * 2) {                                                        \
+        se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                        \
+            src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride,    \
+            sec + 32, w, h, &sse2, NULL, NULL);                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+        se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                        \
+            src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride,    \
+            sec + 48, w, h, &sse2, NULL, NULL);                                \
+        se += se2;                                                             \
+        sse += sse2;                                                           \
+      }                                                                        \
+    }                                                                          \
+    *sseptr = sse;                                                             \
+    return sse - ((cast se * se) >> (wlog2 + hlog2));                          \
+  }
 
-#define FNS(opt1, opt2) \
-FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
-FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
-FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
-FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
-FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
-FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
-FN(16, 16, 16, 4, 4, opt1, (uint32_t)); \
-FN(16,  8, 16, 4, 3, opt1, (uint32_t)); \
-FN(8,  16,  8, 3, 4, opt1, (uint32_t)); \
-FN(8,   8,  8, 3, 3, opt1, (uint32_t)); \
-FN(8,   4,  8, 3, 2, opt1, (uint32_t)); \
-FN(4,   8,  4, 2, 3, opt2, (uint32_t)); \
-FN(4,   4,  4, 2, 2, opt2, (uint32_t))
+#define FNS(opt1, opt2)                   \
+  FN(64, 64, 16, 6, 6, opt1, (int64_t));  \
+  FN(64, 32, 16, 6, 5, opt1, (int64_t));  \
+  FN(32, 64, 16, 5, 6, opt1, (int64_t));  \
+  FN(32, 32, 16, 5, 5, opt1, (int64_t));  \
+  FN(32, 16, 16, 5, 4, opt1, (int64_t));  \
+  FN(16, 32, 16, 4, 5, opt1, (int64_t));  \
+  FN(16, 16, 16, 4, 4, opt1, (uint32_t)); \
+  FN(16, 8, 16, 4, 3, opt1, (uint32_t));  \
+  FN(8, 16, 8, 3, 4, opt1, (uint32_t));   \
+  FN(8, 8, 8, 3, 3, opt1, (uint32_t));    \
+  FN(8, 4, 8, 3, 2, opt1, (uint32_t));    \
+  FN(4, 8, 4, 2, 3, opt2, (uint32_t));    \
+  FN(4, 4, 4, 2, 2, opt2, (uint32_t))
 
 FNS(sse2, sse);
 FNS(ssse3, ssse3);
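The choice of (uint32_t) versus (int64_t) in these FNS tables tracks the block size. The sum of 8-bit residuals over a 16x16 block is at most 255 * 256 = 65280, and 65280^2 = 4261478400 still fits in 32 bits, so blocks up to 256 pixels can square the sum as uint32_t. For 32x16 the bound is 255 * 512 = 130560, whose square (about 1.7e10) overflows 32 bits, so every larger block squares the sum in 64 bits.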
diff --git a/vpx_dsp/x86/vpx_asm_stubs.c b/vpx_dsp/x86/vpx_asm_stubs.c
index 97256e0f968051c6f4fe8ac68198bc6d530f0c98..e3e2075a3d41bd8d4c9cd7ac653dd9812581d665 100644
--- a/vpx_dsp/x86/vpx_asm_stubs.c
+++ b/vpx_dsp/x86/vpx_asm_stubs.c
@@ -75,7 +75,7 @@ FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, sse2);
 //                             const int16_t *filter_y, int y_step_q4,
 //                             int w, int h);
 FUN_CONV_2D(, sse2);
-FUN_CONV_2D(avg_ , sse2);
+FUN_CONV_2D(avg_, sse2);
 
 #if CONFIG_VPX_HIGHBITDEPTH && ARCH_X86_64
 highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v8_sse2;
@@ -157,6 +157,6 @@ HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
 //                                    const int16_t *filter_y, int y_step_q4,
 //                                    int w, int h, int bd);
 HIGH_FUN_CONV_2D(, sse2);
-HIGH_FUN_CONV_2D(avg_ , sse2);
+HIGH_FUN_CONV_2D(avg_, sse2);
 #endif  // CONFIG_VPX_HIGHBITDEPTH && ARCH_X86_64
 #endif  // HAVE_SSE2
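The only change in this file is dropping a stray space before a comma in the macro argument; it is purely cosmetic, since whitespace around a macro argument never reaches the ## paste operator. A tiny illustration with a hypothetical macro (not the libvpx FUN_CONV_2D definition):

    #define PASTE(a, b) a##b
    /* PASTE(avg_ , sse2) and PASTE(avg_, sse2) both produce the same
       token, avg_sse2; the trailing space is not part of the argument. */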
diff --git a/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c b/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
index b7186785379d962d8b0702b96c616def12646255..2e8566cd5c2f8a37d3f8530877bdf701e9390bbd 100644
--- a/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
+++ b/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
@@ -40,35 +40,31 @@ DECLARE_ALIGNED(32, static const uint8_t, filt4_global_avx2[32]) = {
 };
 
 #if defined(__clang__)
-# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ <= 3) || \
-    (defined(__APPLE__) && \
-        ((__clang_major__ == 4 && __clang_minor__ <= 2) || \
-            (__clang_major__ == 5 && __clang_minor__ == 0)))
-
-#  define MM256_BROADCASTSI128_SI256(x) \
-       _mm_broadcastsi128_si256((__m128i const *)&(x))
-# else  // clang > 3.3, and not 5.0 on macosx.
-#  define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
-# endif  // clang <= 3.3
+#if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ <= 3) ||  \
+    (defined(__APPLE__) && ((__clang_major__ == 4 && __clang_minor__ <= 2) || \
+                            (__clang_major__ == 5 && __clang_minor__ == 0)))
+
+#define MM256_BROADCASTSI128_SI256(x) \
+  _mm_broadcastsi128_si256((__m128i const *) & (x))
+#else  // clang > 3.3, and not 5.0 on macosx.
+#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
+#endif  // clang <= 3.3
 #elif defined(__GNUC__)
-# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6)
-#  define MM256_BROADCASTSI128_SI256(x) \
-       _mm_broadcastsi128_si256((__m128i const *)&(x))
-# elif __GNUC__ == 4 && __GNUC_MINOR__ == 7
-#  define MM256_BROADCASTSI128_SI256(x) _mm_broadcastsi128_si256(x)
-# else  // gcc > 4.7
-#  define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
-# endif  // gcc <= 4.6
-#else  // !(gcc || clang)
-# define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6)
+#define MM256_BROADCASTSI128_SI256(x) \
+  _mm_broadcastsi128_si256((__m128i const *) & (x))
+#elif __GNUC__ == 4 && __GNUC_MINOR__ == 7
+#define MM256_BROADCASTSI128_SI256(x) _mm_broadcastsi128_si256(x)
+#else  // gcc > 4.7
+#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
+#endif  // gcc <= 4.6
+#else   // !(gcc || clang)
+#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
 #endif  // __clang__
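Whichever compiler-specific spelling is selected, the macro does one thing: replicate a 128-bit value into both lanes of a 256-bit register. A usage sketch with an illustrative value (assumes AVX2 and the macro above):

    #include <immintrin.h>

    static __m256i broadcast_example(void) {
      const __m128i lane = _mm_set1_epi8(0x42); /* 16 copies of 0x42 */
      return MM256_BROADCASTSI128_SI256(lane);  /* 32 copies of 0x42 */
    }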
 
-static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr,
-                                         ptrdiff_t src_pixels_per_line,
-                                         uint8_t *output_ptr,
-                                         ptrdiff_t output_pitch,
-                                         uint32_t output_height,
-                                         const int16_t *filter) {
+static void vpx_filter_block1d16_h8_avx2(
+    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
+    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i filtersReg;
   __m256i addFilterReg64, filt1Reg, filt2Reg, filt3Reg, filt4Reg;
   __m256i firstFilters, secondFilters, thirdFilters, forthFilters;
@@ -82,26 +78,22 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr,
   filtersReg = _mm_loadu_si128((const __m128i *)filter);
   // convert the 16 bit (short) values to 8 bit (byte) values and keep the
   // same data in both lanes of the 128 bit register.
-  filtersReg =_mm_packs_epi16(filtersReg, filtersReg);
+  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);
   // have the same data in both lanes of a 256 bit register
   filtersReg32 = MM256_BROADCASTSI128_SI256(filtersReg);
 
   // duplicate only the first 16 bits (first and second byte)
   // across 256 bit register
-  firstFilters = _mm256_shuffle_epi8(filtersReg32,
-                 _mm256_set1_epi16(0x100u));
+  firstFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x100u));
   // duplicate only the second 16 bits (third and fourth byte)
   // across 256 bit register
-  secondFilters = _mm256_shuffle_epi8(filtersReg32,
-                  _mm256_set1_epi16(0x302u));
+  secondFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x302u));
   // duplicate only the third 16 bits (fifth and sixth byte)
   // across 256 bit register
-  thirdFilters = _mm256_shuffle_epi8(filtersReg32,
-                 _mm256_set1_epi16(0x504u));
+  thirdFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x504u));
   // duplicate only the fourth 16 bits (seventh and eighth byte)
   // across 256 bit register
-  forthFilters = _mm256_shuffle_epi8(filtersReg32,
-                 _mm256_set1_epi16(0x706u));
+  forthFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x706u));
 
   filt1Reg = _mm256_load_si256((__m256i const *)filt1_global_avx2);
   filt2Reg = _mm256_load_si256((__m256i const *)filt2_global_avx2);
@@ -111,17 +103,18 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr,
   // multiply the source and destination strides by two
   src_stride = src_pixels_per_line << 1;
   dst_stride = output_pitch << 1;
-  for (i = output_height; i > 1; i-=2) {
+  for (i = output_height; i > 1; i -= 2) {
     // load the 2 strides of source
-    srcReg32b1 = _mm256_castsi128_si256(
-                 _mm_loadu_si128((const __m128i *)(src_ptr - 3)));
-    srcReg32b1 = _mm256_inserti128_si256(srcReg32b1,
-                 _mm_loadu_si128((const __m128i *)
-                 (src_ptr+src_pixels_per_line-3)), 1);
+    srcReg32b1 =
+        _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src_ptr - 3)));
+    srcReg32b1 = _mm256_inserti128_si256(
+        srcReg32b1,
+        _mm_loadu_si128((const __m128i *)(src_ptr + src_pixels_per_line - 3)),
+        1);
 
     // filter the source buffer
-    srcRegFilt32b1_1= _mm256_shuffle_epi8(srcReg32b1, filt1Reg);
-    srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b1, filt4Reg);
+    srcRegFilt32b1_1 = _mm256_shuffle_epi8(srcReg32b1, filt1Reg);
+    srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b1, filt4Reg);
 
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt32b1_1 = _mm256_maddubs_epi16(srcRegFilt32b1_1, firstFilters);
@@ -131,28 +124,29 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr,
     srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, srcRegFilt32b2);
 
     // filter the source buffer
-    srcRegFilt32b3= _mm256_shuffle_epi8(srcReg32b1, filt2Reg);
-    srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b1, filt3Reg);
+    srcRegFilt32b3 = _mm256_shuffle_epi8(srcReg32b1, filt2Reg);
+    srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b1, filt3Reg);
 
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt32b3 = _mm256_maddubs_epi16(srcRegFilt32b3, secondFilters);
     srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, thirdFilters);
 
     // add and saturate the results together
-    srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1,
-                       _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2));
+    srcRegFilt32b1_1 = _mm256_adds_epi16(
+        srcRegFilt32b1_1, _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2));
 
     // reading 2 strides of the next 16 bytes
     // (part of it was already read by the earlier load)
-    srcReg32b2 = _mm256_castsi128_si256(
-                 _mm_loadu_si128((const __m128i *)(src_ptr + 5)));
-    srcReg32b2 = _mm256_inserti128_si256(srcReg32b2,
-                 _mm_loadu_si128((const __m128i *)
-                 (src_ptr+src_pixels_per_line+5)), 1);
+    srcReg32b2 =
+        _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src_ptr + 5)));
+    srcReg32b2 = _mm256_inserti128_si256(
+        srcReg32b2,
+        _mm_loadu_si128((const __m128i *)(src_ptr + src_pixels_per_line + 5)),
+        1);
 
     // add and saturate the results together
-    srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1,
-                       _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2));
+    srcRegFilt32b1_1 = _mm256_adds_epi16(
+        srcRegFilt32b1_1, _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2));
 
     // filter the source buffer
     srcRegFilt32b2_1 = _mm256_shuffle_epi8(srcReg32b2, filt1Reg);
@@ -166,19 +160,18 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr,
     srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, srcRegFilt32b2);
 
     // filter the source buffer
-    srcRegFilt32b3= _mm256_shuffle_epi8(srcReg32b2, filt2Reg);
-    srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b2, filt3Reg);
+    srcRegFilt32b3 = _mm256_shuffle_epi8(srcReg32b2, filt2Reg);
+    srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b2, filt3Reg);
 
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt32b3 = _mm256_maddubs_epi16(srcRegFilt32b3, secondFilters);
     srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, thirdFilters);
 
     // add and saturate the results together
-    srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1,
-                       _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2));
-    srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1,
-                       _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2));
-
+    srcRegFilt32b2_1 = _mm256_adds_epi16(
+        srcRegFilt32b2_1, _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2));
+    srcRegFilt32b2_1 = _mm256_adds_epi16(
+        srcRegFilt32b2_1, _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2));
 
     srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, addFilterReg64);
 
@@ -191,19 +184,18 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr,
     // shrink each 16 bit value to 8 bits; the first lane contains the first
     // convolve result and the second lane contains the second convolve
     // result
-    srcRegFilt32b1_1 = _mm256_packus_epi16(srcRegFilt32b1_1,
-                                           srcRegFilt32b2_1);
+    srcRegFilt32b1_1 = _mm256_packus_epi16(srcRegFilt32b1_1, srcRegFilt32b2_1);
 
-    src_ptr+=src_stride;
+    src_ptr += src_stride;
 
     // save 16 bytes
-    _mm_store_si128((__m128i*)output_ptr,
-    _mm256_castsi256_si128(srcRegFilt32b1_1));
+    _mm_store_si128((__m128i *)output_ptr,
+                    _mm256_castsi256_si128(srcRegFilt32b1_1));
 
     // save the next 16 bytes
-    _mm_store_si128((__m128i*)(output_ptr+output_pitch),
-    _mm256_extractf128_si256(srcRegFilt32b1_1, 1));
-    output_ptr+=dst_stride;
+    _mm_store_si128((__m128i *)(output_ptr + output_pitch),
+                    _mm256_extractf128_si256(srcRegFilt32b1_1, 1));
+    output_ptr += dst_stride;
   }
 
   // if the number of strides is odd.
@@ -215,83 +207,74 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr,
     srcReg1 = _mm_loadu_si128((const __m128i *)(src_ptr - 3));
 
     // filter the source buffer
-    srcRegFilt1_1 = _mm_shuffle_epi8(srcReg1,
-                    _mm256_castsi256_si128(filt1Reg));
-    srcRegFilt2 = _mm_shuffle_epi8(srcReg1,
-                  _mm256_castsi256_si128(filt4Reg));
+    srcRegFilt1_1 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt1Reg));
+    srcRegFilt2 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt4Reg));
 
     // multiply 2 adjacent elements with the filter and add the result
-    srcRegFilt1_1 = _mm_maddubs_epi16(srcRegFilt1_1,
-                    _mm256_castsi256_si128(firstFilters));
-    srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2,
-                  _mm256_castsi256_si128(forthFilters));
+    srcRegFilt1_1 =
+        _mm_maddubs_epi16(srcRegFilt1_1, _mm256_castsi256_si128(firstFilters));
+    srcRegFilt2 =
+        _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(forthFilters));
 
     // add and saturate the results together
     srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, srcRegFilt2);
 
     // filter the source buffer
-    srcRegFilt3= _mm_shuffle_epi8(srcReg1,
-                 _mm256_castsi256_si128(filt2Reg));
-    srcRegFilt2= _mm_shuffle_epi8(srcReg1,
-                 _mm256_castsi256_si128(filt3Reg));
+    srcRegFilt3 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt2Reg));
+    srcRegFilt2 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt3Reg));
 
     // multiply 2 adjacent elements with the filter and add the result
-    srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3,
-                  _mm256_castsi256_si128(secondFilters));
-    srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2,
-                  _mm256_castsi256_si128(thirdFilters));
+    srcRegFilt3 =
+        _mm_maddubs_epi16(srcRegFilt3, _mm256_castsi256_si128(secondFilters));
+    srcRegFilt2 =
+        _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(thirdFilters));
 
     // add and saturate the results together
-    srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1,
-                    _mm_min_epi16(srcRegFilt3, srcRegFilt2));
+    srcRegFilt1_1 =
+        _mm_adds_epi16(srcRegFilt1_1, _mm_min_epi16(srcRegFilt3, srcRegFilt2));
 
     // reading the next 16 bytes
     // (part of it was already read by the earlier load)
     srcReg2 = _mm_loadu_si128((const __m128i *)(src_ptr + 5));
 
     // add and saturate the results together
-    srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1,
-                    _mm_max_epi16(srcRegFilt3, srcRegFilt2));
+    srcRegFilt1_1 =
+        _mm_adds_epi16(srcRegFilt1_1, _mm_max_epi16(srcRegFilt3, srcRegFilt2));
 
     // filter the source buffer
-    srcRegFilt2_1 = _mm_shuffle_epi8(srcReg2,
-                    _mm256_castsi256_si128(filt1Reg));
-    srcRegFilt2 = _mm_shuffle_epi8(srcReg2,
-                  _mm256_castsi256_si128(filt4Reg));
+    srcRegFilt2_1 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt1Reg));
+    srcRegFilt2 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt4Reg));
 
     // multiply 2 adjacent elements with the filter and add the result
-    srcRegFilt2_1 = _mm_maddubs_epi16(srcRegFilt2_1,
-                    _mm256_castsi256_si128(firstFilters));
-    srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2,
-                  _mm256_castsi256_si128(forthFilters));
+    srcRegFilt2_1 =
+        _mm_maddubs_epi16(srcRegFilt2_1, _mm256_castsi256_si128(firstFilters));
+    srcRegFilt2 =
+        _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(forthFilters));
 
     // add and saturate the results together
     srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, srcRegFilt2);
 
     // filter the source buffer
-    srcRegFilt3 = _mm_shuffle_epi8(srcReg2,
-                  _mm256_castsi256_si128(filt2Reg));
-    srcRegFilt2 = _mm_shuffle_epi8(srcReg2,
-                  _mm256_castsi256_si128(filt3Reg));
+    srcRegFilt3 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt2Reg));
+    srcRegFilt2 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt3Reg));
 
     // multiply 2 adjacent elements with the filter and add the result
-    srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3,
-                  _mm256_castsi256_si128(secondFilters));
-    srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2,
-                  _mm256_castsi256_si128(thirdFilters));
+    srcRegFilt3 =
+        _mm_maddubs_epi16(srcRegFilt3, _mm256_castsi256_si128(secondFilters));
+    srcRegFilt2 =
+        _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(thirdFilters));
 
     // add and saturate the results together
-    srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1,
-                    _mm_min_epi16(srcRegFilt3, srcRegFilt2));
-    srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1,
-                    _mm_max_epi16(srcRegFilt3, srcRegFilt2));
-
+    srcRegFilt2_1 =
+        _mm_adds_epi16(srcRegFilt2_1, _mm_min_epi16(srcRegFilt3, srcRegFilt2));
+    srcRegFilt2_1 =
+        _mm_adds_epi16(srcRegFilt2_1, _mm_max_epi16(srcRegFilt3, srcRegFilt2));
 
-    srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1,
-                    _mm256_castsi256_si128(addFilterReg64));
+    srcRegFilt1_1 =
+        _mm_adds_epi16(srcRegFilt1_1, _mm256_castsi256_si128(addFilterReg64));
 
-    srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1,
-                    _mm256_castsi256_si128(addFilterReg64));
+    srcRegFilt2_1 =
+        _mm_adds_epi16(srcRegFilt2_1, _mm256_castsi256_si128(addFilterReg64));
 
     // shift each 16 bit value right by 7 bits
     srcRegFilt1_1 = _mm_srai_epi16(srcRegFilt1_1, 7);
@@ -303,16 +286,13 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr,
     srcRegFilt1_1 = _mm_packus_epi16(srcRegFilt1_1, srcRegFilt2_1);
 
     // save 16 bytes
-    _mm_store_si128((__m128i*)output_ptr, srcRegFilt1_1);
+    _mm_store_si128((__m128i *)output_ptr, srcRegFilt1_1);
   }
 }
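Stripped of the SIMD plumbing, vpx_filter_block1d16_h8_avx2 performs a plain 8-tap horizontal convolution with 7 fractional bits and round-to-nearest. A scalar sketch of the per-pixel arithmetic, ignoring the intermediate 16-bit saturation (the adds_epi16/min/max dance) the vector code needs to stay in range; the name and standalone form are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    static void filter_block1d16_h8_ref(const uint8_t *src_ptr,
                                        ptrdiff_t src_pixels_per_line,
                                        uint8_t *output_ptr,
                                        ptrdiff_t output_pitch,
                                        uint32_t output_height,
                                        const int16_t *filter) {
      uint32_t y;
      int x, k;
      for (y = 0; y < output_height; ++y) {
        for (x = 0; x < 16; ++x) {
          int sum = 64;             /* rounding term, as addFilterReg64 */
          for (k = 0; k < 8; ++k)   /* taps cover src[x - 3 .. x + 4] */
            sum += filter[k] * src_ptr[x - 3 + k];
          sum >>= 7;                /* filters carry 7 fractional bits */
          if (sum < 0) sum = 0;     /* packus-style saturation to 8 bits */
          if (sum > 255) sum = 255;
          output_ptr[x] = (uint8_t)sum;
        }
        src_ptr += src_pixels_per_line;
        output_ptr += output_pitch;
      }
    }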
 
-static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr,
-                                         ptrdiff_t src_pitch,
-                                         uint8_t *output_ptr,
-                                         ptrdiff_t out_pitch,
-                                         uint32_t output_height,
-                                         const int16_t *filter) {
+static void vpx_filter_block1d16_v8_avx2(
+    const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
+    ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i filtersReg;
   __m256i addFilterReg64;
   __m256i srcReg32b1, srcReg32b2, srcReg32b3, srcReg32b4, srcReg32b5;
@@ -327,60 +307,56 @@ static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr,
   filtersReg = _mm_loadu_si128((const __m128i *)filter);
   // convert the 16 bit (short) values to 8 bit (byte) values and keep the
   // same data in both lanes of the 128 bit register.
-  filtersReg =_mm_packs_epi16(filtersReg, filtersReg);
+  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);
   // have the same data in both lanes of a 256 bit register
   filtersReg32 = MM256_BROADCASTSI128_SI256(filtersReg);
 
   // duplicate only the first 16 bits (first and second byte)
   // across 256 bit register
-  firstFilters = _mm256_shuffle_epi8(filtersReg32,
-                 _mm256_set1_epi16(0x100u));
+  firstFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x100u));
   // duplicate only the second 16 bits (third and fourth byte)
   // across 256 bit register
-  secondFilters = _mm256_shuffle_epi8(filtersReg32,
-                  _mm256_set1_epi16(0x302u));
+  secondFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x302u));
   // duplicate only the third 16 bits (fifth and sixth byte)
   // across 256 bit register
-  thirdFilters = _mm256_shuffle_epi8(filtersReg32,
-                 _mm256_set1_epi16(0x504u));
+  thirdFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x504u));
   // duplicate only the fourth 16 bits (seventh and eighth byte)
   // across 256 bit register
-  forthFilters = _mm256_shuffle_epi8(filtersReg32,
-                 _mm256_set1_epi16(0x706u));
+  forthFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x706u));
 
   // multiply the source and destination strides by two
   src_stride = src_pitch << 1;
   dst_stride = out_pitch << 1;
 
   // load 16 bytes 7 times, each load one src_pitch stride apart
-  srcReg32b1 = _mm256_castsi128_si256(
-               _mm_loadu_si128((const __m128i *)(src_ptr)));
+  srcReg32b1 =
+      _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src_ptr)));
   srcReg32b2 = _mm256_castsi128_si256(
-               _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch)));
+      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch)));
   srcReg32b3 = _mm256_castsi128_si256(
-               _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 2)));
+      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 2)));
   srcReg32b4 = _mm256_castsi128_si256(
-               _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 3)));
+      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 3)));
   srcReg32b5 = _mm256_castsi128_si256(
-               _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 4)));
+      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 4)));
   srcReg32b6 = _mm256_castsi128_si256(
-               _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 5)));
+      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 5)));
   srcReg32b7 = _mm256_castsi128_si256(
-               _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 6)));
+      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 6)));
 
   // put every two consecutive loads in the same 256 bit register
   srcReg32b1 = _mm256_inserti128_si256(srcReg32b1,
-               _mm256_castsi256_si128(srcReg32b2), 1);
+                                       _mm256_castsi256_si128(srcReg32b2), 1);
   srcReg32b2 = _mm256_inserti128_si256(srcReg32b2,
-               _mm256_castsi256_si128(srcReg32b3), 1);
+                                       _mm256_castsi256_si128(srcReg32b3), 1);
   srcReg32b3 = _mm256_inserti128_si256(srcReg32b3,
-               _mm256_castsi256_si128(srcReg32b4), 1);
+                                       _mm256_castsi256_si128(srcReg32b4), 1);
   srcReg32b4 = _mm256_inserti128_si256(srcReg32b4,
-               _mm256_castsi256_si128(srcReg32b5), 1);
+                                       _mm256_castsi256_si128(srcReg32b5), 1);
   srcReg32b5 = _mm256_inserti128_si256(srcReg32b5,
-               _mm256_castsi256_si128(srcReg32b6), 1);
+                                       _mm256_castsi256_si128(srcReg32b6), 1);
   srcReg32b6 = _mm256_inserti128_si256(srcReg32b6,
-               _mm256_castsi256_si128(srcReg32b7), 1);
+                                       _mm256_castsi256_si128(srcReg32b7), 1);
 
   // merge every two consecutive registers except the last one
   srcReg32b10 = _mm256_unpacklo_epi8(srcReg32b1, srcReg32b2);
@@ -398,89 +374,87 @@ static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr,
   // save
   srcReg32b5 = _mm256_unpackhi_epi8(srcReg32b5, srcReg32b6);
 
+  for (i = output_height; i > 1; i -= 2) {
+    // load the last 2 rows of 16 bytes and put every two
+    // consecutive rows in the same 256 bit register
+    srcReg32b8 = _mm256_castsi128_si256(
+        _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 7)));
+    srcReg32b7 = _mm256_inserti128_si256(srcReg32b7,
+                                         _mm256_castsi256_si128(srcReg32b8), 1);
+    srcReg32b9 = _mm256_castsi128_si256(
+        _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 8)));
+    srcReg32b8 = _mm256_inserti128_si256(srcReg32b8,
+                                         _mm256_castsi256_si128(srcReg32b9), 1);
+
+    // merge every two consecutive registers
+    // save
+    srcReg32b4 = _mm256_unpacklo_epi8(srcReg32b7, srcReg32b8);
+    srcReg32b7 = _mm256_unpackhi_epi8(srcReg32b7, srcReg32b8);
+
+    // multiply 2 adjacent elements with the filter and add the result
+    srcReg32b10 = _mm256_maddubs_epi16(srcReg32b10, firstFilters);
+    srcReg32b6 = _mm256_maddubs_epi16(srcReg32b4, forthFilters);
+
+    // add and saturate the results together
+    srcReg32b10 = _mm256_adds_epi16(srcReg32b10, srcReg32b6);
+
+    // multiply 2 adjacent elements with the filter and add the result
+    srcReg32b8 = _mm256_maddubs_epi16(srcReg32b11, secondFilters);
+    srcReg32b12 = _mm256_maddubs_epi16(srcReg32b2, thirdFilters);
+
+    // add and saturate the results together
+    srcReg32b10 = _mm256_adds_epi16(srcReg32b10,
+                                    _mm256_min_epi16(srcReg32b8, srcReg32b12));
+    srcReg32b10 = _mm256_adds_epi16(srcReg32b10,
+                                    _mm256_max_epi16(srcReg32b8, srcReg32b12));
+
+    // multiply 2 adjacent elements with the filter and add the result
+    srcReg32b1 = _mm256_maddubs_epi16(srcReg32b1, firstFilters);
+    srcReg32b6 = _mm256_maddubs_epi16(srcReg32b7, forthFilters);
+
+    srcReg32b1 = _mm256_adds_epi16(srcReg32b1, srcReg32b6);
+
+    // multiply 2 adjacent elements with the filter and add the result
+    srcReg32b8 = _mm256_maddubs_epi16(srcReg32b3, secondFilters);
+    srcReg32b12 = _mm256_maddubs_epi16(srcReg32b5, thirdFilters);
+
+    // add and saturate the results together
+    srcReg32b1 = _mm256_adds_epi16(srcReg32b1,
+                                   _mm256_min_epi16(srcReg32b8, srcReg32b12));
+    srcReg32b1 = _mm256_adds_epi16(srcReg32b1,
+                                   _mm256_max_epi16(srcReg32b8, srcReg32b12));
+
+    srcReg32b10 = _mm256_adds_epi16(srcReg32b10, addFilterReg64);
+    srcReg32b1 = _mm256_adds_epi16(srcReg32b1, addFilterReg64);
+
+    // shift each 16 bit value right by 7 bits
+    srcReg32b10 = _mm256_srai_epi16(srcReg32b10, 7);
+    srcReg32b1 = _mm256_srai_epi16(srcReg32b1, 7);
 
-  for (i = output_height; i > 1; i-=2) {
-     // load the last 2 loads of 16 bytes and have every two
-     // consecutive loads in the same 256 bit register
-     srcReg32b8 = _mm256_castsi128_si256(
-     _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 7)));
-     srcReg32b7 = _mm256_inserti128_si256(srcReg32b7,
-     _mm256_castsi256_si128(srcReg32b8), 1);
-     srcReg32b9 = _mm256_castsi128_si256(
-     _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 8)));
-     srcReg32b8 = _mm256_inserti128_si256(srcReg32b8,
-     _mm256_castsi256_si128(srcReg32b9), 1);
-
-     // merge every two consecutive registers
-     // save
-     srcReg32b4 = _mm256_unpacklo_epi8(srcReg32b7, srcReg32b8);
-     srcReg32b7 = _mm256_unpackhi_epi8(srcReg32b7, srcReg32b8);
-
-     // multiply 2 adjacent elements with the filter and add the result
-     srcReg32b10 = _mm256_maddubs_epi16(srcReg32b10, firstFilters);
-     srcReg32b6 = _mm256_maddubs_epi16(srcReg32b4, forthFilters);
-
-     // add and saturate the results together
-     srcReg32b10 = _mm256_adds_epi16(srcReg32b10, srcReg32b6);
-
-     // multiply 2 adjacent elements with the filter and add the result
-     srcReg32b8 = _mm256_maddubs_epi16(srcReg32b11, secondFilters);
-     srcReg32b12 = _mm256_maddubs_epi16(srcReg32b2, thirdFilters);
-
-     // add and saturate the results together
-     srcReg32b10 = _mm256_adds_epi16(srcReg32b10,
-                   _mm256_min_epi16(srcReg32b8, srcReg32b12));
-     srcReg32b10 = _mm256_adds_epi16(srcReg32b10,
-                   _mm256_max_epi16(srcReg32b8, srcReg32b12));
-
-     // multiply 2 adjacent elements with the filter and add the result
-     srcReg32b1 = _mm256_maddubs_epi16(srcReg32b1, firstFilters);
-     srcReg32b6 = _mm256_maddubs_epi16(srcReg32b7, forthFilters);
-
-     srcReg32b1 = _mm256_adds_epi16(srcReg32b1, srcReg32b6);
-
-     // multiply 2 adjacent elements with the filter and add the result
-     srcReg32b8 = _mm256_maddubs_epi16(srcReg32b3, secondFilters);
-     srcReg32b12 = _mm256_maddubs_epi16(srcReg32b5, thirdFilters);
-
-     // add and saturate the results together
-     srcReg32b1 = _mm256_adds_epi16(srcReg32b1,
-                  _mm256_min_epi16(srcReg32b8, srcReg32b12));
-     srcReg32b1 = _mm256_adds_epi16(srcReg32b1,
-                  _mm256_max_epi16(srcReg32b8, srcReg32b12));
-
-     srcReg32b10 = _mm256_adds_epi16(srcReg32b10, addFilterReg64);
-     srcReg32b1 = _mm256_adds_epi16(srcReg32b1, addFilterReg64);
-
-     // shift by 7 bit each 16 bit
-     srcReg32b10 = _mm256_srai_epi16(srcReg32b10, 7);
-     srcReg32b1 = _mm256_srai_epi16(srcReg32b1, 7);
-
-     // shrink to 8 bit each 16 bits, the first lane contain the first
-     // convolve result and the second lane contain the second convolve
-     // result
-     srcReg32b1 = _mm256_packus_epi16(srcReg32b10, srcReg32b1);
-
-     src_ptr+=src_stride;
-
-     // save 16 bytes
-     _mm_store_si128((__m128i*)output_ptr,
-     _mm256_castsi256_si128(srcReg32b1));
-
-     // save the next 16 bits
-     _mm_store_si128((__m128i*)(output_ptr+out_pitch),
-     _mm256_extractf128_si256(srcReg32b1, 1));
-
-     output_ptr+=dst_stride;
-
-     // save part of the registers for next strides
-     srcReg32b10 = srcReg32b11;
-     srcReg32b1 = srcReg32b3;
-     srcReg32b11 = srcReg32b2;
-     srcReg32b3 = srcReg32b5;
-     srcReg32b2 = srcReg32b4;
-     srcReg32b5 = srcReg32b7;
-     srcReg32b7 = srcReg32b9;
+    // shrink each 16 bit value to 8 bits; the first lane contains the
+    // first convolve result and the second lane contains the second
+    // convolve result
+    srcReg32b1 = _mm256_packus_epi16(srcReg32b10, srcReg32b1);
+
+    src_ptr += src_stride;
+
+    // save 16 bytes
+    _mm_store_si128((__m128i *)output_ptr, _mm256_castsi256_si128(srcReg32b1));
+
+    // save the next 16 bytes
+    _mm_store_si128((__m128i *)(output_ptr + out_pitch),
+                    _mm256_extractf128_si256(srcReg32b1, 1));
+
+    output_ptr += dst_stride;
+
+    // keep part of the registers for the next iteration
+    srcReg32b10 = srcReg32b11;
+    srcReg32b1 = srcReg32b3;
+    srcReg32b11 = srcReg32b2;
+    srcReg32b3 = srcReg32b5;
+    srcReg32b2 = srcReg32b4;
+    srcReg32b5 = srcReg32b7;
+    srcReg32b7 = srcReg32b9;
   }
   if (i > 0) {
     __m128i srcRegFilt1, srcRegFilt3, srcRegFilt4, srcRegFilt5;
@@ -489,55 +463,53 @@ static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr,
     srcRegFilt8 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 7));
 
     // merge the last 2 results together
-    srcRegFilt4 = _mm_unpacklo_epi8(
-                  _mm256_castsi256_si128(srcReg32b7), srcRegFilt8);
-    srcRegFilt7 = _mm_unpackhi_epi8(
-                  _mm256_castsi256_si128(srcReg32b7), srcRegFilt8);
+    srcRegFilt4 =
+        _mm_unpacklo_epi8(_mm256_castsi256_si128(srcReg32b7), srcRegFilt8);
+    srcRegFilt7 =
+        _mm_unpackhi_epi8(_mm256_castsi256_si128(srcReg32b7), srcRegFilt8);
 
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt1 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b10),
-                  _mm256_castsi256_si128(firstFilters));
-    srcRegFilt4 = _mm_maddubs_epi16(srcRegFilt4,
-                  _mm256_castsi256_si128(forthFilters));
+                                    _mm256_castsi256_si128(firstFilters));
+    srcRegFilt4 =
+        _mm_maddubs_epi16(srcRegFilt4, _mm256_castsi256_si128(forthFilters));
     srcRegFilt3 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b1),
-                  _mm256_castsi256_si128(firstFilters));
-    srcRegFilt7 = _mm_maddubs_epi16(srcRegFilt7,
-                  _mm256_castsi256_si128(forthFilters));
+                                    _mm256_castsi256_si128(firstFilters));
+    srcRegFilt7 =
+        _mm_maddubs_epi16(srcRegFilt7, _mm256_castsi256_si128(forthFilters));
 
     // add and saturate the results together
     srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4);
     srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, srcRegFilt7);
 
-
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt4 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b11),
-                  _mm256_castsi256_si128(secondFilters));
+                                    _mm256_castsi256_si128(secondFilters));
     srcRegFilt5 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b3),
-                  _mm256_castsi256_si128(secondFilters));
+                                    _mm256_castsi256_si128(secondFilters));
 
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt6 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b2),
-                  _mm256_castsi256_si128(thirdFilters));
+                                    _mm256_castsi256_si128(thirdFilters));
     srcRegFilt7 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b5),
-                  _mm256_castsi256_si128(thirdFilters));
+                                    _mm256_castsi256_si128(thirdFilters));
 
     // add and saturate the results together
-    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1,
-                  _mm_min_epi16(srcRegFilt4, srcRegFilt6));
-    srcRegFilt3 = _mm_adds_epi16(srcRegFilt3,
-                  _mm_min_epi16(srcRegFilt5, srcRegFilt7));
+    srcRegFilt1 =
+        _mm_adds_epi16(srcRegFilt1, _mm_min_epi16(srcRegFilt4, srcRegFilt6));
+    srcRegFilt3 =
+        _mm_adds_epi16(srcRegFilt3, _mm_min_epi16(srcRegFilt5, srcRegFilt7));
 
     // add and saturate the results together
-    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1,
-                  _mm_max_epi16(srcRegFilt4, srcRegFilt6));
-    srcRegFilt3 = _mm_adds_epi16(srcRegFilt3,
-                  _mm_max_epi16(srcRegFilt5, srcRegFilt7));
-
+    srcRegFilt1 =
+        _mm_adds_epi16(srcRegFilt1, _mm_max_epi16(srcRegFilt4, srcRegFilt6));
+    srcRegFilt3 =
+        _mm_adds_epi16(srcRegFilt3, _mm_max_epi16(srcRegFilt5, srcRegFilt7));
 
-    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1,
-                  _mm256_castsi256_si128(addFilterReg64));
-    srcRegFilt3 = _mm_adds_epi16(srcRegFilt3,
-                  _mm256_castsi256_si128(addFilterReg64));
+    srcRegFilt1 =
+        _mm_adds_epi16(srcRegFilt1, _mm256_castsi256_si128(addFilterReg64));
+    srcRegFilt3 =
+        _mm_adds_epi16(srcRegFilt3, _mm256_castsi256_si128(addFilterReg64));
 
     // shift each 16 bit value right by 7 bits
     srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7);
@@ -549,7 +521,7 @@ static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr,
     srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt3);
 
     // save 16 bytes
-    _mm_store_si128((__m128i*)output_ptr, srcRegFilt1);
+    _mm_store_si128((__m128i *)output_ptr, srcRegFilt1);
   }
 }
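/*
 * For reference, a minimal scalar model of what the vertical path above
 * computes per output pixel; ref_vert_8tap and its parameters are
 * illustrative, not libvpx API. Each output is an 8-tap dot product down a
 * column, rounded by adding 64 and shifting right by 7 (the taps are 7-bit
 * fixed point), then clamped to a byte the way _mm_packus_epi16 does. The
 * min/max ordering of the saturating adds above appears chosen so the
 * 16-bit intermediate sums saturate in a harmless order instead of
 * overflowing mid-accumulation.
 */
#include <stddef.h>
#include <stdint.h>

static uint8_t ref_vert_8tap(const uint8_t *src, ptrdiff_t src_pitch,
                             const int16_t *filter) {
  int sum = 0;
  int k;
  for (k = 0; k < 8; ++k) sum += src[k * src_pitch] * filter[k];
  sum = (sum + 64) >> 7; /* round to nearest, matching addFilterReg64 */
  if (sum < 0) sum = 0;  /* unsigned saturation, as in packus */
  if (sum > 255) sum = 255;
  return (uint8_t)sum;
}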
 
@@ -579,10 +551,10 @@ filter8_1dfunction vpx_filter_block1d4_h2_ssse3;
 #define vpx_filter_block1d4_v8_avx2 vpx_filter_block1d4_v8_ssse3
 #define vpx_filter_block1d16_v2_avx2 vpx_filter_block1d16_v2_ssse3
 #define vpx_filter_block1d16_h2_avx2 vpx_filter_block1d16_h2_ssse3
-#define vpx_filter_block1d8_v2_avx2  vpx_filter_block1d8_v2_ssse3
-#define vpx_filter_block1d8_h2_avx2  vpx_filter_block1d8_h2_ssse3
-#define vpx_filter_block1d4_v2_avx2  vpx_filter_block1d4_v2_ssse3
-#define vpx_filter_block1d4_h2_avx2  vpx_filter_block1d4_h2_ssse3
+#define vpx_filter_block1d8_v2_avx2 vpx_filter_block1d8_v2_ssse3
+#define vpx_filter_block1d8_h2_avx2 vpx_filter_block1d8_h2_ssse3
+#define vpx_filter_block1d4_v2_avx2 vpx_filter_block1d4_v2_ssse3
+#define vpx_filter_block1d4_h2_avx2 vpx_filter_block1d4_h2_ssse3
 // void vpx_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride,
 //                                uint8_t *dst, ptrdiff_t dst_stride,
 //                                const int16_t *filter_x, int x_step_q4,
diff --git a/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c b/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
index deec0799726ad8a8cf3e5d1be5e0be15fa6756e1..7bf7debccf088d4953f7a40bededfa761ee273a8 100644
--- a/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
+++ b/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
@@ -52,23 +52,20 @@ filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3;
 filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3;
 filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3;
 
-void vpx_filter_block1d4_h8_intrin_ssse3(const uint8_t *src_ptr,
-                                         ptrdiff_t src_pixels_per_line,
-                                         uint8_t *output_ptr,
-                                         ptrdiff_t output_pitch,
-                                         uint32_t output_height,
-                                         const int16_t *filter) {
+void vpx_filter_block1d4_h8_intrin_ssse3(
+    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
+    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i firstFilters, secondFilters, shuffle1, shuffle2;
   __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4;
   __m128i addFilterReg64, filtersReg, srcReg, minReg;
   unsigned int i;
 
   // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64
-  addFilterReg64 =_mm_set1_epi32((int)0x0400040u);
+  addFilterReg64 = _mm_set1_epi32((int)0x0400040u);
   filtersReg = _mm_loadu_si128((const __m128i *)filter);
   // convert the 16 bit (short) values to 8 bit (byte) and replicate the
   // same data in both lanes of the 128 bit register.
-  filtersReg =_mm_packs_epi16(filtersReg, filtersReg);
+  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);
 
   // duplicate only the first 16 bits in the filter into the first lane
   firstFilters = _mm_shufflelo_epi16(filtersReg, 0);
@@ -82,23 +79,23 @@ void vpx_filter_block1d4_h8_intrin_ssse3(const uint8_t *src_ptr,
   secondFilters = _mm_shufflehi_epi16(secondFilters, 0xFFu);
 
   // loading the local filters
-  shuffle1 =_mm_load_si128((__m128i const *)filt1_4_h8);
+  shuffle1 = _mm_load_si128((__m128i const *)filt1_4_h8);
   shuffle2 = _mm_load_si128((__m128i const *)filt2_4_h8);
 
   for (i = 0; i < output_height; i++) {
     srcReg = _mm_loadu_si128((const __m128i *)(src_ptr - 3));
 
     // filter the source buffer
-    srcRegFilt1= _mm_shuffle_epi8(srcReg, shuffle1);
-    srcRegFilt2= _mm_shuffle_epi8(srcReg, shuffle2);
+    srcRegFilt1 = _mm_shuffle_epi8(srcReg, shuffle1);
+    srcRegFilt2 = _mm_shuffle_epi8(srcReg, shuffle2);
 
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters);
     srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters);
 
     // extract the upper half of each register
-    srcRegFilt3 =  _mm_srli_si128(srcRegFilt1, 8);
-    srcRegFilt4 =  _mm_srli_si128(srcRegFilt2, 8);
+    srcRegFilt3 = _mm_srli_si128(srcRegFilt1, 8);
+    srcRegFilt4 = _mm_srli_si128(srcRegFilt2, 8);
 
     minReg = _mm_min_epi16(srcRegFilt3, srcRegFilt2);
 
@@ -114,21 +111,18 @@ void vpx_filter_block1d4_h8_intrin_ssse3(const uint8_t *src_ptr,
 
     // shrink each 16 bit value to 8 bits
     srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1);
-    src_ptr+=src_pixels_per_line;
+    src_ptr += src_pixels_per_line;
 
     // save only 4 bytes
-    *((int*)&output_ptr[0])= _mm_cvtsi128_si32(srcRegFilt1);
+    *((int *)&output_ptr[0]) = _mm_cvtsi128_si32(srcRegFilt1);
 
-    output_ptr+=output_pitch;
+    output_ptr += output_pitch;
   }
 }
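/*
 * The recurring "multiply 2 adjacent elements with the filter and add the
 * result" comments all describe _mm_maddubs_epi16. Per output word it
 * computes the following, sketched in scalar C (maddubs_pair is an
 * illustrative name): unsigned source bytes times signed filter bytes,
 * adjacent products summed with signed 16-bit saturation.
 */
#include <stdint.h>

static int16_t maddubs_pair(uint8_t a0, uint8_t a1, int8_t b0, int8_t b1) {
  int32_t s = (int32_t)a0 * b0 + (int32_t)a1 * b1;
  if (s > INT16_MAX) s = INT16_MAX; /* saturate rather than wrap */
  if (s < INT16_MIN) s = INT16_MIN;
  return (int16_t)s;
}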
 
-void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr,
-                                         ptrdiff_t src_pixels_per_line,
-                                         uint8_t *output_ptr,
-                                         ptrdiff_t output_pitch,
-                                         uint32_t output_height,
-                                         const int16_t *filter) {
+void vpx_filter_block1d8_h8_intrin_ssse3(
+    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
+    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i firstFilters, secondFilters, thirdFilters, forthFilters, srcReg;
   __m128i filt1Reg, filt2Reg, filt3Reg, filt4Reg;
   __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4;
@@ -140,7 +134,7 @@ void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr,
   filtersReg = _mm_loadu_si128((const __m128i *)filter);
   // convert the 16 bit (short) values to 8 bit (byte) and replicate the
   // same data in both lanes of the 128 bit register.
-  filtersReg =_mm_packs_epi16(filtersReg, filtersReg);
+  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);
 
   // duplicate only the first 16 bits (first and second byte)
   // across 128 bit register
@@ -164,16 +158,16 @@ void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr,
     srcReg = _mm_loadu_si128((const __m128i *)(src_ptr - 3));
 
     // filter the source buffer
-    srcRegFilt1= _mm_shuffle_epi8(srcReg, filt1Reg);
-    srcRegFilt2= _mm_shuffle_epi8(srcReg, filt2Reg);
+    srcRegFilt1 = _mm_shuffle_epi8(srcReg, filt1Reg);
+    srcRegFilt2 = _mm_shuffle_epi8(srcReg, filt2Reg);
 
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters);
     srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters);
 
     // filter the source buffer
-    srcRegFilt3= _mm_shuffle_epi8(srcReg, filt3Reg);
-    srcRegFilt4= _mm_shuffle_epi8(srcReg, filt4Reg);
+    srcRegFilt3 = _mm_shuffle_epi8(srcReg, filt3Reg);
+    srcRegFilt4 = _mm_shuffle_epi8(srcReg, filt4Reg);
 
     // multiply 2 adjacent elements with the filter and add the result
     srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, thirdFilters);
@@ -183,7 +177,7 @@ void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr,
     minReg = _mm_min_epi16(srcRegFilt2, srcRegFilt3);
     srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4);
 
-    srcRegFilt2= _mm_max_epi16(srcRegFilt2, srcRegFilt3);
+    srcRegFilt2 = _mm_max_epi16(srcRegFilt2, srcRegFilt3);
     srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, minReg);
     srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt2);
     srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64);
@@ -194,21 +188,18 @@ void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr,
     // shrink each 16 bit value to 8 bits
     srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1);
 
-    src_ptr+=src_pixels_per_line;
+    src_ptr += src_pixels_per_line;
 
     // save only 8 bytes
-    _mm_storel_epi64((__m128i*)&output_ptr[0], srcRegFilt1);
+    _mm_storel_epi64((__m128i *)&output_ptr[0], srcRegFilt1);
 
-    output_ptr+=output_pitch;
+    output_ptr += output_pitch;
   }
 }
 
-void vpx_filter_block1d8_v8_intrin_ssse3(const uint8_t *src_ptr,
-                                         ptrdiff_t src_pitch,
-                                         uint8_t *output_ptr,
-                                         ptrdiff_t out_pitch,
-                                         uint32_t output_height,
-                                         const int16_t *filter) {
+void vpx_filter_block1d8_v8_intrin_ssse3(
+    const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
+    ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
   __m128i addFilterReg64, filtersReg, minReg;
   __m128i firstFilters, secondFilters, thirdFilters, forthFilters;
   __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt5;
@@ -221,7 +212,7 @@ void vpx_filter_block1d8_v8_intrin_ssse3(const uint8_t *src_ptr,
   filtersReg = _mm_loadu_si128((const __m128i *)filter);
   // convert the 16 bit (short) values to 8 bit (byte) and replicate the
   // same data in both lanes of the 128 bit register.
-  filtersReg =_mm_packs_epi16(filtersReg, filtersReg);
+  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);
 
   // duplicate only the first 16 bits in the filter
   firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x100u));
@@ -273,7 +264,7 @@ void vpx_filter_block1d8_v8_intrin_ssse3(const uint8_t *src_ptr,
     // shrink each 16 bit value to 8 bits
     srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1);
 
-    src_ptr+=src_pitch;
+    src_ptr += src_pitch;
 
     // shift down a row
     srcReg1 = srcReg2;
@@ -285,9 +276,9 @@ void vpx_filter_block1d8_v8_intrin_ssse3(const uint8_t *src_ptr,
     srcReg7 = srcReg8;
 
     // save only 8 bytes convolve result
-    _mm_storel_epi64((__m128i*)&output_ptr[0], srcRegFilt1);
+    _mm_storel_epi64((__m128i *)&output_ptr[0], srcRegFilt1);
 
-    output_ptr+=out_pitch;
+    output_ptr += out_pitch;
   }
 }
 
@@ -343,32 +334,33 @@ FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, ssse3);
 FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
             ssse3);
 
-#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7,           \
-                      out0, out1, out2, out3, out4, out5, out6, out7) { \
-  const __m128i tr0_0 = _mm_unpacklo_epi8(in0, in1);                    \
-  const __m128i tr0_1 = _mm_unpacklo_epi8(in2, in3);                    \
-  const __m128i tr0_2 = _mm_unpacklo_epi8(in4, in5);                    \
-  const __m128i tr0_3 = _mm_unpacklo_epi8(in6, in7);                    \
-                                                                        \
-  const __m128i tr1_0 = _mm_unpacklo_epi16(tr0_0, tr0_1);               \
-  const __m128i tr1_1 = _mm_unpackhi_epi16(tr0_0, tr0_1);               \
-  const __m128i tr1_2 = _mm_unpacklo_epi16(tr0_2, tr0_3);               \
-  const __m128i tr1_3 = _mm_unpackhi_epi16(tr0_2, tr0_3);               \
-                                                                        \
-  const __m128i tr2_0 = _mm_unpacklo_epi32(tr1_0, tr1_2);               \
-  const __m128i tr2_1 = _mm_unpackhi_epi32(tr1_0, tr1_2);               \
-  const __m128i tr2_2 = _mm_unpacklo_epi32(tr1_1, tr1_3);               \
-  const __m128i tr2_3 = _mm_unpackhi_epi32(tr1_1, tr1_3);               \
-                                                                        \
-  out0 = _mm_unpacklo_epi64(tr2_0, tr2_0);                              \
-  out1 = _mm_unpackhi_epi64(tr2_0, tr2_0);                              \
-  out2 = _mm_unpacklo_epi64(tr2_1, tr2_1);                              \
-  out3 = _mm_unpackhi_epi64(tr2_1, tr2_1);                              \
-  out4 = _mm_unpacklo_epi64(tr2_2, tr2_2);                              \
-  out5 = _mm_unpackhi_epi64(tr2_2, tr2_2);                              \
-  out6 = _mm_unpacklo_epi64(tr2_3, tr2_3);                              \
-  out7 = _mm_unpackhi_epi64(tr2_3, tr2_3);                              \
-}
+#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+                      out2, out3, out4, out5, out6, out7)                 \
+  {                                                                       \
+    const __m128i tr0_0 = _mm_unpacklo_epi8(in0, in1);                    \
+    const __m128i tr0_1 = _mm_unpacklo_epi8(in2, in3);                    \
+    const __m128i tr0_2 = _mm_unpacklo_epi8(in4, in5);                    \
+    const __m128i tr0_3 = _mm_unpacklo_epi8(in6, in7);                    \
+                                                                          \
+    const __m128i tr1_0 = _mm_unpacklo_epi16(tr0_0, tr0_1);               \
+    const __m128i tr1_1 = _mm_unpackhi_epi16(tr0_0, tr0_1);               \
+    const __m128i tr1_2 = _mm_unpacklo_epi16(tr0_2, tr0_3);               \
+    const __m128i tr1_3 = _mm_unpackhi_epi16(tr0_2, tr0_3);               \
+                                                                          \
+    const __m128i tr2_0 = _mm_unpacklo_epi32(tr1_0, tr1_2);               \
+    const __m128i tr2_1 = _mm_unpackhi_epi32(tr1_0, tr1_2);               \
+    const __m128i tr2_2 = _mm_unpacklo_epi32(tr1_1, tr1_3);               \
+    const __m128i tr2_3 = _mm_unpackhi_epi32(tr1_1, tr1_3);               \
+                                                                          \
+    out0 = _mm_unpacklo_epi64(tr2_0, tr2_0);                              \
+    out1 = _mm_unpackhi_epi64(tr2_0, tr2_0);                              \
+    out2 = _mm_unpacklo_epi64(tr2_1, tr2_1);                              \
+    out3 = _mm_unpackhi_epi64(tr2_1, tr2_1);                              \
+    out4 = _mm_unpacklo_epi64(tr2_2, tr2_2);                              \
+    out5 = _mm_unpackhi_epi64(tr2_2, tr2_2);                              \
+    out6 = _mm_unpacklo_epi64(tr2_3, tr2_3);                              \
+    out7 = _mm_unpackhi_epi64(tr2_3, tr2_3);                              \
+  }
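/*
 * TRANSPOSE_8X8 above is the unpack-cascade form of a plain 8x8 byte
 * transpose: the byte at (row, col) ends up at (col, row). A scalar sketch
 * with an illustrative name:
 */
#include <stdint.h>

static void transpose8x8_ref(const uint8_t src[8][8], uint8_t dst[8][8]) {
  int r, c;
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) dst[c][r] = src[r][c];
}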
 
 static void filter_horiz_w8_ssse3(const uint8_t *src_x, ptrdiff_t src_pitch,
                                   uint8_t *dst, const int16_t *x_filter) {
@@ -424,7 +416,7 @@ static void filter_horiz_w8_ssse3(const uint8_t *src_x, ptrdiff_t src_pitch,
   // shrink each 16 bit value to 8 bits
   temp = _mm_packus_epi16(temp, temp);
   // save only 8 bytes convolve result
-  _mm_storel_epi64((__m128i*)dst, temp);
+  _mm_storel_epi64((__m128i *)dst, temp);
 }
 
 static void transpose8x8_to_dst(const uint8_t *src, ptrdiff_t src_stride,
@@ -440,23 +432,22 @@ static void transpose8x8_to_dst(const uint8_t *src, ptrdiff_t src_stride,
   G = _mm_loadl_epi64((const __m128i *)(src + src_stride * 6));
   H = _mm_loadl_epi64((const __m128i *)(src + src_stride * 7));
 
-  TRANSPOSE_8X8(A, B, C, D, E, F, G, H,
-                A, B, C, D, E, F, G, H);
-
-  _mm_storel_epi64((__m128i*)dst, A);
-  _mm_storel_epi64((__m128i*)(dst + dst_stride * 1), B);
-  _mm_storel_epi64((__m128i*)(dst + dst_stride * 2), C);
-  _mm_storel_epi64((__m128i*)(dst + dst_stride * 3), D);
-  _mm_storel_epi64((__m128i*)(dst + dst_stride * 4), E);
-  _mm_storel_epi64((__m128i*)(dst + dst_stride * 5), F);
-  _mm_storel_epi64((__m128i*)(dst + dst_stride * 6), G);
-  _mm_storel_epi64((__m128i*)(dst + dst_stride * 7), H);
+  TRANSPOSE_8X8(A, B, C, D, E, F, G, H, A, B, C, D, E, F, G, H);
+
+  _mm_storel_epi64((__m128i *)dst, A);
+  _mm_storel_epi64((__m128i *)(dst + dst_stride * 1), B);
+  _mm_storel_epi64((__m128i *)(dst + dst_stride * 2), C);
+  _mm_storel_epi64((__m128i *)(dst + dst_stride * 3), D);
+  _mm_storel_epi64((__m128i *)(dst + dst_stride * 4), E);
+  _mm_storel_epi64((__m128i *)(dst + dst_stride * 5), F);
+  _mm_storel_epi64((__m128i *)(dst + dst_stride * 6), G);
+  _mm_storel_epi64((__m128i *)(dst + dst_stride * 7), H);
 }
 
 static void scaledconvolve_horiz_w8(const uint8_t *src, ptrdiff_t src_stride,
                                     uint8_t *dst, ptrdiff_t dst_stride,
-                                    const InterpKernel *x_filters,
-                                    int x0_q4, int x_step_q4, int w, int h) {
+                                    const InterpKernel *x_filters, int x0_q4,
+                                    int x_step_q4, int w, int h) {
   DECLARE_ALIGNED(16, uint8_t, temp[8 * 8]);
   int x, y, z;
   src -= SUBPEL_TAPS / 2 - 1;
@@ -527,7 +518,7 @@ static void filter_horiz_w4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_pitch,
   // 20 21 30 31 22 23 32 33 24 25 34 35 26 27 36 37
   const __m128i tr0_1 = _mm_unpacklo_epi16(C, D);
   // 00 01 10 11 20 21 30 31 02 03 12 13 22 23 32 33
-  const __m128i s1s0  = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  const __m128i s1s0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
   // 04 05 14 15 24 25 34 35 06 07 16 17 26 27 36 37
   const __m128i s5s4 = _mm_unpackhi_epi32(tr0_0, tr0_1);
   // 02 03 12 13 22 23 32 33
@@ -569,16 +560,16 @@ static void transpose4x4_to_dst(const uint8_t *src, ptrdiff_t src_stride,
   C = _mm_srli_si128(A, 8);
   D = _mm_srli_si128(A, 12);
 
-  *(int *)(dst) =  _mm_cvtsi128_si32(A);
-  *(int *)(dst + dst_stride) =  _mm_cvtsi128_si32(B);
-  *(int *)(dst + dst_stride * 2) =  _mm_cvtsi128_si32(C);
-  *(int *)(dst + dst_stride * 3) =  _mm_cvtsi128_si32(D);
+  *(int *)(dst) = _mm_cvtsi128_si32(A);
+  *(int *)(dst + dst_stride) = _mm_cvtsi128_si32(B);
+  *(int *)(dst + dst_stride * 2) = _mm_cvtsi128_si32(C);
+  *(int *)(dst + dst_stride * 3) = _mm_cvtsi128_si32(D);
 }
 
 static void scaledconvolve_horiz_w4(const uint8_t *src, ptrdiff_t src_stride,
                                     uint8_t *dst, ptrdiff_t dst_stride,
-                                    const InterpKernel *x_filters,
-                                    int x0_q4, int x_step_q4, int w, int h) {
+                                    const InterpKernel *x_filters, int x0_q4,
+                                    int x_step_q4, int w, int h) {
   DECLARE_ALIGNED(16, uint8_t, temp[4 * 4]);
   int x, y, z;
   src -= SUBPEL_TAPS / 2 - 1;
@@ -652,8 +643,8 @@ static void filter_vert_w4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_pitch,
 
 static void scaledconvolve_vert_w4(const uint8_t *src, ptrdiff_t src_stride,
                                    uint8_t *dst, ptrdiff_t dst_stride,
-                                   const InterpKernel *y_filters,
-                                   int y0_q4, int y_step_q4, int w, int h) {
+                                   const InterpKernel *y_filters, int y0_q4,
+                                   int y_step_q4, int w, int h) {
   int y;
   int y_q4 = y0_q4;
 
@@ -709,13 +700,13 @@ static void filter_vert_w8_ssse3(const uint8_t *src_ptr, ptrdiff_t src_pitch,
   // shrink each 16 bit value to 8 bits
   temp = _mm_packus_epi16(temp, temp);
   // save only 8 bytes convolve result
-  _mm_storel_epi64((__m128i*)dst, temp);
+  _mm_storel_epi64((__m128i *)dst, temp);
 }
 
 static void scaledconvolve_vert_w8(const uint8_t *src, ptrdiff_t src_stride,
                                    uint8_t *dst, ptrdiff_t dst_stride,
-                                   const InterpKernel *y_filters,
-                                   int y0_q4, int y_step_q4, int w, int h) {
+                                   const InterpKernel *y_filters, int y0_q4,
+                                   int y_step_q4, int w, int h) {
   int y;
   int y_q4 = y0_q4;
 
@@ -798,15 +789,15 @@ static void filter_vert_w16_ssse3(const uint8_t *src_ptr, ptrdiff_t src_pitch,
     // result
     temp_hi = _mm_packus_epi16(temp_lo, temp_hi);
     src_ptr += 16;
-     // save 16 bytes convolve result
-    _mm_store_si128((__m128i*)&dst[i], temp_hi);
+    // save 16 bytes convolve result
+    _mm_store_si128((__m128i *)&dst[i], temp_hi);
   }
 }
 
 static void scaledconvolve_vert_w16(const uint8_t *src, ptrdiff_t src_stride,
                                     uint8_t *dst, ptrdiff_t dst_stride,
-                                    const InterpKernel *y_filters,
-                                    int y0_q4, int y_step_q4, int w, int h) {
+                                    const InterpKernel *y_filters, int y0_q4,
+                                    int y_step_q4, int w, int h) {
   int y;
   int y_q4 = y0_q4;
 
@@ -826,11 +817,9 @@ static void scaledconvolve_vert_w16(const uint8_t *src, ptrdiff_t src_stride,
 
 static void scaledconvolve2d(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
-                             const InterpKernel *const x_filters,
-                             int x0_q4, int x_step_q4,
-                             const InterpKernel *const y_filters,
-                             int y0_q4, int y_step_q4,
-                             int w, int h) {
+                             const InterpKernel *const x_filters, int x0_q4,
+                             int x_step_q4, const InterpKernel *const y_filters,
+                             int y0_q4, int y_step_q4, int w, int h) {
   // Note: Fixed size intermediate buffer, temp, places limits on parameters.
   // 2d filtering proceeds in 2 steps:
   //   (1) Interpolate horizontally into an intermediate buffer, temp.
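/*
 * A minimal sketch of the two-step scheme the note above describes (step 2
 * interpolates vertically from temp into dst). A 2-tap average stands in
 * for the real 8-tap kernels, and the buffer size, support margin, and
 * function name are illustrative; callers must supply w + 1 columns and
 * h + 1 rows of source, with w and h at most 64 here.
 */
#include <stdint.h>

static void separable_2d_sketch(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride, int w, int h) {
  uint8_t temp[64 * 65]; /* fixed size, hence the limits on parameters */
  int x, y;
  /* (1) horizontal pass, writing one extra row of vertical support */
  for (y = 0; y < h + 1; ++y)
    for (x = 0; x < w; ++x)
      temp[y * w + x] = (uint8_t)(
          (src[y * src_stride + x] + src[y * src_stride + x + 1] + 1) >> 1);
  /* (2) vertical pass from temp into the destination */
  for (y = 0; y < h; ++y)
    for (x = 0; x < w; ++x)
      dst[y * dst_stride + x] =
          (uint8_t)((temp[y * w + x] + temp[(y + 1) * w + x] + 1) >> 1);
}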
@@ -885,10 +874,9 @@ static int get_filter_offset(const int16_t *f, const InterpKernel *base) {
   return (int)((const InterpKernel *)(intptr_t)f - base);
 }
 
-void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter_x, int x_step_q4,
-                         const int16_t *filter_y, int y_step_q4,
+void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+                         ptrdiff_t dst_stride, const int16_t *filter_x,
+                         int x_step_q4, const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
   const InterpKernel *const filters_x = get_filter_base(filter_x);
   const int x0_q4 = get_filter_offset(filter_x, filters_x);
@@ -896,9 +884,8 @@ void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride,
   const InterpKernel *const filters_y = get_filter_base(filter_y);
   const int y0_q4 = get_filter_offset(filter_y, filters_y);
 
-  scaledconvolve2d(src, src_stride, dst, dst_stride,
-                   filters_x, x0_q4, x_step_q4,
-                   filters_y, y0_q4, y_step_q4, w, h);
+  scaledconvolve2d(src, src_stride, dst, dst_stride, filters_x, x0_q4,
+                   x_step_q4, filters_y, y0_q4, y_step_q4, w, h);
 }
 
 // void vpx_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride,
@@ -912,4 +899,4 @@ void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                              const int16_t *filter_y, int y_step_q4,
 //                              int w, int h);
 FUN_CONV_2D(, ssse3);
-FUN_CONV_2D(avg_ , ssse3);
+FUN_CONV_2D(avg_, ssse3);
diff --git a/vpx_mem/include/vpx_mem_intrnl.h b/vpx_mem/include/vpx_mem_intrnl.h
index c4dd78550f352f6f99b89b3e7ea0a7347c0aaa17..f1ab7f5503c4dfb8741fb02c7d023bbd73092ae5 100644
--- a/vpx_mem/include/vpx_mem_intrnl.h
+++ b/vpx_mem/include/vpx_mem_intrnl.h
@@ -8,24 +8,24 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_
 #define VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_
 #include "./vpx_config.h"
 
-#define ADDRESS_STORAGE_SIZE      sizeof(size_t)
+#define ADDRESS_STORAGE_SIZE sizeof(size_t)
 
 #ifndef DEFAULT_ALIGNMENT
-# if defined(VXWORKS)
-#  define DEFAULT_ALIGNMENT        32        /*default addr alignment to use in
-calls to vpx_* functions other
-than vpx_memalign*/
-# else
-#  define DEFAULT_ALIGNMENT        (2 * sizeof(void*))  /* NOLINT */
-# endif
+#if defined(VXWORKS)
+/*default addr alignment to use in calls to vpx_* functions other than
+  vpx_memalign*/
+#define DEFAULT_ALIGNMENT 32
+#else
+#define DEFAULT_ALIGNMENT (2 * sizeof(void *)) /* NOLINT */
+#endif
 #endif
 
 /*returns an addr aligned to the byte boundary specified by align*/
-#define align_addr(addr,align) (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))
+#define align_addr(addr, align) \
+  (void *)(((size_t)(addr) + ((align)-1)) & (size_t) - (align))
 
 #endif  // VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_
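/*
 * Worked example of the align_addr arithmetic above: for a power-of-two
 * align, adding align - 1 and masking off the low bits rounds any address
 * up to the next align-byte boundary; with align = 32, 0x1001 becomes
 * 0x1020 and 0x1020 is left alone. The macro is copied here (in an
 * equivalent mask form) so the sketch stands on its own:
 */
#include <stddef.h>
#include <stdio.h>

#define ALIGN_ADDR(addr, align) \
  ((void *)(((size_t)(addr) + ((align)-1)) & ~((size_t)(align)-1)))

int main(void) {
  char buf[64];
  /* the next 32-byte boundary at or past buf + 1 */
  void *p = ALIGN_ADDR(buf + 1, 32);
  printf("%p -> %p\n", (void *)(buf + 1), p);
  return 0;
}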
diff --git a/vpx_mem/vpx_mem.c b/vpx_mem/vpx_mem.c
index d108b3c80b3cc8d3981131fc4704cd015f6da8d0..84b487cb0c4854de2e248edd2a2b6d1f381ed31c 100644
--- a/vpx_mem/vpx_mem.c
+++ b/vpx_mem/vpx_mem.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #define __VPX_MEM_C__
 
 #include "vpx_mem.h"
@@ -19,8 +18,7 @@
 #include "vpx/vpx_integer.h"
 
 void *vpx_memalign(size_t align, size_t size) {
-  void *addr,
-       * x = NULL;
+  void *addr, *x = NULL;
 
   addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE);
 
@@ -33,24 +31,20 @@ void *vpx_memalign(size_t align, size_t size) {
   return x;
 }
 
-void *vpx_malloc(size_t size) {
-  return vpx_memalign(DEFAULT_ALIGNMENT, size);
-}
+void *vpx_malloc(size_t size) { return vpx_memalign(DEFAULT_ALIGNMENT, size); }
 
 void *vpx_calloc(size_t num, size_t size) {
   void *x;
 
   x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
 
-  if (x)
-    memset(x, 0, num * size);
+  if (x) memset(x, 0, num * size);
 
   return x;
 }
 
 void *vpx_realloc(void *memblk, size_t size) {
-  void *addr,
-       * new_addr = NULL;
+  void *addr, *new_addr = NULL;
   int align = DEFAULT_ALIGNMENT;
 
   /*
@@ -66,16 +60,17 @@ void *vpx_realloc(void *memblk, size_t size) {
   else if (!size)
     vpx_free(memblk);
   else {
-    addr   = (void *)(((size_t *)memblk)[-1]);
+    addr = (void *)(((size_t *)memblk)[-1]);
     memblk = NULL;
 
     new_addr = realloc(addr, size + align + ADDRESS_STORAGE_SIZE);
 
     if (new_addr) {
       addr = new_addr;
-      new_addr = (void *)(((size_t)
-                           ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) &
-                          (size_t) - align);
+      new_addr =
+          (void *)(((size_t)((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) +
+                    (align - 1)) &
+                   (size_t)-align);
       /* save the actual malloc address */
       ((size_t *)new_addr)[-1] = (size_t)addr;
     }
@@ -95,8 +90,7 @@ void vpx_free(void *memblk) {
 void *vpx_memset16(void *dest, int val, size_t length) {
   size_t i;
   uint16_t *dest16 = (uint16_t *)dest;
-  for (i = 0; i < length; i++)
-    *dest16++ = val;
+  for (i = 0; i < length; i++) *dest16++ = val;
   return dest;
 }
 #endif  // CONFIG_VPX_HIGHBITDEPTH
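/*
 * How the allocator above finds malloc's pointer again: it over-allocates,
 * aligns past a size_t slot, and stores the raw address in that slot just
 * below the pointer it returns; vpx_free reads the slot back. Condensed
 * sketch with illustrative names:
 */
#include <stdint.h>
#include <stdlib.h>

static void *sketch_memalign(size_t align, size_t size) {
  /* room for the slot, the alignment slack, and the payload */
  void *raw = malloc(size + align - 1 + sizeof(size_t));
  uint8_t *p;
  if (!raw) return NULL;
  p = (uint8_t *)(((size_t)((uint8_t *)raw + sizeof(size_t)) + (align - 1)) &
                  ~(align - 1));
  ((size_t *)p)[-1] = (size_t)raw; /* remember where the block really starts */
  return p;
}

static void sketch_free(void *p) {
  if (p) free((void *)(((size_t *)p)[-1]));
}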
diff --git a/vpx_mem/vpx_mem.h b/vpx_mem/vpx_mem.h
index 0dd4d14ebcf41561163b45e9b453e7e19c31e516..21b8d248d55fd24b992ead3263872faad39706d6 100644
--- a/vpx_mem/vpx_mem.h
+++ b/vpx_mem/vpx_mem.h
@@ -8,13 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VPX_MEM_VPX_MEM_H_
 #define VPX_MEM_VPX_MEM_H_
 
 #include "vpx_config.h"
 #if defined(__uClinux__)
-# include <lddk.h>
+#include <lddk.h>
 #endif
 
 #include <stdlib.h>
@@ -24,20 +23,20 @@
 extern "C" {
 #endif
 
-  void *vpx_memalign(size_t align, size_t size);
-  void *vpx_malloc(size_t size);
-  void *vpx_calloc(size_t num, size_t size);
-  void *vpx_realloc(void *memblk, size_t size);
-  void vpx_free(void *memblk);
+void *vpx_memalign(size_t align, size_t size);
+void *vpx_malloc(size_t size);
+void *vpx_calloc(size_t num, size_t size);
+void *vpx_realloc(void *memblk, size_t size);
+void vpx_free(void *memblk);
 
 #if CONFIG_VPX_HIGHBITDEPTH
-  void *vpx_memset16(void *dest, int val, size_t length);
+void *vpx_memset16(void *dest, int val, size_t length);
 #endif
 
 #include <string.h>
 
 #ifdef VPX_MEM_PLTFRM
-# include VPX_MEM_PLTFRM
+#include VPX_MEM_PLTFRM
 #endif
 
 #if defined(__cplusplus)
diff --git a/vpx_ports/arm.h b/vpx_ports/arm.h
index 42c98f5a838241b9045ff35047b9a64da9b30686..7be6104a4f504615c5035dff432f1dbe9b9daa16 100644
--- a/vpx_ports/arm.h
+++ b/vpx_ports/arm.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VPX_PORTS_ARM_H_
 #define VPX_PORTS_ARM_H_
 #include <stdlib.h>
@@ -19,17 +18,17 @@ extern "C" {
 #endif
 
 /*ARMv5TE "Enhanced DSP" instructions.*/
-#define HAS_EDSP  0x01
+#define HAS_EDSP 0x01
 /*ARMv6 "Parallel" or "Media" instructions.*/
 #define HAS_MEDIA 0x02
 /*ARMv7 optional NEON instructions.*/
-#define HAS_NEON  0x04
+#define HAS_NEON 0x04
 
 int arm_cpu_caps(void);
 
 // Earlier gcc compilers have issues with some neon intrinsics
-#if !defined(__clang__) && defined(__GNUC__) && \
-    __GNUC__ == 4 && __GNUC_MINOR__ <= 6
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 4 && \
+    __GNUC_MINOR__ <= 6
 #define VPX_INCOMPATIBLE_GCC
 #endif
 
@@ -38,4 +37,3 @@ int arm_cpu_caps(void);
 #endif
 
 #endif  // VPX_PORTS_ARM_H_
-
diff --git a/vpx_ports/arm_cpudetect.c b/vpx_ports/arm_cpudetect.c
index 8a4b8af964fe742a0c00dc94234434cb039acab5..e446702e4028442b6b69d6cd728f12690c1d2d9a 100644
--- a/vpx_ports/arm_cpudetect.c
+++ b/vpx_ports/arm_cpudetect.c
@@ -71,33 +71,33 @@ int arm_cpu_caps(void) {
     return flags;
   }
   mask = arm_cpu_env_mask();
-  /* MSVC has no inline __asm support for ARM, but it does let you __emit
-   *  instructions via their assembled hex code.
-   * All of these instructions should be essentially nops.
-   */
+/* MSVC has no inline __asm support for ARM, but it does let you __emit
+ *  instructions via their assembled hex code.
+ * All of these instructions should be essentially nops.
+ */
 #if HAVE_MEDIA
-  if (mask & HAS_MEDIA)
+  if (mask & HAS_MEDIA) {
     __try {
       /*SHADD8 r3,r3,r3*/
       __emit(0xE6333F93);
       flags |= HAS_MEDIA;
     } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
-    /*Ignore exception.*/
+      /*Ignore exception.*/
+    }
   }
-}
 #endif /* HAVE_MEDIA */
 #if HAVE_NEON || HAVE_NEON_ASM
-if (mask &HAS_NEON) {
-  __try {
-    /*VORR q0,q0,q0*/
-    __emit(0xF2200150);
-    flags |= HAS_NEON;
-  } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
-    /*Ignore exception.*/
+  if (mask & HAS_NEON) {
+    __try {
+      /*VORR q0,q0,q0*/
+      __emit(0xF2200150);
+      flags |= HAS_NEON;
+    } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+      /*Ignore exception.*/
+    }
   }
-}
 #endif /* HAVE_NEON || HAVE_NEON_ASM */
-return flags & mask;
+  return flags & mask;
 }
 
 #elif defined(__ANDROID__) /* end _MSC_VER */
@@ -117,8 +117,7 @@ int arm_cpu_caps(void) {
   flags |= HAS_MEDIA;
 #endif /* HAVE_MEDIA */
 #if HAVE_NEON || HAVE_NEON_ASM
-  if (features & ANDROID_CPU_ARM_FEATURE_NEON)
-    flags |= HAS_NEON;
+  if (features & ANDROID_CPU_ARM_FEATURE_NEON) flags |= HAS_NEON;
 #endif /* HAVE_NEON || HAVE_NEON_ASM */
   return flags & mask;
 }
@@ -169,7 +168,8 @@ int arm_cpu_caps(void) {
   }
   return flags & mask;
 }
-#else /* end __linux__ */
-#error "--enable-runtime-cpu-detect selected, but no CPU detection method " \
+#else  /* end __linux__ */
+#error \
+    "--enable-runtime-cpu-detect selected, but no CPU detection method " \
 "available for your platform. Reconfigure with --disable-runtime-cpu-detect."
 #endif
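/*
 * Typical use of the capability flags probed above: query once, then pick a
 * code path. Everything here is an illustrative sketch; SKETCH_HAS_NEON
 * mirrors HAS_NEON from vpx_ports/arm.h, and sketch_cpu_caps stands in for
 * the real arm_cpu_caps so the example links on its own.
 */
#define SKETCH_HAS_NEON 0x04

static int sketch_cpu_caps(void) { return 0; /* the real probe is above */ }

typedef void (*filter_fn)(void);
static void filter_c(void) { /* portable fallback */ }
static void filter_neon(void) { /* NEON fast path */ }

static filter_fn pick_filter(void) {
  return (sketch_cpu_caps() & SKETCH_HAS_NEON) ? filter_neon : filter_c;
}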
diff --git a/vpx_ports/bitops.h b/vpx_ports/bitops.h
index 84ff3659fee835102caad07318403a5a34d0a0d7..f2a1fe9331efa4cca55af4ad50a7c4616295e24b 100644
--- a/vpx_ports/bitops.h
+++ b/vpx_ports/bitops.h
@@ -16,11 +16,11 @@
 #include "vpx_ports/msvc.h"
 
 #ifdef _MSC_VER
-# include <math.h>  // the ceil() definition must precede intrin.h
-# if _MSC_VER > 1310 && (defined(_M_X64) || defined(_M_IX86))
-#  include <intrin.h>
-#  define USE_MSC_INTRINSICS
-# endif
+#include <math.h>  // the ceil() definition must precede intrin.h
+#if _MSC_VER > 1310 && (defined(_M_X64) || defined(_M_IX86))
+#include <intrin.h>
+#define USE_MSC_INTRINSICS
+#endif
 #endif
 
 #ifdef __cplusplus
diff --git a/vpx_ports/emmintrin_compat.h b/vpx_ports/emmintrin_compat.h
index 16176383d290de9766ddb1007d3f744020b387d7..903534e0c0f35b134a6e958d163827cf582ff030 100644
--- a/vpx_ports/emmintrin_compat.h
+++ b/vpx_ports/emmintrin_compat.h
@@ -15,40 +15,40 @@
 /* From emmintrin.h (gcc 4.5.3) */
 /* Casts between various SP, DP, INT vector types.  Note that these do no
    conversion of values, they just change the type.  */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_ps(__m128d __A)
-{
-  return (__m128) __A;
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castpd_ps(__m128d __A) {
+  return (__m128)__A;
 }
 
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_si128(__m128d __A)
-{
-  return (__m128i) __A;
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castpd_si128(__m128d __A) {
+  return (__m128i)__A;
 }
 
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_pd(__m128 __A)
-{
-  return (__m128d) __A;
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castps_pd(__m128 __A) {
+  return (__m128d)__A;
 }
 
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_si128(__m128 __A)
-{
-  return (__m128i) __A;
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castps_si128(__m128 __A) {
+  return (__m128i)__A;
 }
 
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_ps(__m128i __A)
-{
-  return (__m128) __A;
+extern __inline __m128
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castsi128_ps(__m128i __A) {
+  return (__m128)__A;
 }
 
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_pd(__m128i __A)
-{
-  return (__m128d) __A;
+extern __inline __m128d
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_castsi128_pd(__m128i __A) {
+  return (__m128d)__A;
 }
 #endif
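/*
 * The cast shims above emit no instructions and change no bits; they only
 * re-label the register type so intrinsics expecting a different element
 * view can be applied. Minimal usage sketch (the helper name is
 * illustrative):
 */
#include <emmintrin.h>

static __m128 int_bits_as_float(__m128i x) {
  return _mm_castsi128_ps(x); /* reinterpret the 128 bits, no conversion */
}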
 
diff --git a/vpx_ports/mem.h b/vpx_ports/mem.h
index 075203521512d0dc7c5d55ea443cffb322a9a2f2..81f28f6386834b7bc0c4e93761dbe25cb6c514e6 100644
--- a/vpx_ports/mem.h
+++ b/vpx_ports/mem.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VPX_PORTS_MEM_H_
 #define VPX_PORTS_MEM_H_
 
@@ -16,12 +15,12 @@
 #include "vpx/vpx_integer.h"
 
 #if (defined(__GNUC__) && __GNUC__) || defined(__SUNPRO_C)
-#define DECLARE_ALIGNED(n,typ,val)  typ val __attribute__ ((aligned (n)))
+#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))
 #elif defined(_MSC_VER)
-#define DECLARE_ALIGNED(n,typ,val)  __declspec(align(n)) typ val
+#define DECLARE_ALIGNED(n, typ, val) __declspec(align(n)) typ val
 #else
 #warning No alignment directives known for this compiler.
-#define DECLARE_ALIGNED(n,typ,val)  typ val
+#define DECLARE_ALIGNED(n, typ, val) typ val
 #endif
 
 /* Indicates that the usage of the specified variable has been audited to assure
@@ -29,7 +28,7 @@
  * warnings on gcc.
  */
 #if defined(__GNUC__) && __GNUC__
-#define UNINITIALIZED_IS_SAFE(x) x=x
+#define UNINITIALIZED_IS_SAFE(x) x = x
 #else
 #define UNINITIALIZED_IS_SAFE(x) x
 #endif
@@ -39,15 +38,14 @@
 #endif
 
 /* Shift down with rounding */
-#define ROUND_POWER_OF_TWO(value, n) \
-    (((value) + (1 << ((n) - 1))) >> (n))
+#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
 
 #define ALIGN_POWER_OF_TWO(value, n) \
-    (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))
+  (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))
 
 #if CONFIG_VPX_HIGHBITDEPTH
-#define CONVERT_TO_SHORTPTR(x) ((uint16_t*)(((uintptr_t)x) << 1))
-#define CONVERT_TO_BYTEPTR(x) ((uint8_t*)(((uintptr_t)x) >> 1))
+#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)x) << 1))
+#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)x) >> 1))
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
 #endif  // VPX_PORTS_MEM_H_
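/*
 * ROUND_POWER_OF_TWO above is the add-half-then-shift rounding used by the
 * convolve kernels: for n = 7 it adds 64 before shifting, so 120 rounds up
 * to 1 where a bare 120 >> 7 truncates to 0. The macro is copied under a
 * demo name so this sketch compiles alone; round_demo is illustrative.
 */
#define ROUND_POWER_OF_TWO_DEMO(value, n) (((value) + (1 << ((n)-1))) >> (n))

static int round_demo(void) {
  return ROUND_POWER_OF_TWO_DEMO(120, 7); /* (120 + 64) >> 7 == 1, not 0 */
}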
diff --git a/vpx_ports/mem_ops.h b/vpx_ports/mem_ops.h
index d4a3d773f3c34d12f744726000c53252c7ec3ca0..873009869513df8bf730b2bd558be8aa826c3839 100644
--- a/vpx_ports/mem_ops.h
+++ b/vpx_ports/mem_ops.h
@@ -46,36 +46,36 @@
 #undef MEM_VALUE_T_SZ_BITS
 #define MEM_VALUE_T_SZ_BITS (sizeof(MEM_VALUE_T) << 3)
 
-#undef  mem_ops_wrap_symbol
+#undef mem_ops_wrap_symbol
 #define mem_ops_wrap_symbol(fn) mem_ops_wrap_symbol2(fn, MEM_VALUE_T)
-#undef  mem_ops_wrap_symbol2
-#define mem_ops_wrap_symbol2(fn,typ) mem_ops_wrap_symbol3(fn,typ)
-#undef  mem_ops_wrap_symbol3
-#define mem_ops_wrap_symbol3(fn,typ) fn##_as_##typ
+#undef mem_ops_wrap_symbol2
+#define mem_ops_wrap_symbol2(fn, typ) mem_ops_wrap_symbol3(fn, typ)
+#undef mem_ops_wrap_symbol3
+#define mem_ops_wrap_symbol3(fn, typ) fn##_as_##typ
 
 /*
  * Include aligned access routines
  */
 #define INCLUDED_BY_MEM_OPS_H
 #include "mem_ops_aligned.h"
-#undef  INCLUDED_BY_MEM_OPS_H
+#undef INCLUDED_BY_MEM_OPS_H
 
-#undef  mem_get_be16
+#undef mem_get_be16
 #define mem_get_be16 mem_ops_wrap_symbol(mem_get_be16)
 static unsigned MEM_VALUE_T mem_get_be16(const void *vmem) {
-  unsigned MEM_VALUE_T  val;
-  const MAU_T          *mem = (const MAU_T *)vmem;
+  unsigned MEM_VALUE_T val;
+  const MAU_T *mem = (const MAU_T *)vmem;
 
   val = mem[0] << 8;
   val |= mem[1];
   return val;
 }
 
-#undef  mem_get_be24
+#undef mem_get_be24
 #define mem_get_be24 mem_ops_wrap_symbol(mem_get_be24)
 static unsigned MEM_VALUE_T mem_get_be24(const void *vmem) {
-  unsigned MEM_VALUE_T  val;
-  const MAU_T          *mem = (const MAU_T *)vmem;
+  unsigned MEM_VALUE_T val;
+  const MAU_T *mem = (const MAU_T *)vmem;
 
   val = mem[0] << 16;
   val |= mem[1] << 8;
@@ -83,11 +83,11 @@ static unsigned MEM_VALUE_T mem_get_be24(const void *vmem) {
   return val;
 }
 
-#undef  mem_get_be32
+#undef mem_get_be32
 #define mem_get_be32 mem_ops_wrap_symbol(mem_get_be32)
 static unsigned MEM_VALUE_T mem_get_be32(const void *vmem) {
-  unsigned MEM_VALUE_T  val;
-  const MAU_T          *mem = (const MAU_T *)vmem;
+  unsigned MEM_VALUE_T val;
+  const MAU_T *mem = (const MAU_T *)vmem;
 
   val = mem[0] << 24;
   val |= mem[1] << 16;
@@ -96,22 +96,22 @@ static unsigned MEM_VALUE_T mem_get_be32(const void *vmem) {
   return val;
 }
 
-#undef  mem_get_le16
+#undef mem_get_le16
 #define mem_get_le16 mem_ops_wrap_symbol(mem_get_le16)
 static unsigned MEM_VALUE_T mem_get_le16(const void *vmem) {
-  unsigned MEM_VALUE_T  val;
-  const MAU_T          *mem = (const MAU_T *)vmem;
+  unsigned MEM_VALUE_T val;
+  const MAU_T *mem = (const MAU_T *)vmem;
 
   val = mem[1] << 8;
   val |= mem[0];
   return val;
 }
 
-#undef  mem_get_le24
+#undef mem_get_le24
 #define mem_get_le24 mem_ops_wrap_symbol(mem_get_le24)
 static unsigned MEM_VALUE_T mem_get_le24(const void *vmem) {
-  unsigned MEM_VALUE_T  val;
-  const MAU_T          *mem = (const MAU_T *)vmem;
+  unsigned MEM_VALUE_T val;
+  const MAU_T *mem = (const MAU_T *)vmem;
 
   val = mem[2] << 16;
   val |= mem[1] << 8;
@@ -119,11 +119,11 @@ static unsigned MEM_VALUE_T mem_get_le24(const void *vmem) {
   return val;
 }
 
-#undef  mem_get_le32
+#undef mem_get_le32
 #define mem_get_le32 mem_ops_wrap_symbol(mem_get_le32)
 static unsigned MEM_VALUE_T mem_get_le32(const void *vmem) {
-  unsigned MEM_VALUE_T  val;
-  const MAU_T          *mem = (const MAU_T *)vmem;
+  unsigned MEM_VALUE_T val;
+  const MAU_T *mem = (const MAU_T *)vmem;
 
   val = mem[3] << 24;
   val |= mem[2] << 16;
@@ -132,93 +132,93 @@ static unsigned MEM_VALUE_T mem_get_le32(const void *vmem) {
   return val;
 }
 
-#define mem_get_s_generic(end,sz) \
-  static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) {\
-    const MAU_T *mem = (const MAU_T*)vmem;\
-    signed MEM_VALUE_T val = mem_get_##end##sz(mem);\
-    return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz);\
+#define mem_get_s_generic(end, sz)                                            \
+  static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) { \
+    const MAU_T *mem = (const MAU_T *)vmem;                                   \
+    signed MEM_VALUE_T val = mem_get_##end##sz(mem);                          \
+    return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz); \
   }
 
-#undef  mem_get_sbe16
+#undef mem_get_sbe16
 #define mem_get_sbe16 mem_ops_wrap_symbol(mem_get_sbe16)
 mem_get_s_generic(be, 16)
-
-#undef  mem_get_sbe24
+#undef mem_get_sbe24
 #define mem_get_sbe24 mem_ops_wrap_symbol(mem_get_sbe24)
-mem_get_s_generic(be, 24)
+    mem_get_s_generic(be, 24)
 
-#undef  mem_get_sbe32
+#undef mem_get_sbe32
 #define mem_get_sbe32 mem_ops_wrap_symbol(mem_get_sbe32)
-mem_get_s_generic(be, 32)
+        mem_get_s_generic(be, 32)
 
-#undef  mem_get_sle16
+#undef mem_get_sle16
 #define mem_get_sle16 mem_ops_wrap_symbol(mem_get_sle16)
-mem_get_s_generic(le, 16)
+            mem_get_s_generic(le, 16)
 
-#undef  mem_get_sle24
+#undef mem_get_sle24
 #define mem_get_sle24 mem_ops_wrap_symbol(mem_get_sle24)
-mem_get_s_generic(le, 24)
+                mem_get_s_generic(le, 24)
 
-#undef  mem_get_sle32
+#undef mem_get_sle32
 #define mem_get_sle32 mem_ops_wrap_symbol(mem_get_sle32)
-mem_get_s_generic(le, 32)
+                    mem_get_s_generic(le, 32)
 
-#undef  mem_put_be16
+#undef mem_put_be16
 #define mem_put_be16 mem_ops_wrap_symbol(mem_put_be16)
-static VPX_INLINE void mem_put_be16(void *vmem, MEM_VALUE_T val) {
+                        static VPX_INLINE
+    void mem_put_be16(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (val >> 8) & 0xff;
   mem[1] = (val >> 0) & 0xff;
 }
 
-#undef  mem_put_be24
+#undef mem_put_be24
 #define mem_put_be24 mem_ops_wrap_symbol(mem_put_be24)
 static VPX_INLINE void mem_put_be24(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (val >> 16) & 0xff;
-  mem[1] = (val >>  8) & 0xff;
-  mem[2] = (val >>  0) & 0xff;
+  mem[1] = (val >> 8) & 0xff;
+  mem[2] = (val >> 0) & 0xff;
 }
 
-#undef  mem_put_be32
+#undef mem_put_be32
 #define mem_put_be32 mem_ops_wrap_symbol(mem_put_be32)
 static VPX_INLINE void mem_put_be32(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
   mem[0] = (val >> 24) & 0xff;
   mem[1] = (val >> 16) & 0xff;
-  mem[2] = (val >>  8) & 0xff;
-  mem[3] = (val >>  0) & 0xff;
+  mem[2] = (val >> 8) & 0xff;
+  mem[3] = (val >> 0) & 0xff;
 }
 
-#undef  mem_put_le16
+#undef mem_put_le16
 #define mem_put_le16 mem_ops_wrap_symbol(mem_put_le16)
 static VPX_INLINE void mem_put_le16(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
-  mem[0] = (val >>  0) & 0xff;
-  mem[1] = (val >>  8) & 0xff;
+  mem[0] = (val >> 0) & 0xff;
+  mem[1] = (val >> 8) & 0xff;
 }
 
-#undef  mem_put_le24
+#undef mem_put_le24
 #define mem_put_le24 mem_ops_wrap_symbol(mem_put_le24)
 static VPX_INLINE void mem_put_le24(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
-  mem[0] = (val >>  0) & 0xff;
-  mem[1] = (val >>  8) & 0xff;
+  mem[0] = (val >> 0) & 0xff;
+  mem[1] = (val >> 8) & 0xff;
   mem[2] = (val >> 16) & 0xff;
 }
 
-#undef  mem_put_le32
+#undef mem_put_le32
 #define mem_put_le32 mem_ops_wrap_symbol(mem_put_le32)
 static VPX_INLINE void mem_put_le32(void *vmem, MEM_VALUE_T val) {
   MAU_T *mem = (MAU_T *)vmem;
 
-  mem[0] = (val >>  0) & 0xff;
-  mem[1] = (val >>  8) & 0xff;
+  mem[0] = (val >> 0) & 0xff;
+  mem[1] = (val >> 8) & 0xff;
   mem[2] = (val >> 16) & 0xff;
   mem[3] = (val >> 24) & 0xff;
 }
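/*
 * The get/put pairs above pin the byte order in memory regardless of host
 * endianness, and the mem_get_s* wrappers sign-extend with a shift pair.
 * A round-trip sketch (mem_ops_demo is illustrative, and the sign-extension
 * line relies on the same shift behavior the header itself assumes):
 */
#include <assert.h>
#include <stdint.h>

static void mem_ops_demo(void) {
  unsigned char b[2];
  unsigned int v;
  int32_t s;
  b[0] = (0x1234 >> 8) & 0xff; /* mem_put_be16, by hand */
  b[1] = 0x1234 & 0xff;
  v = (unsigned int)(b[0] << 8) | b[1]; /* mem_get_be16, by hand */
  assert(v == 0x1234);
  /* sign extension as in mem_get_s_generic: park the 24-bit value at the
   * top of the word, then arithmetic-shift it back down */
  s = ((int32_t)0xffffff << (32 - 24)) >> (32 - 24);
  assert(s == -1);
}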
diff --git a/vpx_ports/mem_ops_aligned.h b/vpx_ports/mem_ops_aligned.h
index c16111fec5d19bd8aeca404875dcba3e17a06300..e3c0f42e8d7a324a27e68fef75efd013c3c5662a 100644
--- a/vpx_ports/mem_ops_aligned.h
+++ b/vpx_ports/mem_ops_aligned.h
@@ -27,133 +27,133 @@
 /* Architectures that provide instructions for doing this byte swapping
  * could redefine these macros.
  */
-#define swap_endian_16(val,raw) do {\
-    val = ((raw>>8) & 0x00ff) \
-          | ((raw<<8) & 0xff00);\
-  } while(0)
-#define swap_endian_32(val,raw) do {\
-    val = ((raw>>24) & 0x000000ff) \
-          | ((raw>>8)  & 0x0000ff00) \
-          | ((raw<<8)  & 0x00ff0000) \
-          | ((raw<<24) & 0xff000000); \
-  } while(0)
-#define swap_endian_16_se(val,raw) do {\
-    swap_endian_16(val,raw);\
-    val = ((val << 16) >> 16);\
-  } while(0)
-#define swap_endian_32_se(val,raw) swap_endian_32(val,raw)
-
-#define mem_get_ne_aligned_generic(end,sz) \
-  static VPX_INLINE unsigned MEM_VALUE_T \
-    mem_get_##end##sz##_aligned(const void *vmem) {\
-    const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
-    return *mem;\
+#define swap_endian_16(val, raw)                         \
+  do {                                                   \
+    val = ((raw >> 8) & 0x00ff) | ((raw << 8) & 0xff00); \
+  } while (0)
+#define swap_endian_32(val, raw)                                   \
+  do {                                                             \
+    val = ((raw >> 24) & 0x000000ff) | ((raw >> 8) & 0x0000ff00) | \
+          ((raw << 8) & 0x00ff0000) | ((raw << 24) & 0xff000000);  \
+  } while (0)
+#define swap_endian_16_se(val, raw) \
+  do {                              \
+    swap_endian_16(val, raw);       \
+    val = ((val << 16) >> 16);      \
+  } while (0)
+#define swap_endian_32_se(val, raw) swap_endian_32(val, raw)
+
+#define mem_get_ne_aligned_generic(end, sz)                           \
+  static VPX_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \
+      const void *vmem) {                                             \
+    const uint##sz##_t *mem = (const uint##sz##_t *)vmem;             \
+    return *mem;                                                      \
   }
 
-#define mem_get_sne_aligned_generic(end,sz) \
-  static VPX_INLINE signed MEM_VALUE_T \
-    mem_get_s##end##sz##_aligned(const void *vmem) {\
-    const int##sz##_t *mem = (const int##sz##_t *)vmem;\
-    return *mem;\
+#define mem_get_sne_aligned_generic(end, sz)                         \
+  static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \
+      const void *vmem) {                                            \
+    const int##sz##_t *mem = (const int##sz##_t *)vmem;              \
+    return *mem;                                                     \
   }
 
-#define mem_get_se_aligned_generic(end,sz) \
-  static VPX_INLINE unsigned MEM_VALUE_T \
-    mem_get_##end##sz##_aligned(const void *vmem) {\
-    const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
-    unsigned MEM_VALUE_T val, raw = *mem;\
-    swap_endian_##sz(val,raw);\
-    return val;\
+#define mem_get_se_aligned_generic(end, sz)                           \
+  static VPX_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \
+      const void *vmem) {                                             \
+    const uint##sz##_t *mem = (const uint##sz##_t *)vmem;             \
+    unsigned MEM_VALUE_T val, raw = *mem;                             \
+    swap_endian_##sz(val, raw);                                       \
+    return val;                                                       \
   }
 
-#define mem_get_sse_aligned_generic(end,sz) \
-  static VPX_INLINE signed MEM_VALUE_T \
-    mem_get_s##end##sz##_aligned(const void *vmem) {\
-    const int##sz##_t *mem = (const int##sz##_t *)vmem;\
-    unsigned MEM_VALUE_T val, raw = *mem;\
-    swap_endian_##sz##_se(val,raw);\
-    return val;\
+#define mem_get_sse_aligned_generic(end, sz)                         \
+  static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \
+      const void *vmem) {                                            \
+    const int##sz##_t *mem = (const int##sz##_t *)vmem;              \
+    unsigned MEM_VALUE_T val, raw = *mem;                            \
+    swap_endian_##sz##_se(val, raw);                                 \
+    return val;                                                      \
   }
 
-#define mem_put_ne_aligned_generic(end,sz) \
-  static VPX_INLINE void \
-    mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
-    uint##sz##_t *mem = (uint##sz##_t *)vmem;\
-    *mem = (uint##sz##_t)val;\
+#define mem_put_ne_aligned_generic(end, sz)                             \
+  static VPX_INLINE void mem_put_##end##sz##_aligned(void *vmem,        \
+                                                     MEM_VALUE_T val) { \
+    uint##sz##_t *mem = (uint##sz##_t *)vmem;                           \
+    *mem = (uint##sz##_t)val;                                           \
   }
 
-#define mem_put_se_aligned_generic(end,sz) \
-  static VPX_INLINE void \
-    mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
-    uint##sz##_t *mem = (uint##sz##_t *)vmem, raw;\
-    swap_endian_##sz(raw,val);\
-    *mem = (uint##sz##_t)raw;\
+#define mem_put_se_aligned_generic(end, sz)                             \
+  static VPX_INLINE void mem_put_##end##sz##_aligned(void *vmem,        \
+                                                     MEM_VALUE_T val) { \
+    uint##sz##_t *mem = (uint##sz##_t *)vmem, raw;                      \
+    swap_endian_##sz(raw, val);                                         \
+    *mem = (uint##sz##_t)raw;                                           \
   }
 
 #include "vpx_config.h"
 #if CONFIG_BIG_ENDIAN
-#define mem_get_be_aligned_generic(sz)  mem_get_ne_aligned_generic(be,sz)
-#define mem_get_sbe_aligned_generic(sz) mem_get_sne_aligned_generic(be,sz)
-#define mem_get_le_aligned_generic(sz)  mem_get_se_aligned_generic(le,sz)
-#define mem_get_sle_aligned_generic(sz) mem_get_sse_aligned_generic(le,sz)
-#define mem_put_be_aligned_generic(sz)  mem_put_ne_aligned_generic(be,sz)
-#define mem_put_le_aligned_generic(sz)  mem_put_se_aligned_generic(le,sz)
+#define mem_get_be_aligned_generic(sz) mem_get_ne_aligned_generic(be, sz)
+#define mem_get_sbe_aligned_generic(sz) mem_get_sne_aligned_generic(be, sz)
+#define mem_get_le_aligned_generic(sz) mem_get_se_aligned_generic(le, sz)
+#define mem_get_sle_aligned_generic(sz) mem_get_sse_aligned_generic(le, sz)
+#define mem_put_be_aligned_generic(sz) mem_put_ne_aligned_generic(be, sz)
+#define mem_put_le_aligned_generic(sz) mem_put_se_aligned_generic(le, sz)
 #else
-#define mem_get_be_aligned_generic(sz)  mem_get_se_aligned_generic(be,sz)
-#define mem_get_sbe_aligned_generic(sz) mem_get_sse_aligned_generic(be,sz)
-#define mem_get_le_aligned_generic(sz)  mem_get_ne_aligned_generic(le,sz)
-#define mem_get_sle_aligned_generic(sz) mem_get_sne_aligned_generic(le,sz)
-#define mem_put_be_aligned_generic(sz)  mem_put_se_aligned_generic(be,sz)
-#define mem_put_le_aligned_generic(sz)  mem_put_ne_aligned_generic(le,sz)
+#define mem_get_be_aligned_generic(sz) mem_get_se_aligned_generic(be, sz)
+#define mem_get_sbe_aligned_generic(sz) mem_get_sse_aligned_generic(be, sz)
+#define mem_get_le_aligned_generic(sz) mem_get_ne_aligned_generic(le, sz)
+#define mem_get_sle_aligned_generic(sz) mem_get_sne_aligned_generic(le, sz)
+#define mem_put_be_aligned_generic(sz) mem_put_se_aligned_generic(be, sz)
+#define mem_put_le_aligned_generic(sz) mem_put_ne_aligned_generic(le, sz)
 #endif
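
To make the token pasting concrete: on a little-endian build, mem_get_le_aligned_generic(32) selects the native-endian template, which expands to roughly the following (the mem_ops_wrap_symbol renaming of the function name is omitted from this sketch):

static VPX_INLINE unsigned MEM_VALUE_T mem_get_le32_aligned(const void *vmem) {
  const uint32_t *mem = (const uint32_t *)vmem;
  return *mem;
}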
 
-#undef  mem_get_be16_aligned
+#undef mem_get_be16_aligned
 #define mem_get_be16_aligned mem_ops_wrap_symbol(mem_get_be16_aligned)
 mem_get_be_aligned_generic(16)
 
-#undef  mem_get_be32_aligned
+#undef mem_get_be32_aligned
 #define mem_get_be32_aligned mem_ops_wrap_symbol(mem_get_be32_aligned)
-mem_get_be_aligned_generic(32)
+mem_get_be_aligned_generic(32)
 
-#undef  mem_get_le16_aligned
+#undef mem_get_le16_aligned
 #define mem_get_le16_aligned mem_ops_wrap_symbol(mem_get_le16_aligned)
-mem_get_le_aligned_generic(16)
+mem_get_le_aligned_generic(16)
 
-#undef  mem_get_le32_aligned
+#undef mem_get_le32_aligned
 #define mem_get_le32_aligned mem_ops_wrap_symbol(mem_get_le32_aligned)
-mem_get_le_aligned_generic(32)
+mem_get_le_aligned_generic(32)
 
-#undef  mem_get_sbe16_aligned
+#undef mem_get_sbe16_aligned
 #define mem_get_sbe16_aligned mem_ops_wrap_symbol(mem_get_sbe16_aligned)
-mem_get_sbe_aligned_generic(16)
+mem_get_sbe_aligned_generic(16)
 
-#undef  mem_get_sbe32_aligned
+#undef mem_get_sbe32_aligned
 #define mem_get_sbe32_aligned mem_ops_wrap_symbol(mem_get_sbe32_aligned)
-mem_get_sbe_aligned_generic(32)
+mem_get_sbe_aligned_generic(32)
 
-#undef  mem_get_sle16_aligned
+#undef mem_get_sle16_aligned
 #define mem_get_sle16_aligned mem_ops_wrap_symbol(mem_get_sle16_aligned)
-mem_get_sle_aligned_generic(16)
+mem_get_sle_aligned_generic(16)
 
-#undef  mem_get_sle32_aligned
+#undef mem_get_sle32_aligned
 #define mem_get_sle32_aligned mem_ops_wrap_symbol(mem_get_sle32_aligned)
-mem_get_sle_aligned_generic(32)
+mem_get_sle_aligned_generic(32)
 
-#undef  mem_put_be16_aligned
+#undef mem_put_be16_aligned
 #define mem_put_be16_aligned mem_ops_wrap_symbol(mem_put_be16_aligned)
-mem_put_be_aligned_generic(16)
+mem_put_be_aligned_generic(16)
 
-#undef  mem_put_be32_aligned
+#undef mem_put_be32_aligned
 #define mem_put_be32_aligned mem_ops_wrap_symbol(mem_put_be32_aligned)
-mem_put_be_aligned_generic(32)
+mem_put_be_aligned_generic(32)
 
-#undef  mem_put_le16_aligned
+#undef mem_put_le16_aligned
 #define mem_put_le16_aligned mem_ops_wrap_symbol(mem_put_le16_aligned)
-mem_put_le_aligned_generic(16)
+mem_put_le_aligned_generic(16)
 
-#undef  mem_put_le32_aligned
+#undef mem_put_le32_aligned
 #define mem_put_le32_aligned mem_ops_wrap_symbol(mem_put_le32_aligned)
-mem_put_le_aligned_generic(32)
+mem_put_le_aligned_generic(32)
 
 #undef mem_get_ne_aligned_generic
 #undef mem_get_se_aligned_generic
diff --git a/vpx_ports/msvc.h b/vpx_ports/msvc.h
index cab77405f47ef34f7406c81dc4da4741f62ca630..3ff71474b3b43222aa9dba6a987f3e2331208132 100644
--- a/vpx_ports/msvc.h
+++ b/vpx_ports/msvc.h
@@ -14,9 +14,9 @@
 
 #include "./vpx_config.h"
 
-# if _MSC_VER < 1900  // VS2015 provides snprintf
-#  define snprintf _snprintf
-# endif  // _MSC_VER < 1900
+#if _MSC_VER < 1900  // VS2015 provides snprintf
+#define snprintf _snprintf
+#endif  // _MSC_VER < 1900
 
 #if _MSC_VER < 1800  // VS2013 provides round
 #include <math.h>
diff --git a/vpx_ports/vpx_once.h b/vpx_ports/vpx_once.h
index da04db459075c1490b4fab923164925ca6576dd4..7d9fc3b4063bb22bec72f1e42d0d13d96d9f6047 100644
--- a/vpx_ports/vpx_once.h
+++ b/vpx_ports/vpx_once.h
@@ -48,102 +48,92 @@
  * As a static, once_state will be zero-initialized at program start.
  */
 static LONG once_state;
-static void once(void (*func)(void))
-{
-    /* Try to advance once_state from its initial value of 0 to 1.
-     * Only one thread can succeed in doing so.
-     */
-    if (InterlockedCompareExchange(&once_state, 1, 0) == 0) {
-        /* We're the winning thread, having set once_state to 1.
-         * Call our function. */
-        func();
-        /* Now advance once_state to 2, unblocking any other threads. */
-        InterlockedIncrement(&once_state);
-        return;
-    }
-
-    /* We weren't the winning thread, but we want to block on
-     * the state variable so we don't return before func()
-     * has finished executing elsewhere.
+static void once(void (*func)(void)) {
+  /* Try to advance once_state from its initial value of 0 to 1.
+   * Only one thread can succeed in doing so.
+   */
+  if (InterlockedCompareExchange(&once_state, 1, 0) == 0) {
+    /* We're the winning thread, having set once_state to 1.
+     * Call our function. */
+    func();
+    /* Now advance once_state to 2, unblocking any other threads. */
+    InterlockedIncrement(&once_state);
+    return;
+  }
+
+  /* We weren't the winning thread, but we want to block on
+   * the state variable so we don't return before func()
+   * has finished executing elsewhere.
+   *
+   * Try to advance once_state from 2 to 2, which is only possible
+   * after the winning thread advances it from 1 to 2.
+   */
+  while (InterlockedCompareExchange(&once_state, 2, 2) != 2) {
+    /* State isn't yet 2. Try again.
      *
-     * Try to advance once_state from 2 to 2, which is only possible
-     * after the winning thead advances it from 1 to 2.
-     */
-    while (InterlockedCompareExchange(&once_state, 2, 2) != 2) {
-        /* State isn't yet 2. Try again.
-         *
-         * We are used for singleton initialization functions,
-         * which should complete quickly. Contention will likewise
-         * be rare, so it's worthwhile to use a simple but cpu-
-         * intensive busy-wait instead of successive backoff,
-         * waiting on a kernel object, or another heavier-weight scheme.
-         *
-         * We can at least yield our timeslice.
-         */
-        Sleep(0);
-    }
-
-    /* We've seen once_state advance to 2, so we know func()
-     * has been called. And we've left once_state as we found it,
-     * so other threads will have the same experience.
+     * We are used for singleton initialization functions,
+     * which should complete quickly. Contention will likewise
+     * be rare, so it's worthwhile to use a simple but cpu-
+     * intensive busy-wait instead of successive backoff,
+     * waiting on a kernel object, or another heavier-weight scheme.
      *
-     * It's safe to return now.
+     * We can at least yield our timeslice.
      */
-    return;
+    Sleep(0);
+  }
+
+  /* We've seen once_state advance to 2, so we know func()
+   * has been called. And we've left once_state as we found it,
+   * so other threads will have the same experience.
+   *
+   * It's safe to return now.
+   */
+  return;
 }
 
-
 #elif CONFIG_MULTITHREAD && defined(__OS2__)
 #define INCL_DOS
 #include <os2.h>
-static void once(void (*func)(void))
-{
-    static int done;
+static void once(void (*func)(void)) {
+  static int done;
 
-    /* If the initialization is complete, return early. */
-    if(done)
-        return;
+  /* If the initialization is complete, return early. */
+  if (done) return;
 
-    /* Causes all other threads in the process to block themselves
-     * and give up their time slice.
-     */
-    DosEnterCritSec();
+  /* Causes all other threads in the process to block themselves
+   * and give up their time slice.
+   */
+  DosEnterCritSec();
 
-    if (!done)
-    {
-        func();
-        done = 1;
-    }
+  if (!done) {
+    func();
+    done = 1;
+  }
 
-    /* Restores normal thread dispatching for the current process. */
-    DosExitCritSec();
+  /* Restores normal thread dispatching for the current process. */
+  DosExitCritSec();
 }
 
-
 #elif CONFIG_MULTITHREAD && HAVE_PTHREAD_H
 #include <pthread.h>
-static void once(void (*func)(void))
-{
-    static pthread_once_t lock = PTHREAD_ONCE_INIT;
-    pthread_once(&lock, func);
+static void once(void (*func)(void)) {
+  static pthread_once_t lock = PTHREAD_ONCE_INIT;
+  pthread_once(&lock, func);
 }
 
-
 #else
 /* No-op version that performs no synchronization. *_rtcd() is idempotent,
  * so as long as your platform provides atomic loads/stores of pointers
  * no synchronization is strictly necessary.
  */
 
-static void once(void (*func)(void))
-{
-    static int done;
+static void once(void (*func)(void)) {
+  static int done;
 
-    if(!done)
-    {
-        func();
-        done = 1;
-    }
+  if (!done) {
+    func();
+    done = 1;
+  }
 }
 #endif
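
Whichever backend is compiled in, the calling pattern is the same; a minimal usage sketch (vpx_example_rtcd is a hypothetical init function, not part of this header):

/* Sketch: func runs exactly once even if many threads race into init. */
static void vpx_example_rtcd(void) {
  /* e.g. populate run-time CPU dispatch tables */
}

static void vpx_example_init(void) {
  once(vpx_example_rtcd); /* cheap on every call after the first */
}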
 
diff --git a/vpx_ports/vpx_timer.h b/vpx_ports/vpx_timer.h
index dd98e291c2f7e76917475b4ef4d551f7df3f158b..4aae30e947451bf6f6b4be2f41c4a6aa1c938287 100644
--- a/vpx_ports/vpx_timer.h
+++ b/vpx_ports/vpx_timer.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VPX_PORTS_VPX_TIMER_H_
 #define VPX_PORTS_VPX_TIMER_H_
 
@@ -34,30 +33,27 @@
 
 /* timersub is not provided by msys at this time. */
 #ifndef timersub
-#define timersub(a, b, result) \
-  do { \
-    (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
+#define timersub(a, b, result)                       \
+  do {                                               \
+    (result)->tv_sec = (a)->tv_sec - (b)->tv_sec;    \
     (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
-    if ((result)->tv_usec < 0) { \
-      --(result)->tv_sec; \
-      (result)->tv_usec += 1000000; \
-    } \
+    if ((result)->tv_usec < 0) {                     \
+      --(result)->tv_sec;                            \
+      (result)->tv_usec += 1000000;                  \
+    }                                                \
   } while (0)
 #endif
 #endif
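
The timersub fallback subtracts two struct timeval values, borrowing from the seconds field when the microseconds go negative; a worked sketch:

/* Sketch: 12.1s - 10.9s borrows from tv_sec and yields 1.2s. */
static void timersub_example(void) {
  struct timeval start = { 10, 900000 };
  struct timeval end = { 12, 100000 };
  struct timeval diff;

  timersub(&end, &start, &diff); /* diff == { 1, 200000 } */
}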
 
-
 struct vpx_usec_timer {
 #if defined(_WIN32)
-  LARGE_INTEGER  begin, end;
+  LARGE_INTEGER begin, end;
 #else
   struct timeval begin, end;
 #endif
 };
 
-
-static INLINE void
-vpx_usec_timer_start(struct vpx_usec_timer *t) {
+static INLINE void vpx_usec_timer_start(struct vpx_usec_timer *t) {
 #if defined(_WIN32)
   QueryPerformanceCounter(&t->begin);
 #else
@@ -65,9 +61,7 @@ vpx_usec_timer_start(struct vpx_usec_timer *t) {
 #endif
 }
 
-
-static INLINE void
-vpx_usec_timer_mark(struct vpx_usec_timer *t) {
+static INLINE void vpx_usec_timer_mark(struct vpx_usec_timer *t) {
 #if defined(_WIN32)
   QueryPerformanceCounter(&t->end);
 #else
@@ -75,9 +69,7 @@ vpx_usec_timer_mark(struct vpx_usec_timer *t) {
 #endif
 }
 
-
-static INLINE int64_t
-vpx_usec_timer_elapsed(struct vpx_usec_timer *t) {
+static INLINE int64_t vpx_usec_timer_elapsed(struct vpx_usec_timer *t) {
 #if defined(_WIN32)
   LARGE_INTEGER freq, diff;
 
@@ -104,16 +96,11 @@ struct vpx_usec_timer {
   void *dummy;
 };
 
-static INLINE void
-vpx_usec_timer_start(struct vpx_usec_timer *t) { }
+static INLINE void vpx_usec_timer_start(struct vpx_usec_timer *t) {}
 
-static INLINE void
-vpx_usec_timer_mark(struct vpx_usec_timer *t) { }
+static INLINE void vpx_usec_timer_mark(struct vpx_usec_timer *t) {}
 
-static INLINE int
-vpx_usec_timer_elapsed(struct vpx_usec_timer *t) {
-  return 0;
-}
+static INLINE int vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { return 0; }
 
 #endif /* CONFIG_OS_SUPPORT */
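
A typical measurement with this timer brackets the workload between start and mark (a sketch; do_work is hypothetical):

extern void do_work(void); /* hypothetical workload */

static int64_t time_do_work(void) {
  struct vpx_usec_timer timer;

  vpx_usec_timer_start(&timer);
  do_work();
  vpx_usec_timer_mark(&timer);
  return vpx_usec_timer_elapsed(&timer); /* microseconds between the marks */
}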
 
diff --git a/vpx_ports/x86.h b/vpx_ports/x86.h
index 5da346e58fc53ac525779374e71902a59814b31c..089b048d57e464d2ff3832def5484f647e7cc84c 100644
--- a/vpx_ports/x86.h
+++ b/vpx_ports/x86.h
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VPX_PORTS_X86_H_
 #define VPX_PORTS_X86_H_
 #include <stdlib.h>
@@ -36,64 +35,73 @@ typedef enum {
   VPX_CPU_VIA,
 
   VPX_CPU_LAST
-}  vpx_cpu_t;
+} vpx_cpu_t;
 
 #if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__)
 #if ARCH_X86_64
-#define cpuid(func, func2, ax, bx, cx, dx)\
-  __asm__ __volatile__ (\
-                        "cpuid           \n\t" \
-                        : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
-                        : "a" (func), "c" (func2));
+#define cpuid(func, func2, ax, bx, cx, dx)                      \
+  __asm__ __volatile__("cpuid           \n\t"                   \
+                       : "=a"(ax), "=b"(bx), "=c"(cx), "=d"(dx) \
+                       : "a"(func), "c"(func2));
 #else
-#define cpuid(func, func2, ax, bx, cx, dx)\
-  __asm__ __volatile__ (\
-                        "mov %%ebx, %%edi   \n\t" \
-                        "cpuid              \n\t" \
-                        "xchg %%edi, %%ebx  \n\t" \
-                        : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
-                        : "a" (func), "c" (func2));
+#define cpuid(func, func2, ax, bx, cx, dx)     \
+  __asm__ __volatile__(                        \
+      "mov %%ebx, %%edi   \n\t"                \
+      "cpuid              \n\t"                \
+      "xchg %%edi, %%ebx  \n\t"                \
+      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
+      : "a"(func), "c"(func2));
 #endif
-#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__*/
+#elif defined(__SUNPRO_C) || \
+    defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__ */
 #if ARCH_X86_64
-#define cpuid(func, func2, ax, bx, cx, dx)\
-  asm volatile (\
-                "xchg %rsi, %rbx \n\t" \
-                "cpuid           \n\t" \
-                "movl %ebx, %edi \n\t" \
-                "xchg %rsi, %rbx \n\t" \
-                : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
-                : "a" (func), "c" (func2));
+#define cpuid(func, func2, ax, bx, cx, dx)     \
+  asm volatile(                                \
+      "xchg %rsi, %rbx \n\t"                   \
+      "cpuid           \n\t"                   \
+      "movl %ebx, %edi \n\t"                   \
+      "xchg %rsi, %rbx \n\t"                   \
+      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
+      : "a"(func), "c"(func2));
 #else
-#define cpuid(func, func2, ax, bx, cx, dx)\
-  asm volatile (\
-                "pushl %ebx       \n\t" \
-                "cpuid            \n\t" \
-                "movl %ebx, %edi  \n\t" \
-                "popl %ebx        \n\t" \
-                : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
-                : "a" (func), "c" (func2));
+#define cpuid(func, func2, ax, bx, cx, dx)     \
+  asm volatile(                                \
+      "pushl %ebx       \n\t"                  \
+      "cpuid            \n\t"                  \
+      "movl %ebx, %edi  \n\t"                  \
+      "popl %ebx        \n\t"                  \
+      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
+      : "a"(func), "c"(func2));
 #endif
 #else /* end __SUNPRO__ */
 #if ARCH_X86_64
 #if defined(_MSC_VER) && _MSC_VER > 1500
 void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue);
 #pragma intrinsic(__cpuidex)
-#define cpuid(func, func2, a, b, c, d) do {\
-    int regs[4];\
-    __cpuidex(regs, func, func2); \
-    a = regs[0];  b = regs[1];  c = regs[2];  d = regs[3];\
-  } while(0)
+#define cpuid(func, func2, a, b, c, d) \
+  do {                                 \
+    int regs[4];                       \
+    __cpuidex(regs, func, func2);      \
+    a = regs[0];                       \
+    b = regs[1];                       \
+    c = regs[2];                       \
+    d = regs[3];                       \
+  } while (0)
 #else
 void __cpuid(int CPUInfo[4], int info_type);
 #pragma intrinsic(__cpuid)
-#define cpuid(func, func2, a, b, c, d) do {\
-    int regs[4];\
-    __cpuid(regs, func); \
-    a = regs[0];  b = regs[1];  c = regs[2];  d = regs[3];\
+#define cpuid(func, func2, a, b, c, d) \
+  do {                                 \
+    int regs[4];                       \
+    __cpuid(regs, func);               \
+    a = regs[0];                       \
+    b = regs[1];                       \
+    c = regs[2];                       \
+    d = regs[3];                       \
   } while (0)
 #endif
 #else
+/* clang-format off */
 #define cpuid(func, func2, a, b, c, d)\
   __asm mov eax, func\
   __asm mov ecx, func2\
@@ -103,6 +111,7 @@ void __cpuid(int CPUInfo[4], int info_type);
   __asm mov c, ecx\
   __asm mov d, edx
 #endif
+/* clang-format on */
 #endif /* end others */
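
All of the cpuid variants share one call signature: the leaf/subleaf inputs go in, and the four register outputs come back by name. For example, querying leaf 0 (a sketch):

/* Sketch: leaf 0 returns the highest standard leaf in eax;
 * ebx/edx/ecx carry the 12-byte vendor string. */
static unsigned int max_cpuid_leaf(void) {
  unsigned int eax, ebx, ecx, edx;
  cpuid(0, 0, eax, ebx, ecx, edx);
  return eax;
}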
 
 // NaCl has no support for xgetbv or the raw opcode.
@@ -111,13 +120,13 @@ static INLINE uint64_t xgetbv(void) {
   const uint32_t ecx = 0;
   uint32_t eax, edx;
   // Use the raw opcode for xgetbv for compatibility with older toolchains.
-  __asm__ volatile (
-    ".byte 0x0f, 0x01, 0xd0\n"
-    : "=a"(eax), "=d"(edx) : "c" (ecx));
+  __asm__ volatile(".byte 0x0f, 0x01, 0xd0\n"
+                   : "=a"(eax), "=d"(edx)
+                   : "c"(ecx));
   return ((uint64_t)edx << 32) | eax;
 }
-#elif (defined(_M_X64) || defined(_M_IX86)) && \
-      defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219  // >= VS2010 SP1
+#elif (defined(_M_X64) || defined(_M_IX86)) && defined(_MSC_FULL_VER) && \
+    _MSC_FULL_VER >= 160040219  // >= VS2010 SP1
 #include <immintrin.h>
 #define xgetbv() _xgetbv(0)
 #elif defined(_MSC_VER) && defined(_M_IX86)
@@ -143,20 +152,19 @@ static INLINE uint64_t xgetbv(void) {
 #endif
 #endif
 
-#define HAS_MMX     0x01
-#define HAS_SSE     0x02
-#define HAS_SSE2    0x04
-#define HAS_SSE3    0x08
-#define HAS_SSSE3   0x10
-#define HAS_SSE4_1  0x20
-#define HAS_AVX     0x40
-#define HAS_AVX2    0x80
+#define HAS_MMX 0x01
+#define HAS_SSE 0x02
+#define HAS_SSE2 0x04
+#define HAS_SSE3 0x08
+#define HAS_SSSE3 0x10
+#define HAS_SSE4_1 0x20
+#define HAS_AVX 0x40
+#define HAS_AVX2 0x80
 #ifndef BIT
-#define BIT(n) (1<<n)
+#define BIT(n) (1 << n)
 #endif
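
Callers test these bits against the mask returned by x86_simd_caps() defined below (a sketch, assuming this header is included):

/* Sketch: choose a code path from the runtime capability bits. */
static void pick_simd_path(void) {
  const int caps = x86_simd_caps();

  if (caps & HAS_AVX2) {
    /* take the AVX2 path */
  } else if (caps & HAS_SSE2) {
    /* fall back to SSE2 */
  } else {
    /* plain C path */
  }
}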
 
-static INLINE int
-x86_simd_caps(void) {
+static INLINE int x86_simd_caps(void) {
   unsigned int flags = 0;
   unsigned int mask = ~0;
   unsigned int max_cpuid_val, reg_eax, reg_ebx, reg_ecx, reg_edx;
@@ -166,19 +174,16 @@ x86_simd_caps(void) {
   /* See if the CPU capabilities are being overridden by the environment */
   env = getenv("VPX_SIMD_CAPS");
 
-  if (env && *env)
-    return (int)strtol(env, NULL, 0);
+  if (env && *env) return (int)strtol(env, NULL, 0);
 
   env = getenv("VPX_SIMD_CAPS_MASK");
 
-  if (env && *env)
-    mask = strtol(env, NULL, 0);
+  if (env && *env) mask = strtol(env, NULL, 0);
 
   /* Ensure that the CPUID instruction supports extended features */
   cpuid(0, 0, max_cpuid_val, reg_ebx, reg_ecx, reg_edx);
 
-  if (max_cpuid_val < 1)
-    return 0;
+  if (max_cpuid_val < 1) return 0;
 
   /* Get the standard feature flags */
   cpuid(1, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
@@ -216,91 +221,77 @@ x86_simd_caps(void) {
 unsigned __int64 __rdtsc(void);
 #pragma intrinsic(__rdtsc)
 #endif
-static INLINE unsigned int
-x86_readtsc(void) {
+static INLINE unsigned int x86_readtsc(void) {
 #if defined(__GNUC__) && __GNUC__
   unsigned int tsc;
-  __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
+  __asm__ __volatile__("rdtsc\n\t" : "=a"(tsc) :);
   return tsc;
 #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
   unsigned int tsc;
-  asm volatile("rdtsc\n\t":"=a"(tsc):);
+  asm volatile("rdtsc\n\t" : "=a"(tsc) :);
   return tsc;
 #else
 #if ARCH_X86_64
   return (unsigned int)__rdtsc();
 #else
-  __asm  rdtsc;
+  __asm rdtsc;
 #endif
 #endif
 }
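
Because x86_readtsc returns only the low 32 bits of the time-stamp counter, it suits short intervals measured by unsigned subtraction (a sketch; do_work is hypothetical):

extern void do_work(void); /* hypothetical workload */

/* Sketch: unsigned wraparound keeps the difference valid for short spans. */
static unsigned int cycles_for_work(void) {
  const unsigned int start = x86_readtsc();
  do_work();
  return x86_readtsc() - start;
}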
 
-
 #if defined(__GNUC__) && __GNUC__
-#define x86_pause_hint()\
-  __asm__ __volatile__ ("pause \n\t")
+#define x86_pause_hint() __asm__ __volatile__("pause \n\t")
 #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-#define x86_pause_hint()\
-  asm volatile ("pause \n\t")
+#define x86_pause_hint() asm volatile("pause \n\t")
 #else
 #if ARCH_X86_64
-#define x86_pause_hint()\
-  _mm_pause();
+#define x86_pause_hint() _mm_pause();
 #else
-#define x86_pause_hint()\
-  __asm pause
+#define x86_pause_hint() __asm pause
 #endif
 #endif
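
x86_pause_hint belongs in the body of a spin loop, as in this busy-wait sketch (flag is a hypothetical variable set by another thread):

/* Sketch: the pause hint lowers power and eases SMT contention while
 * spinning. */
static void spin_until_set(volatile int *flag) {
  while (!*flag) {
    x86_pause_hint();
  }
}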
 
 #if defined(__GNUC__) && __GNUC__
-static void
-x87_set_control_word(unsigned short mode) {
+static void x87_set_control_word(unsigned short mode) {
   __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
 }
-static unsigned short
-x87_get_control_word(void) {
+static unsigned short x87_get_control_word(void) {
   unsigned short mode;
-  __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):);
-    return mode;
+  __asm__ __volatile__("fstcw %0\n\t" : "=m"(*&mode) :);
+  return mode;
 }
 #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-static void
-x87_set_control_word(unsigned short mode) {
+static void x87_set_control_word(unsigned short mode) {
   asm volatile("fldcw %0" : : "m"(*&mode));
 }
-static unsigned short
-x87_get_control_word(void) {
+static unsigned short x87_get_control_word(void) {
   unsigned short mode;
-  asm volatile("fstcw %0\n\t":"=m"(*&mode):);
+  asm volatile("fstcw %0\n\t" : "=m"(*&mode) :);
   return mode;
 }
 #elif ARCH_X86_64
 /* No fldcw intrinsics on Windows x64, punt to external asm */
-extern void           vpx_winx64_fldcw(unsigned short mode);
+extern void vpx_winx64_fldcw(unsigned short mode);
 extern unsigned short vpx_winx64_fstcw(void);
 #define x87_set_control_word vpx_winx64_fldcw
 #define x87_get_control_word vpx_winx64_fstcw
 #else
-static void
-x87_set_control_word(unsigned short mode) {
+static void x87_set_control_word(unsigned short mode) {
   __asm { fldcw mode }
 }
-static unsigned short
-x87_get_control_word(void) {
+static unsigned short x87_get_control_word(void) {
   unsigned short mode;
   __asm { fstcw mode }
   return mode;
 }
 #endif
 
-static INLINE unsigned int
-x87_set_double_precision(void) {
+static INLINE unsigned int x87_set_double_precision(void) {
   unsigned int mode = x87_get_control_word();
-  x87_set_control_word((mode&~0x300) | 0x200);
+  x87_set_control_word((mode & ~0x300) | 0x200);
   return mode;
 }
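
The intended pattern is save, compute, restore (a sketch):

/* Sketch: force 53-bit x87 precision around a sensitive computation. */
static double stable_sum(const double *v, int n) {
  const unsigned int saved_mode = x87_set_double_precision();
  double sum = 0;
  int i;

  for (i = 0; i < n; i++) sum += v[i];

  x87_set_control_word((unsigned short)saved_mode); /* restore caller's mode */
  return sum;
}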
 
-
 extern void vpx_reset_mmx_state(void);
 
 #ifdef __cplusplus
diff --git a/vpx_scale/generic/gen_scalers.c b/vpx_scale/generic/gen_scalers.c
index e0e4591c7245196eead529c7d6aa65455700b5f7..5f391cb840dbc1431472b4e7e6d6efaae3f1825f 100644
--- a/vpx_scale/generic/gen_scalers.c
+++ b/vpx_scale/generic/gen_scalers.c
@@ -19,9 +19,10 @@
  *
  *
  *  INPUTS        : const unsigned char *source : Pointer to source data.
- *                  unsigned int source_width    : Stride of source.
+ *                  unsigned int source_width   : Stride of source.
  *                  unsigned char *dest         : Pointer to destination data.
- *                  unsigned int dest_width      : Stride of destination (NOT USED).
+ *                  unsigned int dest_width     : Stride of destination
+ *                                                (NOT USED).
  *
  *  OUTPUTS       : None.
  *
@@ -42,7 +43,7 @@ void vpx_horizontal_line_5_4_scale_c(const unsigned char *source,
   unsigned char *des = dest;
   const unsigned char *src = source;
 
-  (void) dest_width;
+  (void)dest_width;
 
   for (i = 0; i < source_width; i += 5) {
     a = src[0];
@@ -51,7 +52,7 @@ void vpx_horizontal_line_5_4_scale_c(const unsigned char *source,
     d = src[3];
     e = src[4];
 
-    des[0] = (unsigned char) a;
+    des[0] = (unsigned char)a;
     des[1] = (unsigned char)((b * 192 + c * 64 + 128) >> 8);
     des[2] = (unsigned char)((c * 128 + d * 128 + 128) >> 8);
     des[3] = (unsigned char)((d * 64 + e * 192 + 128) >> 8);
@@ -61,12 +62,8 @@ void vpx_horizontal_line_5_4_scale_c(const unsigned char *source,
   }
 }
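
The taps in these scalers are Q8 fixed-point weights that sum to 256, with +128 supplying rounding before the shift; a sketch of the underlying blend:

/* Sketch: Q8 weighted average, w0 + w1 == 256. With p0 = 100, w0 = 192,
 * p1 = 200, w1 = 64: (19200 + 12800 + 128) >> 8 == 125. */
static unsigned char blend_q8(int p0, int w0, int p1, int w1) {
  return (unsigned char)((p0 * w0 + p1 * w1 + 128) >> 8);
}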
 
-
-
-
 void vpx_vertical_band_5_4_scale_c(unsigned char *source,
-                                   unsigned int src_pitch,
-                                   unsigned char *dest,
+                                   unsigned int src_pitch, unsigned char *dest,
                                    unsigned int dest_pitch,
                                    unsigned int dest_width) {
   unsigned int i;
@@ -75,33 +72,31 @@ void vpx_vertical_band_5_4_scale_c(unsigned char *source,
   unsigned char *src = source;
 
   for (i = 0; i < dest_width; i++) {
-
     a = src[0 * src_pitch];
     b = src[1 * src_pitch];
     c = src[2 * src_pitch];
     d = src[3 * src_pitch];
     e = src[4 * src_pitch];
 
-    des[0 * dest_pitch] = (unsigned char) a;
+    des[0 * dest_pitch] = (unsigned char)a;
     des[1 * dest_pitch] = (unsigned char)((b * 192 + c * 64 + 128) >> 8);
     des[2 * dest_pitch] = (unsigned char)((c * 128 + d * 128 + 128) >> 8);
     des[3 * dest_pitch] = (unsigned char)((d * 64 + e * 192 + 128) >> 8);
 
     src++;
     des++;
-
   }
 }
 
-
 /****************************************************************************
  *
  *  ROUTINE       : vpx_horizontal_line_5_3_scale_c
  *
  *  INPUTS        : const unsigned char *source : Pointer to source data.
- *                  unsigned int source_width    : Stride of source.
+ *                  unsigned int source_width   : Stride of source.
  *                  unsigned char *dest         : Pointer to destination data.
- *                  unsigned int dest_width      : Stride of destination (NOT USED).
+ *                  unsigned int dest_width     : Stride of destination
+ *                                                (NOT USED).
  *
  *  OUTPUTS       : None.
  *
@@ -123,7 +118,7 @@ void vpx_horizontal_line_5_3_scale_c(const unsigned char *source,
   unsigned char *des = dest;
   const unsigned char *src = source;
 
-  (void) dest_width;
+  (void)dest_width;
 
   for (i = 0; i < source_width; i += 5) {
     a = src[0];
@@ -132,19 +127,17 @@ void vpx_horizontal_line_5_3_scale_c(const unsigned char *source,
     d = src[3];
     e = src[4];
 
-    des[0] = (unsigned char) a;
-    des[1] = (unsigned char)((b * 85  + c * 171 + 128) >> 8);
+    des[0] = (unsigned char)a;
+    des[1] = (unsigned char)((b * 85 + c * 171 + 128) >> 8);
     des[2] = (unsigned char)((d * 171 + e * 85 + 128) >> 8);
 
     src += 5;
     des += 3;
   }
-
 }
 
 void vpx_vertical_band_5_3_scale_c(unsigned char *source,
-                                   unsigned int src_pitch,
-                                   unsigned char *dest,
+                                   unsigned int src_pitch, unsigned char *dest,
                                    unsigned int dest_pitch,
                                    unsigned int dest_width) {
   unsigned int i;
@@ -153,20 +146,18 @@ void vpx_vertical_band_5_3_scale_c(unsigned char *source,
   unsigned char *src = source;
 
   for (i = 0; i < dest_width; i++) {
-
     a = src[0 * src_pitch];
     b = src[1 * src_pitch];
     c = src[2 * src_pitch];
     d = src[3 * src_pitch];
     e = src[4 * src_pitch];
 
-    des[0 * dest_pitch] = (unsigned char) a;
+    des[0 * dest_pitch] = (unsigned char)a;
     des[1 * dest_pitch] = (unsigned char)((b * 85 + c * 171 + 128) >> 8);
     des[2 * dest_pitch] = (unsigned char)((d * 171 + e * 85 + 128) >> 8);
 
     src++;
     des++;
-
   }
 }
 
@@ -175,9 +166,10 @@ void vpx_vertical_band_5_3_scale_c(unsigned char *source,
  *  ROUTINE       : vpx_horizontal_line_2_1_scale_c
  *
  *  INPUTS        : const unsigned char *source : Pointer to source data.
- *                  unsigned int source_width    : Stride of source.
+ *                  unsigned int source_width   : Stride of source.
  *                  unsigned char *dest         : Pointer to destination data.
- *                  unsigned int dest_width      : Stride of destination (NOT USED).
+ *                  unsigned int dest_width     : Stride of destination
+ *                                                (NOT USED).
  *
  *  OUTPUTS       : None.
  *
@@ -198,23 +190,22 @@ void vpx_horizontal_line_2_1_scale_c(const unsigned char *source,
   unsigned char *des = dest;
   const unsigned char *src = source;
 
-  (void) dest_width;
+  (void)dest_width;
 
   for (i = 0; i < source_width; i += 2) {
     a = src[0];
-    des [0] = (unsigned char)(a);
+    des[0] = (unsigned char)(a);
     src += 2;
     des += 1;
   }
 }
 
 void vpx_vertical_band_2_1_scale_c(unsigned char *source,
-                                   unsigned int src_pitch,
-                                   unsigned char *dest,
+                                   unsigned int src_pitch, unsigned char *dest,
                                    unsigned int dest_pitch,
                                    unsigned int dest_width) {
-  (void) dest_pitch;
-  (void) src_pitch;
+  (void)dest_pitch;
+  (void)src_pitch;
   memcpy(dest, source, dest_width);
 }
 
@@ -227,7 +218,7 @@ void vpx_vertical_band_2_1_scale_i_c(unsigned char *source,
   int temp;
   int width = dest_width;
 
-  (void) dest_pitch;
+  (void)dest_pitch;
 
   for (i = 0; i < width; i++) {
     temp = 8;
diff --git a/vpx_scale/generic/vpx_scale.c b/vpx_scale/generic/vpx_scale.c
index aaae4c7ad1f090719ae536b4c019e9397186ae7a..93bbd1deaffc81a85906ca1ad7663a15bb6d103c 100644
--- a/vpx_scale/generic/vpx_scale.c
+++ b/vpx_scale/generic/vpx_scale.c
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /****************************************************************************
  *
  *   Module Title :     scale.c
@@ -26,8 +25,8 @@
 #include "vpx_scale/yv12config.h"
 
 typedef struct {
-  int     expanded_frame_width;
-  int     expanded_frame_height;
+  int expanded_frame_width;
+  int expanded_frame_height;
 
   int HScale;
   int HRatio;
@@ -44,13 +43,16 @@ typedef struct {
  *  ROUTINE       : scale1d_2t1_i
  *
  *  INPUTS        : const unsigned char *source : Pointer to data to be scaled.
- *                  int source_step              : Number of pixels to step on in source.
- *                  unsigned int source_scale    : Scale for source (UNUSED).
- *                  unsigned int source_length   : Length of source (UNUSED).
+ *                  int source_step             : Number of pixels to step on
+ *                                                in source.
+ *                  unsigned int source_scale   : Scale for source (UNUSED).
+ *                  unsigned int source_length  : Length of source (UNUSED).
  *                  unsigned char *dest         : Pointer to output data array.
- *                  int dest_step                : Number of pixels to step on in destination.
- *                  unsigned int dest_scale      : Scale for destination (UNUSED).
- *                  unsigned int dest_length     : Length of destination.
+ *                  int dest_step               : Number of pixels to step on
+ *                                                in destination.
+ *                  unsigned int dest_scale     : Scale for destination
+ *                                                (UNUSED).
+ *                  unsigned int dest_length    : Length of destination.
  *
  *  OUTPUTS       : None.
  *
@@ -61,29 +63,22 @@ typedef struct {
  *  SPECIAL NOTES : None.
  *
  ****************************************************************************/
-static
-void scale1d_2t1_i
-(
-  const unsigned char *source,
-  int source_step,
-  unsigned int source_scale,
-  unsigned int source_length,
-  unsigned char *dest,
-  int dest_step,
-  unsigned int dest_scale,
-  unsigned int dest_length
-) {
+static void scale1d_2t1_i(const unsigned char *source, int source_step,
+                          unsigned int source_scale, unsigned int source_length,
+                          unsigned char *dest, int dest_step,
+                          unsigned int dest_scale, unsigned int dest_length) {
   unsigned int i, j;
   unsigned int temp;
   int source_pitch = source_step;
-  (void) source_length;
-  (void) source_scale;
-  (void) dest_scale;
+  (void)source_length;
+  (void)source_scale;
+  (void)dest_scale;
 
   source_step *= 2;
   dest[0] = source[0];
 
-  for (i = dest_step, j = source_step; i < dest_length * dest_step; i += dest_step, j += source_step) {
+  for (i = dest_step, j = source_step; i < dest_length * dest_step;
+       i += dest_step, j += source_step) {
     temp = 8;
     temp += 3 * source[j - source_pitch];
     temp += 10 * source[j];
@@ -98,13 +93,16 @@ void scale1d_2t1_i
  *  ROUTINE       : scale1d_2t1_ps
  *
  *  INPUTS        : const unsigned char *source : Pointer to data to be scaled.
- *                  int source_step              : Number of pixels to step on in source.
- *                  unsigned int source_scale    : Scale for source (UNUSED).
- *                  unsigned int source_length   : Length of source (UNUSED).
+ *                  int source_step             : Number of pixels to step on
+ *                                                in source.
+ *                  unsigned int source_scale   : Scale for source (UNUSED).
+ *                  unsigned int source_length  : Length of source (UNUSED).
  *                  unsigned char *dest         : Pointer to output data array.
- *                  int dest_step                : Number of pixels to step on in destination.
- *                  unsigned int dest_scale      : Scale for destination (UNUSED).
- *                  unsigned int dest_length     : Length of destination.
+ *                  int dest_step               : Number of pixels to step on
+ *                                                in destination.
+ *                  unsigned int dest_scale     : Scale for destination
+ *                                                (UNUSED).
+ *                  unsigned int dest_length    : Length of destination.
  *
  *  OUTPUTS       : None.
  *
@@ -115,23 +113,16 @@ void scale1d_2t1_i
  *  SPECIAL NOTES : None.
  *
  ****************************************************************************/
-static
-void scale1d_2t1_ps
-(
-  const unsigned char *source,
-  int source_step,
-  unsigned int source_scale,
-  unsigned int source_length,
-  unsigned char *dest,
-  int dest_step,
-  unsigned int dest_scale,
-  unsigned int dest_length
-) {
+static void scale1d_2t1_ps(const unsigned char *source, int source_step,
+                           unsigned int source_scale,
+                           unsigned int source_length, unsigned char *dest,
+                           int dest_step, unsigned int dest_scale,
+                           unsigned int dest_length) {
   unsigned int i, j;
 
-  (void) source_length;
-  (void) source_scale;
-  (void) dest_scale;
+  (void)source_length;
+  (void)source_scale;
+  (void)dest_scale;
 
   source_step *= 2;
   j = 0;
@@ -144,13 +135,15 @@ void scale1d_2t1_ps
  *  ROUTINE       : scale1d_c
  *
  *  INPUTS        : const unsigned char *source : Pointer to data to be scaled.
- *                  int source_step              : Number of pixels to step on in source.
- *                  unsigned int source_scale    : Scale for source.
- *                  unsigned int source_length   : Length of source (UNUSED).
+ *                  int source_step             : Number of pixels to step on
+ *                                                in source.
+ *                  unsigned int source_scale   : Scale for source.
+ *                  unsigned int source_length  : Length of source (UNUSED).
  *                  unsigned char *dest         : Pointer to output data array.
- *                  int dest_step                : Number of pixels to step on in destination.
- *                  unsigned int dest_scale      : Scale for destination.
- *                  unsigned int dest_length     : Length of destination.
+ *                  int dest_step               : Number of pixels to step on
+ *                                                in destination.
+ *                  unsigned int dest_scale     : Scale for destination.
+ *                  unsigned int dest_length    : Length of destination.
  *
  *  OUTPUTS       : None.
  *
@@ -161,18 +154,10 @@ void scale1d_2t1_ps
  *  SPECIAL NOTES : None.
  *
  ****************************************************************************/
-static
-void scale1d_c
-(
-  const unsigned char *source,
-  int source_step,
-  unsigned int source_scale,
-  unsigned int source_length,
-  unsigned char *dest,
-  int dest_step,
-  unsigned int dest_scale,
-  unsigned int dest_length
-) {
+static void scale1d_c(const unsigned char *source, int source_step,
+                      unsigned int source_scale, unsigned int source_length,
+                      unsigned char *dest, int dest_step,
+                      unsigned int dest_scale, unsigned int dest_length) {
   unsigned int i;
   unsigned int round_value = dest_scale / 2;
   unsigned int left_modifier = dest_scale;
@@ -180,14 +165,17 @@ void scale1d_c
   unsigned char left_pixel = *source;
   unsigned char right_pixel = *(source + source_step);
 
-  (void) source_length;
+  (void)source_length;
 
   /* These asserts are needed if there are boundary issues... */
   /*assert ( dest_scale > source_scale );*/
-  /*assert ( (source_length-1) * dest_scale >= (dest_length-1) * source_scale );*/
+  /*assert ( (source_length-1) * dest_scale >= (dest_length-1) * source_scale
+   * );*/
 
   for (i = 0; i < dest_length * dest_step; i += dest_step) {
-    dest[i] = (char)((left_modifier * left_pixel + right_modifier * right_pixel + round_value) / dest_scale);
+    dest[i] = (char)((left_modifier * left_pixel +
+                      right_modifier * right_pixel + round_value) /
+                     dest_scale);
 
     right_modifier += source_scale;
 
@@ -206,21 +194,29 @@ void scale1d_c
  *
  *  ROUTINE       : Scale2D
  *
- *  INPUTS        : const unsigned char *source  : Pointer to data to be scaled.
- *                  int source_pitch              : Stride of source image.
- *                  unsigned int source_width     : Width of input image.
- *                  unsigned int source_height    : Height of input image.
- *                  unsigned char *dest          : Pointer to output data array.
- *                  int dest_pitch                : Stride of destination image.
- *                  unsigned int dest_width       : Width of destination image.
- *                  unsigned int dest_height      : Height of destination image.
- *                  unsigned char *temp_area      : Pointer to temp work area.
+ *  INPUTS        : const unsigned char *source    : Pointer to data to be
+ *                                                   scaled.
+ *                  int source_pitch               : Stride of source image.
+ *                  unsigned int source_width      : Width of input image.
+ *                  unsigned int source_height     : Height of input image.
+ *                  unsigned char *dest            : Pointer to output data
+ *                                                   array.
+ *                  int dest_pitch                 : Stride of destination
+ *                                                   image.
+ *                  unsigned int dest_width        : Width of destination image.
+ *                  unsigned int dest_height       : Height of destination
+ *                                                   image.
+ *                  unsigned char *temp_area       : Pointer to temp work area.
  *                  unsigned char temp_area_height : Height of temp work area.
- *                  unsigned int hscale          : Horizontal scale factor numerator.
- *                  unsigned int hratio          : Horizontal scale factor denominator.
- *                  unsigned int vscale          : Vertical scale factor numerator.
- *                  unsigned int vratio          : Vertical scale factor denominator.
- *                  unsigned int interlaced      : Interlace flag.
+ *                  unsigned int hscale            : Horizontal scale factor
+ *                                                   numerator.
+ *                  unsigned int hratio            : Horizontal scale factor
+ *                                                   denominator.
+ *                  unsigned int vscale            : Vertical scale factor
+ *                                                   numerator.
+ *                  unsigned int vratio            : Vertical scale factor
+ *                                                   denominator.
+ *                  unsigned int interlaced        : Interlace flag.
  *
  *  OUTPUTS       : None.
  *
@@ -232,48 +228,40 @@ void scale1d_c
  *                  caching.
  *
  ****************************************************************************/
-static
-void Scale2D
-(
-  /*const*/
-  unsigned char *source,
-  int source_pitch,
-  unsigned int source_width,
-  unsigned int source_height,
-  unsigned char *dest,
-  int dest_pitch,
-  unsigned int dest_width,
-  unsigned int dest_height,
-  unsigned char *temp_area,
-  unsigned char temp_area_height,
-  unsigned int hscale,
-  unsigned int hratio,
-  unsigned int vscale,
-  unsigned int vratio,
-  unsigned int interlaced
-) {
+static void Scale2D(
+    /*const*/
+    unsigned char *source, int source_pitch, unsigned int source_width,
+    unsigned int source_height, unsigned char *dest, int dest_pitch,
+    unsigned int dest_width, unsigned int dest_height, unsigned char *temp_area,
+    unsigned char temp_area_height, unsigned int hscale, unsigned int hratio,
+    unsigned int vscale, unsigned int vratio, unsigned int interlaced) {
   /*unsigned*/
   int i, j, k;
   int bands;
   int dest_band_height;
   int source_band_height;
 
-  typedef void (*Scale1D)(const unsigned char * source, int source_step, unsigned int source_scale, unsigned int source_length,
-                          unsigned char * dest, int dest_step, unsigned int dest_scale, unsigned int dest_length);
+  typedef void (*Scale1D)(const unsigned char *source, int source_step,
+                          unsigned int source_scale, unsigned int source_length,
+                          unsigned char *dest, int dest_step,
+                          unsigned int dest_scale, unsigned int dest_length);
 
   Scale1D Scale1Dv = scale1d_c;
   Scale1D Scale1Dh = scale1d_c;
 
-  void (*horiz_line_scale)(const unsigned char *, unsigned int, unsigned char *, unsigned int) = NULL;
-  void (*vert_band_scale)(unsigned char *, unsigned int, unsigned char *, unsigned int, unsigned int) = NULL;
+  void (*horiz_line_scale)(const unsigned char *, unsigned int, unsigned char *,
+                           unsigned int) = NULL;
+  void (*vert_band_scale)(unsigned char *, unsigned int, unsigned char *,
+                          unsigned int, unsigned int) = NULL;
 
   int ratio_scalable = 1;
   int interpolation = 0;
 
-  unsigned char *source_base; /* = (unsigned char *) ((source_pitch >= 0) ? source : (source + ((source_height-1) * source_pitch))); */
+  unsigned char *source_base; /* = (unsigned char *) ((source_pitch >= 0) ?
+                                 source : (source + ((source_height-1) *
+                                 source_pitch))); */
   unsigned char *line_src;
 
-
   source_base = (unsigned char *)source;
 
   if (source_pitch < 0) {
@@ -309,32 +297,30 @@ void Scale2D
   switch (vratio * 10 / vscale) {
     case 8:
       /* 4-5 Scale in vertical direction */
-      vert_band_scale     = vpx_vertical_band_5_4_scale;
-      source_band_height  = 5;
-      dest_band_height    = 4;
+      vert_band_scale = vpx_vertical_band_5_4_scale;
+      source_band_height = 5;
+      dest_band_height = 4;
       break;
     case 6:
       /* 3-5 Scale in vertical direction */
-      vert_band_scale     = vpx_vertical_band_5_3_scale;
-      source_band_height  = 5;
-      dest_band_height    = 3;
+      vert_band_scale = vpx_vertical_band_5_3_scale;
+      source_band_height = 5;
+      dest_band_height = 3;
       break;
     case 5:
       /* 1-2 Scale in vertical direction */
 
       if (interlaced) {
         /* if the content is interlaced, point sampling is used */
-        vert_band_scale     = vpx_vertical_band_2_1_scale;
+        vert_band_scale = vpx_vertical_band_2_1_scale;
       } else {
-
         interpolation = 1;
         /* if the content is progressive, interpolation is used */
-        vert_band_scale     = vpx_vertical_band_2_1_scale_i;
-
+        vert_band_scale = vpx_vertical_band_2_1_scale_i;
       }
 
-      source_band_height  = 2;
-      dest_band_height    = 1;
+      source_band_height = 2;
+      dest_band_height = 1;
       break;
     default:
       /* The ratio is not acceptable now */
@@ -349,49 +335,50 @@ void Scale2D
       for (k = 0; k < (int)dest_height; k++) {
         horiz_line_scale(source, source_width, dest, dest_width);
         source += source_pitch;
-        dest   += dest_pitch;
+        dest += dest_pitch;
       }
 
       return;
     }
 
     if (interpolation) {
-      if (source < source_base)
-        source = source_base;
+      if (source < source_base) source = source_base;
 
       horiz_line_scale(source, source_width, temp_area, dest_width);
     }
 
-    for (k = 0; k < (int)(dest_height + dest_band_height - 1) / dest_band_height; k++) {
+    for (k = 0;
+         k < (int)(dest_height + dest_band_height - 1) / dest_band_height;
+         k++) {
       /* scale one band horizontally */
       for (i = 0; i < source_band_height; i++) {
         /* Trap case where we could read off the base of the source buffer */
 
         line_src = (unsigned char *)source + i * source_pitch;
 
-        if (line_src < source_base)
-          line_src = source_base;
+        if (line_src < source_base) line_src = source_base;
 
         horiz_line_scale(line_src, source_width,
-                         temp_area + (i + 1)*dest_pitch, dest_width);
+                         temp_area + (i + 1) * dest_pitch, dest_width);
       }
 
       /* Vertical scaling is in place */
-      vert_band_scale(temp_area + dest_pitch, dest_pitch, dest, dest_pitch, dest_width);
+      vert_band_scale(temp_area + dest_pitch, dest_pitch, dest, dest_pitch,
+                      dest_width);
 
       if (interpolation)
-        memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_width);
+        memcpy(temp_area, temp_area + source_band_height * dest_pitch,
+               dest_width);
 
       /* Next band... */
-      source += (unsigned long) source_band_height  * source_pitch;
-      dest   += (unsigned long) dest_band_height * dest_pitch;
+      source += (unsigned long)source_band_height * source_pitch;
+      dest += (unsigned long)dest_band_height * dest_pitch;
     }
 
     return;
   }
 
-  if (hscale == 2 && hratio == 1)
-    Scale1Dh = scale1d_2t1_ps;
+  if (hscale == 2 && hratio == 1) Scale1Dh = scale1d_2t1_ps;
 
   if (vscale == 2 && vratio == 1) {
     if (interlaced)
@@ -403,24 +390,27 @@ void Scale2D
   if (source_height == dest_height) {
     /* for each band of the image */
     for (k = 0; k < (int)dest_height; k++) {
-      Scale1Dh(source, 1, hscale, source_width + 1, dest, 1, hratio, dest_width);
+      Scale1Dh(source, 1, hscale, source_width + 1, dest, 1, hratio,
+               dest_width);
       source += source_pitch;
-      dest   += dest_pitch;
+      dest += dest_pitch;
     }
 
     return;
   }
 
   if (dest_height > source_height) {
-    dest_band_height   = temp_area_height - 1;
+    dest_band_height = temp_area_height - 1;
     source_band_height = dest_band_height * source_height / dest_height;
   } else {
     source_band_height = temp_area_height - 1;
-    dest_band_height   = source_band_height * vratio / vscale;
+    dest_band_height = source_band_height * vratio / vscale;
   }
 
-  /* first row needs to be done so that we can stay one row ahead for vertical zoom */
-  Scale1Dh(source, 1, hscale, source_width + 1, temp_area, 1, hratio, dest_width);
+  /* first row needs to be done so that we can stay one row ahead for vertical
+   * zoom */
+  Scale1Dh(source, 1, hscale, source_width + 1, temp_area, 1, hratio,
+           dest_width);
 
   /* for each band of the image */
   bands = (dest_height + dest_band_height - 1) / dest_band_height;
@@ -428,12 +418,13 @@ void Scale2D
   for (k = 0; k < bands; k++) {
     /* scale one band horizontally */
     for (i = 1; i < source_band_height + 1; i++) {
-      if (k * source_band_height + i < (int) source_height) {
+      if (k * source_band_height + i < (int)source_height) {
         Scale1Dh(source + i * source_pitch, 1, hscale, source_width + 1,
                  temp_area + i * dest_pitch, 1, hratio, dest_width);
       } else { /*  Duplicate the last row */
         /* copy temp_area row 0 over from last row in the past */
-        memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
+        memcpy(temp_area + i * dest_pitch, temp_area + (i - 1) * dest_pitch,
+               dest_pitch);
       }
     }
 
@@ -448,7 +439,7 @@ void Scale2D
 
     /* move to the next band */
     source += source_band_height * source_pitch;
-    dest   += dest_band_height * dest_pitch;
+    dest += dest_band_height * dest_pitch;
   }
 }
 
@@ -456,15 +447,21 @@ void Scale2D
  *
  *  ROUTINE       : vpx_scale_frame
  *
- *  INPUTS        : YV12_BUFFER_CONFIG *src       : Pointer to frame to be scaled.
- *                  YV12_BUFFER_CONFIG *dst       : Pointer to buffer to hold scaled frame.
- *                  unsigned char *temp_area      : Pointer to temp work area.
+ *  INPUTS        : YV12_BUFFER_CONFIG *src        : Pointer to frame to be
+ *                                                   scaled.
+ *                  YV12_BUFFER_CONFIG *dst        : Pointer to buffer to hold
+ *                                                   scaled frame.
+ *                  unsigned char *temp_area       : Pointer to temp work area.
  *                  unsigned char temp_area_height : Height of temp work area.
- *                  unsigned int hscale          : Horizontal scale factor numerator.
- *                  unsigned int hratio          : Horizontal scale factor denominator.
- *                  unsigned int vscale          : Vertical scale factor numerator.
- *                  unsigned int vratio          : Vertical scale factor denominator.
- *                  unsigned int interlaced      : Interlace flag.
+ *                  unsigned int hscale            : Horizontal scale factor
+ *                                                   numerator.
+ *                  unsigned int hratio            : Horizontal scale factor
+ *                                                   denominator.
+ *                  unsigned int vscale            : Vertical scale factor
+ *                                                   numerator.
+ *                  unsigned int vratio            : Vertical scale factor
+ *                                                   denominator.
+ *                  unsigned int interlaced        : Interlace flag.
  *
  *  OUTPUTS       : None.
  *
@@ -476,56 +473,59 @@ void Scale2D
  *                  caching.
  *
  ****************************************************************************/
-void vpx_scale_frame
-(
-  YV12_BUFFER_CONFIG *src,
-  YV12_BUFFER_CONFIG *dst,
-  unsigned char *temp_area,
-  unsigned char temp_height,
-  unsigned int hscale,
-  unsigned int hratio,
-  unsigned int vscale,
-  unsigned int vratio,
-  unsigned int interlaced
-) {
+void vpx_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
+                     unsigned char *temp_area, unsigned char temp_height,
+                     unsigned int hscale, unsigned int hratio,
+                     unsigned int vscale, unsigned int vratio,
+                     unsigned int interlaced) {
   int i;
   int dw = (hscale - 1 + src->y_width * hratio) / hscale;
   int dh = (vscale - 1 + src->y_height * vratio) / vscale;
 
   /* call our internal scaling routines!! */
-  Scale2D((unsigned char *) src->y_buffer, src->y_stride, src->y_width, src->y_height,
-          (unsigned char *) dst->y_buffer, dst->y_stride, dw, dh,
+  Scale2D((unsigned char *)src->y_buffer, src->y_stride, src->y_width,
+          src->y_height, (unsigned char *)dst->y_buffer, dst->y_stride, dw, dh,
           temp_area, temp_height, hscale, hratio, vscale, vratio, interlaced);
 
   if (dw < (int)dst->y_width)
     for (i = 0; i < dh; i++)
-      memset(dst->y_buffer + i * dst->y_stride + dw - 1, dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1);
+      memset(dst->y_buffer + i * dst->y_stride + dw - 1,
+             dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1);
 
   if (dh < (int)dst->y_height)
     for (i = dh - 1; i < (int)dst->y_height; i++)
-      memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
+      memcpy(dst->y_buffer + i * dst->y_stride,
+             dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
 
-  Scale2D((unsigned char *) src->u_buffer, src->uv_stride, src->uv_width, src->uv_height,
-          (unsigned char *) dst->u_buffer, dst->uv_stride, dw / 2, dh / 2,
-          temp_area, temp_height, hscale, hratio, vscale, vratio, interlaced);
+  Scale2D((unsigned char *)src->u_buffer, src->uv_stride, src->uv_width,
+          src->uv_height, (unsigned char *)dst->u_buffer, dst->uv_stride,
+          dw / 2, dh / 2, temp_area, temp_height, hscale, hratio, vscale,
+          vratio, interlaced);
 
   if (dw / 2 < (int)dst->uv_width)
     for (i = 0; i < dst->uv_height; i++)
-      memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
+      memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1,
+             dst->u_buffer[i * dst->uv_stride + dw / 2 - 2],
+             dst->uv_width - dw / 2 + 1);
 
   if (dh / 2 < (int)dst->uv_height)
     for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
-      memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
+      memcpy(dst->u_buffer + i * dst->uv_stride,
+             dst->u_buffer + (dh / 2 - 2) * dst->uv_stride, dst->uv_width);
 
-  Scale2D((unsigned char *) src->v_buffer, src->uv_stride, src->uv_width, src->uv_height,
-          (unsigned char *) dst->v_buffer, dst->uv_stride, dw / 2, dh / 2,
-          temp_area, temp_height, hscale, hratio, vscale, vratio, interlaced);
+  Scale2D((unsigned char *)src->v_buffer, src->uv_stride, src->uv_width,
+          src->uv_height, (unsigned char *)dst->v_buffer, dst->uv_stride,
+          dw / 2, dh / 2, temp_area, temp_height, hscale, hratio, vscale,
+          vratio, interlaced);
 
   if (dw / 2 < (int)dst->uv_width)
     for (i = 0; i < dst->uv_height; i++)
-      memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
+      memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1,
+             dst->v_buffer[i * dst->uv_stride + dw / 2 - 2],
+             dst->uv_width - dw / 2 + 1);
 
-  if (dh / 2 < (int) dst->uv_height)
+  if (dh / 2 < (int)dst->uv_height)
     for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
-      memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
+      memcpy(dst->v_buffer + i * dst->uv_stride,
+             dst->v_buffer + (dh / 2 - 2) * dst->uv_stride, dst->uv_width);
 }
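
Reviewer note on the hunk above: dw and dh are computed with a rounding-up
division, (scale - 1 + n * ratio) / scale, i.e. ceil(n * ratio / scale), so a
fractional output pixel is never dropped. A minimal standalone sketch of that
arithmetic (the helper name and the sample values are illustrative, not from
the tree):

    #include <stdio.h>

    /* ceil(w * ratio / scale) via (scale - 1 + w * ratio) / scale */
    int scaled_dim(int w, unsigned int ratio, unsigned int scale) {
      return (int)((scale - 1 + (unsigned int)w * ratio) / scale);
    }

    int main(void) {
      /* 4/5 downscale: 320 -> 256 exactly; 321 -> ceil(256.8) = 257 */
      printf("%d %d\n", scaled_dim(320, 4, 5), scaled_dim(321, 4, 5));
      return 0;
    }
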
diff --git a/vpx_scale/generic/yv12config.c b/vpx_scale/generic/yv12config.c
index c56ab0ede7f2faca356f51875b26f90e06e187b6..ecc2d76c7e30a53e829099da187e72ef20972a3f 100644
--- a/vpx_scale/generic/yv12config.c
+++ b/vpx_scale/generic/yv12config.c
@@ -22,10 +22,9 @@
  *
  ****************************************************************************/
 #define yv12_align_addr(addr, align) \
-    (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))
+  (void *)(((size_t)(addr) + ((align)-1)) & (size_t)-(align))
 
-int
-vpx_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
+int vpx_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
   if (ybf) {
     // If libvpx is using frame buffer callbacks then buffer_alloc_sz must
     // not be set.
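
A note on the yv12_align_addr macro reformatted at the top of this hunk: for a
power-of-two align, (size_t)-(align) equals ~(align - 1), so the expression
rounds addr up to the next align boundary. A hedged sketch of the same
arithmetic on plain integers (align_up is a made-up name):

    #include <assert.h>
    #include <stddef.h>

    /* align must be a power of two; mirrors the macro's arithmetic */
    size_t align_up(size_t addr, size_t align) {
      return (addr + (align - 1)) & (size_t)-align;
    }

    int main(void) {
      assert(align_up(100, 32) == 128);  /* (100 + 31) & ~31 */
      assert(align_up(128, 32) == 128);  /* already aligned: unchanged */
      return 0;
    }
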
@@ -44,8 +43,8 @@ vpx_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
   return 0;
 }
 
-int vpx_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
-                                  int width, int height, int border) {
+int vpx_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width,
+                                  int height, int border) {
   if (ybf) {
     int aligned_width = (width + 15) & ~15;
     int aligned_height = (height + 15) & ~15;
@@ -64,20 +63,18 @@ int vpx_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
       ybf->buffer_alloc_sz = frame_size;
     }
 
-    if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size)
-      return -1;
+    if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size) return -1;
 
     /* Only support allocating buffers that have a border that's a multiple
      * of 32. The border restriction is required to get 16-byte alignment of
      * the start of the chroma rows without introducing an arbitrary gap
      * between planes, which would break the semantics of things like
      * vpx_img_set_rect(). */
-    if (border & 0x1f)
-      return -3;
+    if (border & 0x1f) return -3;
 
     ybf->y_crop_width = width;
     ybf->y_crop_height = height;
-    ybf->y_width  = aligned_width;
+    ybf->y_width = aligned_width;
     ybf->y_height = aligned_height;
     ybf->y_stride = y_stride;
 
@@ -95,8 +92,10 @@ int vpx_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
     ybf->frame_size = frame_size;
 
     ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border;
-    ybf->u_buffer = ybf->buffer_alloc + yplane_size + (border / 2  * uv_stride) + border / 2;
-    ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size + (border / 2  * uv_stride) + border / 2;
+    ybf->u_buffer =
+        ybf->buffer_alloc + yplane_size + (border / 2 * uv_stride) + border / 2;
+    ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size +
+                    (border / 2 * uv_stride) + border / 2;
     ybf->alpha_buffer = NULL;
 
     ybf->corrupted = 0; /* assume not corrupted by errors */
@@ -105,8 +104,8 @@ int vpx_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
   return -2;
 }
 
-int vpx_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
-                                int width, int height, int border) {
+int vpx_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+                                int border) {
   if (ybf) {
     vpx_yv12_de_alloc_frame_buffer(ybf);
     return vpx_yv12_realloc_frame_buffer(ybf, width, height, border);
@@ -134,31 +133,28 @@ int vpx_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
   return 0;
 }
 
-int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
-                             int width, int height,
+int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                              int ss_x, int ss_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                              int use_highbitdepth,
 #endif
-                             int border,
-                             int byte_alignment,
+                             int border, int byte_alignment,
                              vpx_codec_frame_buffer_t *fb,
-                             vpx_get_frame_buffer_cb_fn_t cb,
-                             void *cb_priv) {
+                             vpx_get_frame_buffer_cb_fn_t cb, void *cb_priv) {
   if (ybf) {
     const int vpx_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
     const int aligned_width = (width + 7) & ~7;
     const int aligned_height = (height + 7) & ~7;
     const int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
-    const uint64_t yplane_size = (aligned_height + 2 * border) *
-                                 (uint64_t)y_stride + byte_alignment;
+    const uint64_t yplane_size =
+        (aligned_height + 2 * border) * (uint64_t)y_stride + byte_alignment;
     const int uv_width = aligned_width >> ss_x;
     const int uv_height = aligned_height >> ss_y;
     const int uv_stride = y_stride >> ss_x;
     const int uv_border_w = border >> ss_x;
     const int uv_border_h = border >> ss_y;
-    const uint64_t uvplane_size = (uv_height + 2 * uv_border_h) *
-                                  (uint64_t)uv_stride + byte_alignment;
+    const uint64_t uvplane_size =
+        (uv_height + 2 * uv_border_h) * (uint64_t)uv_stride + byte_alignment;
 
 #if CONFIG_ALPHA
     const int alpha_width = aligned_width;
@@ -166,14 +162,16 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
     const int alpha_stride = y_stride;
     const int alpha_border_w = border;
     const int alpha_border_h = border;
-    const uint64_t alpha_plane_size = (alpha_height + 2 * alpha_border_h) *
-                                      (uint64_t)alpha_stride + byte_alignment;
+    const uint64_t alpha_plane_size =
+        (alpha_height + 2 * alpha_border_h) * (uint64_t)alpha_stride +
+        byte_alignment;
 #if CONFIG_VPX_HIGHBITDEPTH
-    const uint64_t frame_size = (1 + use_highbitdepth) *
+    const uint64_t frame_size =
+        (1 + use_highbitdepth) *
         (yplane_size + 2 * uvplane_size + alpha_plane_size);
 #else
-    const uint64_t frame_size = yplane_size + 2 * uvplane_size +
-                                alpha_plane_size;
+    const uint64_t frame_size =
+        yplane_size + 2 * uvplane_size + alpha_plane_size;
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 #else
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -192,15 +190,12 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
 
       assert(fb != NULL);
 
-      if (external_frame_size != (size_t)external_frame_size)
-        return -1;
+      if (external_frame_size != (size_t)external_frame_size) return -1;
 
       // Allocation to hold larger frame, or first allocation.
-      if (cb(cb_priv, (size_t)external_frame_size, fb) < 0)
-        return -1;
+      if (cb(cb_priv, (size_t)external_frame_size, fb) < 0) return -1;
 
-      if (fb->data == NULL || fb->size < external_frame_size)
-        return -1;
+      if (fb->data == NULL || fb->size < external_frame_size) return -1;
 
       ybf->buffer_alloc = (uint8_t *)yv12_align_addr(fb->data, 32);
 
@@ -217,12 +212,10 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
       vpx_free(ybf->buffer_alloc);
       ybf->buffer_alloc = NULL;
 
-      if (frame_size != (size_t)frame_size)
-        return -1;
+      if (frame_size != (size_t)frame_size) return -1;
 
       ybf->buffer_alloc = (uint8_t *)vpx_memalign(32, (size_t)frame_size);
-      if (!ybf->buffer_alloc)
-        return -1;
+      if (!ybf->buffer_alloc) return -1;
 
       ybf->buffer_alloc_sz = (int)frame_size;
 
@@ -237,12 +230,11 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
      * the start of the chroma rows without introducing an arbitrary gap
      * between planes, which would break the semantics of things like
      * vpx_img_set_rect(). */
-    if (border & 0x1f)
-      return -3;
+    if (border & 0x1f) return -3;
 
     ybf->y_crop_width = width;
     ybf->y_crop_height = height;
-    ybf->y_width  = aligned_width;
+    ybf->y_width = aligned_width;
     ybf->y_height = aligned_height;
     ybf->y_stride = y_stride;
 
@@ -273,17 +265,19 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
     ybf->u_buffer = (uint8_t *)yv12_align_addr(
         buf + yplane_size + (uv_border_h * uv_stride) + uv_border_w,
         vpx_byte_align);
-    ybf->v_buffer = (uint8_t *)yv12_align_addr(
-        buf + yplane_size + uvplane_size + (uv_border_h * uv_stride) +
-        uv_border_w, vpx_byte_align);
+    ybf->v_buffer =
+        (uint8_t *)yv12_align_addr(buf + yplane_size + uvplane_size +
+                                       (uv_border_h * uv_stride) + uv_border_w,
+                                   vpx_byte_align);
 
 #if CONFIG_ALPHA
     ybf->alpha_width = alpha_width;
     ybf->alpha_height = alpha_height;
     ybf->alpha_stride = alpha_stride;
     ybf->alpha_buffer = (uint8_t *)yv12_align_addr(
-        buf + yplane_size + 2 * uvplane_size +
-        (alpha_border_h * alpha_stride) + alpha_border_w, vpx_byte_align);
+        buf + yplane_size + 2 * uvplane_size + (alpha_border_h * alpha_stride) +
+            alpha_border_w,
+        vpx_byte_align);
 #endif
     ybf->corrupted = 0; /* assume not corrupted by errors */
     return 0;
@@ -291,14 +285,12 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
   return -2;
 }
 
-int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
-                           int width, int height,
+int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                            int ss_x, int ss_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                            int use_highbitdepth,
 #endif
-                           int border,
-                           int byte_alignment) {
+                           int border, int byte_alignment) {
   if (ybf) {
     vpx_free_frame_buffer(ybf);
     return vpx_realloc_frame_buffer(ybf, width, height, ss_x, ss_y,
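
To sanity-check the reflowed size expressions in vpx_realloc_frame_buffer: the
stride covers the bordered row rounded up to 32, and each plane adds
byte_alignment slack. A worked example under assumed inputs (640x480, border
32, ss_x = ss_y = 1, byte_alignment 0; not a test from the tree):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const int border = 32, byte_alignment = 0;
      const int aligned_width = (640 + 7) & ~7;   /* 640 */
      const int aligned_height = (480 + 7) & ~7;  /* 480 */
      const int y_stride = ((aligned_width + 2 * border) + 31) & ~31; /* 704 */
      const uint64_t yplane_size =
          (aligned_height + 2 * border) * (uint64_t)y_stride + byte_alignment;
      const int uv_stride = y_stride >> 1;  /* 352 */
      const uint64_t uvplane_size =
          ((aligned_height >> 1) + 2 * (border >> 1)) * (uint64_t)uv_stride +
          byte_alignment;
      /* 382976 + 2 * 95744 = 574464 bytes before the 32-byte base alignment */
      printf("%llu\n", (unsigned long long)(yplane_size + 2 * uvplane_size));
      return 0;
    }
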
diff --git a/vpx_scale/generic/yv12extend.c b/vpx_scale/generic/yv12extend.c
index 4bdc8414a480b66fdf9f59e409e7a79b78b0aec4..ce7a287f0c8159fa6936c07dd3a154dbb70404fb 100644
--- a/vpx_scale/generic/yv12extend.c
+++ b/vpx_scale/generic/yv12extend.c
@@ -19,9 +19,8 @@
 #include "vp10/common/common.h"
 #endif
 
-static void extend_plane(uint8_t *const src, int src_stride,
-                         int width, int height,
-                         int extend_top, int extend_left,
+static void extend_plane(uint8_t *const src, int src_stride, int width,
+                         int height, int extend_top, int extend_left,
                          int extend_bottom, int extend_right) {
   int i;
   const int linesize = extend_left + extend_right + width;
@@ -61,9 +60,8 @@ static void extend_plane(uint8_t *const src, int src_stride,
 }
 
 #if CONFIG_VPX_HIGHBITDEPTH
-static void extend_plane_high(uint8_t *const src8, int src_stride,
-                              int width, int height,
-                              int extend_top, int extend_left,
+static void extend_plane_high(uint8_t *const src8, int src_stride, int width,
+                              int height, int extend_top, int extend_left,
                               int extend_bottom, int extend_right) {
   int i;
   const int linesize = extend_left + extend_right + width;
@@ -115,44 +113,35 @@ void vpx_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    extend_plane_high(
-        ybf->y_buffer, ybf->y_stride,
-        ybf->y_crop_width, ybf->y_crop_height,
-        ybf->border, ybf->border,
-        ybf->border + ybf->y_height - ybf->y_crop_height,
-        ybf->border + ybf->y_width - ybf->y_crop_width);
-
-    extend_plane_high(
-        ybf->u_buffer, ybf->uv_stride,
-        ybf->uv_crop_width, ybf->uv_crop_height,
-        uv_border, uv_border,
-        uv_border + ybf->uv_height - ybf->uv_crop_height,
-        uv_border + ybf->uv_width - ybf->uv_crop_width);
-
-    extend_plane_high(
-        ybf->v_buffer, ybf->uv_stride,
-        ybf->uv_crop_width, ybf->uv_crop_height,
-        uv_border, uv_border,
-        uv_border + ybf->uv_height - ybf->uv_crop_height,
-        uv_border + ybf->uv_width - ybf->uv_crop_width);
+    extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
+                      ybf->y_crop_height, ybf->border, ybf->border,
+                      ybf->border + ybf->y_height - ybf->y_crop_height,
+                      ybf->border + ybf->y_width - ybf->y_crop_width);
+
+    extend_plane_high(ybf->u_buffer, ybf->uv_stride, ybf->uv_crop_width,
+                      ybf->uv_crop_height, uv_border, uv_border,
+                      uv_border + ybf->uv_height - ybf->uv_crop_height,
+                      uv_border + ybf->uv_width - ybf->uv_crop_width);
+
+    extend_plane_high(ybf->v_buffer, ybf->uv_stride, ybf->uv_crop_width,
+                      ybf->uv_crop_height, uv_border, uv_border,
+                      uv_border + ybf->uv_height - ybf->uv_crop_height,
+                      uv_border + ybf->uv_width - ybf->uv_crop_width);
     return;
   }
 #endif
-  extend_plane(ybf->y_buffer, ybf->y_stride,
-               ybf->y_crop_width, ybf->y_crop_height,
-               ybf->border, ybf->border,
+  extend_plane(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
+               ybf->y_crop_height, ybf->border, ybf->border,
                ybf->border + ybf->y_height - ybf->y_crop_height,
                ybf->border + ybf->y_width - ybf->y_crop_width);
 
-  extend_plane(ybf->u_buffer, ybf->uv_stride,
-               ybf->uv_crop_width, ybf->uv_crop_height,
-               uv_border, uv_border,
+  extend_plane(ybf->u_buffer, ybf->uv_stride, ybf->uv_crop_width,
+               ybf->uv_crop_height, uv_border, uv_border,
                uv_border + ybf->uv_height - ybf->uv_crop_height,
                uv_border + ybf->uv_width - ybf->uv_crop_width);
 
-  extend_plane(ybf->v_buffer, ybf->uv_stride,
-               ybf->uv_crop_width, ybf->uv_crop_height,
-               uv_border, uv_border,
+  extend_plane(ybf->v_buffer, ybf->uv_stride, ybf->uv_crop_width,
+               ybf->uv_crop_height, uv_border, uv_border,
                uv_border + ybf->uv_height - ybf->uv_crop_height,
                uv_border + ybf->uv_width - ybf->uv_crop_width);
 }
@@ -175,29 +164,25 @@ static void extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) {
 
 #if CONFIG_VPX_HIGHBITDEPTH
   if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    extend_plane_high(ybf->y_buffer, ybf->y_stride,
-                      ybf->y_crop_width, ybf->y_crop_height,
-                      ext_size, ext_size,
+    extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
+                      ybf->y_crop_height, ext_size, ext_size,
                       ext_size + ybf->y_height - ybf->y_crop_height,
                       ext_size + ybf->y_width - ybf->y_crop_width);
-    extend_plane_high(ybf->u_buffer, ybf->uv_stride,
-                      c_w, c_h, c_et, c_el, c_eb, c_er);
-    extend_plane_high(ybf->v_buffer, ybf->uv_stride,
-                      c_w, c_h, c_et, c_el, c_eb, c_er);
+    extend_plane_high(ybf->u_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb,
+                      c_er);
+    extend_plane_high(ybf->v_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb,
+                      c_er);
     return;
   }
 #endif
-  extend_plane(ybf->y_buffer, ybf->y_stride,
-               ybf->y_crop_width, ybf->y_crop_height,
-               ext_size, ext_size,
+  extend_plane(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
+               ybf->y_crop_height, ext_size, ext_size,
                ext_size + ybf->y_height - ybf->y_crop_height,
                ext_size + ybf->y_width - ybf->y_crop_width);
 
-  extend_plane(ybf->u_buffer, ybf->uv_stride,
-               c_w, c_h, c_et, c_el, c_eb, c_er);
+  extend_plane(ybf->u_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er);
 
-  extend_plane(ybf->v_buffer, ybf->uv_stride,
-               c_w, c_h, c_et, c_el, c_eb, c_er);
+  extend_plane(ybf->v_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er);
 }
 
 void vpx_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
@@ -205,8 +190,9 @@ void vpx_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
 }
 
 void vpx_extend_frame_inner_borders_c(YV12_BUFFER_CONFIG *ybf) {
-  const int inner_bw = (ybf->border > VPXINNERBORDERINPIXELS) ?
-                       VPXINNERBORDERINPIXELS : ybf->border;
+  const int inner_bw = (ybf->border > VPXINNERBORDERINPIXELS)
+                           ? VPXINNERBORDERINPIXELS
+                           : ybf->border;
   extend_frame(ybf, inner_bw);
 }
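
Background for the extend_plane call sites reflowed above: border extension
replicates the outermost pixels, filling each row's left/right margins and
then copying whole bordered rows outward at the top and bottom. A simplified
scalar model, assuming equal borders on all sides and a buffer that really has
border bytes around src (the tree's version also handles crop vs. aligned
sizes):

    #include <stdint.h>
    #include <string.h>

    void extend_plane_sketch(uint8_t *src, int stride, int w, int h,
                             int border) {
      const int linesize = w + 2 * border;
      int i;
      /* left/right: smear each row's edge pixels across its margins */
      for (i = 0; i < h; ++i) {
        uint8_t *row = src + i * stride;
        memset(row - border, row[0], border);
        memset(row + w, row[w - 1], border);
      }
      /* top/bottom: copy the first/last bordered row outward */
      for (i = 1; i <= border; ++i) {
        memcpy(src - i * stride - border, src - border, linesize);
        memcpy(src + (h - 1 + i) * stride - border,
               src + (h - 1) * stride - border, linesize);
      }
    }
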
 
diff --git a/vpx_scale/mips/dspr2/yv12extend_dspr2.c b/vpx_scale/mips/dspr2/yv12extend_dspr2.c
index 363c7fdf0d429e4dc30f007b0309a4571c6ff73c..8a76c60faff56fe2aea9626b6fee414a065907fa 100644
--- a/vpx_scale/mips/dspr2/yv12extend_dspr2.c
+++ b/vpx_scale/mips/dspr2/yv12extend_dspr2.c
@@ -16,69 +16,65 @@
 #include "vpx_scale/vpx_scale.h"
 
 #if HAVE_DSPR2
-static void extend_plane(uint8_t *const src, int src_stride,
-                         int width, int height,
-                         int extend_top, int extend_left,
+static void extend_plane(uint8_t *const src, int src_stride, int width,
+                         int height, int extend_top, int extend_left,
                          int extend_bottom, int extend_right) {
-  int       i, j;
-  uint8_t   *left_src, *right_src;
-  uint8_t   *left_dst_start, *right_dst_start;
-  uint8_t   *left_dst, *right_dst;
-  uint8_t   *top_src, *bot_src;
-  uint8_t   *top_dst, *bot_dst;
-  uint32_t  left_pix;
-  uint32_t  right_pix;
-  uint32_t  linesize;
+  int i, j;
+  uint8_t *left_src, *right_src;
+  uint8_t *left_dst_start, *right_dst_start;
+  uint8_t *left_dst, *right_dst;
+  uint8_t *top_src, *bot_src;
+  uint8_t *top_dst, *bot_dst;
+  uint32_t left_pix;
+  uint32_t right_pix;
+  uint32_t linesize;
 
   /* copy the left and right most columns out */
-  left_src  = src;
+  left_src = src;
   right_src = src + width - 1;
   left_dst_start = src - extend_left;
   right_dst_start = src + width;
 
-  for (i = height; i--; ) {
-    left_dst  = left_dst_start;
+  for (i = height; i--;) {
+    left_dst = left_dst_start;
     right_dst = right_dst_start;
 
-    __asm__ __volatile__ (
+    __asm__ __volatile__(
         "lb        %[left_pix],     0(%[left_src])      \n\t"
         "lb        %[right_pix],    0(%[right_src])     \n\t"
         "replv.qb  %[left_pix],     %[left_pix]         \n\t"
         "replv.qb  %[right_pix],    %[right_pix]        \n\t"
 
-        : [left_pix] "=&r" (left_pix), [right_pix] "=&r" (right_pix)
-        : [left_src] "r" (left_src), [right_src] "r" (right_src)
-    );
+        : [left_pix] "=&r"(left_pix), [right_pix] "=&r"(right_pix)
+        : [left_src] "r"(left_src), [right_src] "r"(right_src));
 
-    for (j = extend_left/4; j--; ) {
-      __asm__ __volatile__ (
-        "sw     %[left_pix],    0(%[left_dst])     \n\t"
-        "sw     %[right_pix],   0(%[right_dst])    \n\t"
+    for (j = extend_left / 4; j--;) {
+      __asm__ __volatile__(
+          "sw     %[left_pix],    0(%[left_dst])     \n\t"
+          "sw     %[right_pix],   0(%[right_dst])    \n\t"
 
-        :
-        : [left_dst] "r" (left_dst), [left_pix] "r" (left_pix),
-          [right_dst] "r" (right_dst), [right_pix] "r" (right_pix)
-      );
+          :
+          : [left_dst] "r"(left_dst), [left_pix] "r"(left_pix),
+            [right_dst] "r"(right_dst), [right_pix] "r"(right_pix));
 
       left_dst += 4;
       right_dst += 4;
     }
 
-    for (j = extend_left%4; j--; ) {
-      __asm__ __volatile__ (
-        "sb     %[left_pix],    0(%[left_dst])     \n\t"
-        "sb     %[right_pix],   0(%[right_dst])     \n\t"
+    for (j = extend_left % 4; j--;) {
+      __asm__ __volatile__(
+          "sb     %[left_pix],    0(%[left_dst])     \n\t"
+          "sb     %[right_pix],   0(%[right_dst])     \n\t"
 
-        :
-        : [left_dst] "r" (left_dst), [left_pix] "r" (left_pix),
-          [right_dst] "r" (right_dst), [right_pix] "r" (right_pix)
-      );
+          :
+          : [left_dst] "r"(left_dst), [left_pix] "r"(left_pix),
+            [right_dst] "r"(right_dst), [right_pix] "r"(right_pix));
 
       left_dst += 1;
       right_dst += 1;
     }
 
-    left_src  += src_stride;
+    left_src += src_stride;
     right_src += src_stride;
     left_dst_start += src_stride;
     right_dst_start += src_stride;
@@ -90,7 +86,7 @@ static void extend_plane(uint8_t *const src, int src_stride,
   top_src = src - extend_left;
   bot_src = src + src_stride * (height - 1) - extend_left;
   top_dst = src + src_stride * (-extend_top) - extend_left;
-  bot_dst = src + src_stride * (height) - extend_left;
+  bot_dst = src + src_stride * height - extend_left;
   linesize = extend_left + extend_right + width;
 
   for (i = 0; i < extend_top; i++) {
@@ -119,17 +115,14 @@ static void extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) {
   assert(ybf->y_height - ybf->y_crop_height >= 0);
   assert(ybf->y_width - ybf->y_crop_width >= 0);
 
-  extend_plane(ybf->y_buffer, ybf->y_stride,
-               ybf->y_crop_width, ybf->y_crop_height,
-               ext_size, ext_size,
+  extend_plane(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
+               ybf->y_crop_height, ext_size, ext_size,
                ext_size + ybf->y_height - ybf->y_crop_height,
                ext_size + ybf->y_width - ybf->y_crop_width);
 
-  extend_plane(ybf->u_buffer, ybf->uv_stride,
-               c_w, c_h, c_et, c_el, c_eb, c_er);
+  extend_plane(ybf->u_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er);
 
-  extend_plane(ybf->v_buffer, ybf->uv_stride,
-               c_w, c_h, c_et, c_el, c_eb, c_er);
+  extend_plane(ybf->v_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er);
 }
 
 void vpx_extend_frame_borders_dspr2(YV12_BUFFER_CONFIG *ybf) {
@@ -137,8 +130,9 @@ void vpx_extend_frame_borders_dspr2(YV12_BUFFER_CONFIG *ybf) {
 }
 
 void vpx_extend_frame_inner_borders_dspr2(YV12_BUFFER_CONFIG *ybf) {
-  const int inner_bw = (ybf->border > VPXINNERBORDERINPIXELS) ?
-                       VPXINNERBORDERINPIXELS : ybf->border;
+  const int inner_bw = (ybf->border > VPXINNERBORDERINPIXELS)
+                           ? VPXINNERBORDERINPIXELS
+                           : ybf->border;
   extend_frame(ybf, inner_bw);
 }
 #endif
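
On the dspr2 inline asm reflowed in this file: replv.qb broadcasts one border
pixel into all four bytes of a register so the fill loop can store four pixels
per sw, with an sb loop for the remainder. A portable C equivalent of that
broadcast (a sketch, not the MIPS code; since all four bytes are equal,
endianness does not matter):

    #include <stdint.h>
    #include <string.h>

    void fill_border_words(uint8_t *dst, uint8_t pixel, int count) {
      const uint32_t pix4 = (uint32_t)pixel * 0x01010101u; /* replv.qb analogue */
      int j;
      for (j = count / 4; j--;) {  /* four pixels per store, like the sw loop */
        memcpy(dst, &pix4, 4);
        dst += 4;
      }
      for (j = count % 4; j--;) {  /* byte tail, like the sb loop */
        *dst++ = pixel;
      }
    }
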
diff --git a/vpx_scale/vpx_scale.h b/vpx_scale/vpx_scale.h
index 43fcf9d66e18fbc8e87d1a2239b8bd2ec813e6da..478a4834610cf9bdf428c75da915452ff7d1a0f6 100644
--- a/vpx_scale/vpx_scale.h
+++ b/vpx_scale/vpx_scale.h
@@ -8,20 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #ifndef VPX_SCALE_VPX_SCALE_H_
 #define VPX_SCALE_VPX_SCALE_H_
 
 #include "vpx_scale/yv12config.h"
 
-extern void vpx_scale_frame(YV12_BUFFER_CONFIG *src,
-                            YV12_BUFFER_CONFIG *dst,
-                            unsigned char *temp_area,
-                            unsigned char temp_height,
-                            unsigned int hscale,
-                            unsigned int hratio,
-                            unsigned int vscale,
-                            unsigned int vratio,
+extern void vpx_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
+                            unsigned char *temp_area, unsigned char temp_height,
+                            unsigned int hscale, unsigned int hratio,
+                            unsigned int vscale, unsigned int vratio,
                             unsigned int interlaced);
 
 #endif  // VPX_SCALE_VPX_SCALE_H_
diff --git a/vpx_scale/vpx_scale_rtcd.c b/vpx_scale/vpx_scale_rtcd.c
index bea603fd1046a8acd98df9ce9d78d80ea437d8b6..dc4d9593a8c75e7bc1fd62fcc21b60b9b4288e7e 100644
--- a/vpx_scale/vpx_scale_rtcd.c
+++ b/vpx_scale/vpx_scale_rtcd.c
@@ -12,7 +12,4 @@
 #include "./vpx_scale_rtcd.h"
 #include "vpx_ports/vpx_once.h"
 
-void vpx_scale_rtcd()
-{
-    once(setup_rtcd_internal);
-}
+void vpx_scale_rtcd() { once(setup_rtcd_internal); }
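
vpx_scale_rtcd.c collapses to a single line; once() from vpx_ports/vpx_once.h
guarantees setup_rtcd_internal runs exactly once even with concurrent callers.
A rough stand-in for the pattern, deliberately not thread-safe, showing only
the control flow (all names here are invented):

    #include <stdio.h>

    static void setup(void) { puts("dispatch table initialized"); }

    /* NOT thread-safe; the tree's once() adds the required synchronization */
    static void my_once(void (*func)(void)) {
      static int done;
      if (!done) {
        func();
        done = 1;
      }
    }

    int main(void) {
      my_once(setup);  /* runs setup */
      my_once(setup);  /* no-op on the second call */
      return 0;
    }
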
diff --git a/vpx_scale/yv12config.h b/vpx_scale/yv12config.h
index ad1b32af934b19fe8b1d24f1ef327ed94a715204..68544d4621b3569159f932c92e799ac4e5436c0a 100644
--- a/vpx_scale/yv12config.h
+++ b/vpx_scale/yv12config.h
@@ -20,28 +20,28 @@ extern "C" {
 #include "vpx/vpx_frame_buffer.h"
 #include "vpx/vpx_integer.h"
 
-#define VP8BORDERINPIXELS           32
-#define VPXINNERBORDERINPIXELS      96
-#define VPX_INTERP_EXTEND           4
-#define VPX_ENC_BORDER_IN_PIXELS    160
-#define VPX_DEC_BORDER_IN_PIXELS    32
+#define VP8BORDERINPIXELS 32
+#define VPXINNERBORDERINPIXELS 96
+#define VPX_INTERP_EXTEND 4
+#define VPX_ENC_BORDER_IN_PIXELS 160
+#define VPX_DEC_BORDER_IN_PIXELS 32
 
 typedef struct yv12_buffer_config {
-  int   y_width;
-  int   y_height;
-  int   y_crop_width;
-  int   y_crop_height;
-  int   y_stride;
-
-  int   uv_width;
-  int   uv_height;
-  int   uv_crop_width;
-  int   uv_crop_height;
-  int   uv_stride;
-
-  int   alpha_width;
-  int   alpha_height;
-  int   alpha_stride;
+  int y_width;
+  int y_height;
+  int y_crop_width;
+  int y_crop_height;
+  int y_stride;
+
+  int uv_width;
+  int uv_height;
+  int uv_crop_width;
+  int uv_crop_height;
+  int uv_stride;
+
+  int alpha_width;
+  int alpha_height;
+  int alpha_stride;
 
   uint8_t *y_buffer;
   uint8_t *u_buffer;
@@ -66,14 +66,14 @@ typedef struct yv12_buffer_config {
 
 #define YV12_FLAG_HIGHBITDEPTH 8
 
-int vpx_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
-                                int width, int height, int border);
-int vpx_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
-                                  int width, int height, int border);
+int vpx_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+                                int border);
+int vpx_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width,
+                                  int height, int border);
 int vpx_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf);
 
-int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
-                           int width, int height, int ss_x, int ss_y,
+int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+                           int ss_x, int ss_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                            int use_highbitdepth,
 #endif
@@ -86,16 +86,14 @@ int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
 // to decode the current frame. If cb is NULL, libvpx will allocate memory
 // internally to decode the current frame. Returns 0 on success. Returns < 0
 // on failure.
-int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
-                             int width, int height, int ss_x, int ss_y,
+int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
+                             int ss_x, int ss_y,
 #if CONFIG_VPX_HIGHBITDEPTH
                              int use_highbitdepth,
 #endif
-                             int border,
-                             int byte_alignment,
+                             int border, int byte_alignment,
                              vpx_codec_frame_buffer_t *fb,
-                             vpx_get_frame_buffer_cb_fn_t cb,
-                             void *cb_priv);
+                             vpx_get_frame_buffer_cb_fn_t cb, void *cb_priv);
 int vpx_free_frame_buffer(YV12_BUFFER_CONFIG *ybf);
 
 #ifdef __cplusplus
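
A note on YV12_BUFFER_CONFIG above: y_crop_width/y_crop_height hold the
displayed size while y_width/y_height hold the codec's padded working size, so
the pairs can differ. Worked numbers under the two alignment rules used in
yv12config.c (illustrative only):

    #include <stdio.h>

    int main(void) {
      const int crop_w = 1920, crop_h = 1080;
      /* vpx_yv12_* path pads to multiples of 16: 1080 becomes 1088 */
      printf("yv12: %dx%d\n", (crop_w + 15) & ~15, (crop_h + 15) & ~15);
      /* vpx_* path pads to multiples of 8: 1920x1080 is already aligned */
      printf("vpx:  %dx%d\n", (crop_w + 7) & ~7, (crop_h + 7) & ~7);
      return 0;
    }
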
diff --git a/vpx_util/endian_inl.h b/vpx_util/endian_inl.h
index 37bdce1ccd96e558caa76e739a85ae3b3623c546..dc387740958e85d4d8bd02362501e1356c35cbc8 100644
--- a/vpx_util/endian_inl.h
+++ b/vpx_util/endian_inl.h
@@ -17,21 +17,20 @@
 #include "vpx/vpx_integer.h"
 
 #if defined(__GNUC__)
-# define LOCAL_GCC_VERSION ((__GNUC__ << 8) | __GNUC_MINOR__)
-# define LOCAL_GCC_PREREQ(maj, min) \
-    (LOCAL_GCC_VERSION >= (((maj) << 8) | (min)))
+#define LOCAL_GCC_VERSION ((__GNUC__ << 8) | __GNUC_MINOR__)
+#define LOCAL_GCC_PREREQ(maj, min) (LOCAL_GCC_VERSION >= (((maj) << 8) | (min)))
 #else
-# define LOCAL_GCC_VERSION 0
-# define LOCAL_GCC_PREREQ(maj, min) 0
+#define LOCAL_GCC_VERSION 0
+#define LOCAL_GCC_PREREQ(maj, min) 0
 #endif
 
 // handle clang compatibility
 #ifndef __has_builtin
-# define __has_builtin(x) 0
+#define __has_builtin(x) 0
 #endif
 
 // some endian fix (e.g.: mips-gcc doesn't define __BIG_ENDIAN__)
-#if !defined(WORDS_BIGENDIAN) && \
+#if !defined(WORDS_BIGENDIAN) &&                   \
     (defined(__BIG_ENDIAN__) || defined(_M_PPC) || \
      (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)))
 #define WORDS_BIGENDIAN
@@ -80,12 +79,11 @@ static INLINE uint16_t BSwap16(uint16_t x) {
 static INLINE uint32_t BSwap32(uint32_t x) {
 #if defined(VPX_USE_MIPS32_R2)
   uint32_t ret;
-  __asm__ volatile (
-    "wsbh   %[ret], %[x]          \n\t"
-    "rotr   %[ret], %[ret],  16   \n\t"
-    : [ret]"=r"(ret)
-    : [x]"r"(x)
-  );
+  __asm__ volatile(
+      "wsbh   %[ret], %[x]          \n\t"
+      "rotr   %[ret], %[ret],  16   \n\t"
+      : [ret] "=r"(ret)
+      : [x] "r"(x));
   return ret;
 #elif defined(HAVE_BUILTIN_BSWAP32)
   return __builtin_bswap32(x);
@@ -109,10 +107,10 @@ static INLINE uint64_t BSwap64(uint64_t x) {
   return swapped_bytes;
 #elif defined(_MSC_VER)
   return (uint64_t)_byteswap_uint64(x);
-#else  // generic code for swapping 64-bit values (suggested by bdb@)
+#else   // generic code for swapping 64-bit values (suggested by bdb@)
   x = ((x & 0xffffffff00000000ull) >> 32) | ((x & 0x00000000ffffffffull) << 32);
   x = ((x & 0xffff0000ffff0000ull) >> 16) | ((x & 0x0000ffff0000ffffull) << 16);
-  x = ((x & 0xff00ff00ff00ff00ull) >>  8) | ((x & 0x00ff00ff00ff00ffull) <<  8);
+  x = ((x & 0xff00ff00ff00ff00ull) >> 8) | ((x & 0x00ff00ff00ff00ffull) << 8);
   return x;
 #endif  // HAVE_BUILTIN_BSWAP64
 }
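
Sanity check for the generic BSwap64 fallback above: the three mask-and-shift
passes swap 32-bit halves, then 16-bit quarters, then adjacent bytes, which
together reverse all eight bytes. A self-contained check (the test values are
assumptions, not from the tree):

    #include <assert.h>
    #include <stdint.h>

    uint64_t bswap64_generic(uint64_t x) {
      x = ((x & 0xffffffff00000000ull) >> 32) | ((x & 0x00000000ffffffffull) << 32);
      x = ((x & 0xffff0000ffff0000ull) >> 16) | ((x & 0x0000ffff0000ffffull) << 16);
      x = ((x & 0xff00ff00ff00ff00ull) >> 8) | ((x & 0x00ff00ff00ff00ffull) << 8);
      return x;
    }

    int main(void) {
      assert(bswap64_generic(0x0102030405060708ull) == 0x0807060504030201ull);
      assert(bswap64_generic(bswap64_generic(0xdeadbeefcafef00dull)) ==
             0xdeadbeefcafef00dull);
      return 0;
    }
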
diff --git a/vpx_util/vpx_thread.c b/vpx_util/vpx_thread.c
index 0bb0125bd49ecfb0bb94c886ab78befe539e3bcc..58054c835363c163a7739bc95725fa7065a8b99c 100644
--- a/vpx_util/vpx_thread.c
+++ b/vpx_util/vpx_thread.c
@@ -14,7 +14,7 @@
 //  100644 blob 264210ba2807e4da47eb5d18c04cf869d89b9784  src/utils/thread.c
 
 #include <assert.h>
-#include <string.h>   // for memset()
+#include <string.h>  // for memset()
 #include "./vpx_thread.h"
 #include "vpx_mem/vpx_mem.h"
 
@@ -22,8 +22,8 @@
 
 struct VPxWorkerImpl {
   pthread_mutex_t mutex_;
-  pthread_cond_t  condition_;
-  pthread_t       thread_;
+  pthread_cond_t condition_;
+  pthread_t thread_;
 };
 
 //------------------------------------------------------------------------------
@@ -31,29 +31,28 @@ struct VPxWorkerImpl {
 static void execute(VPxWorker *const worker);  // Forward declaration.
 
 static THREADFN thread_loop(void *ptr) {
-  VPxWorker *const worker = (VPxWorker*)ptr;
+  VPxWorker *const worker = (VPxWorker *)ptr;
   int done = 0;
   while (!done) {
     pthread_mutex_lock(&worker->impl_->mutex_);
-    while (worker->status_ == OK) {   // wait in idling mode
+    while (worker->status_ == OK) {  // wait in idling mode
       pthread_cond_wait(&worker->impl_->condition_, &worker->impl_->mutex_);
     }
     if (worker->status_ == WORK) {
       execute(worker);
       worker->status_ = OK;
-    } else if (worker->status_ == NOT_OK) {   // finish the worker
+    } else if (worker->status_ == NOT_OK) {  // finish the worker
       done = 1;
     }
     // signal to the main thread that we're done (for sync())
     pthread_cond_signal(&worker->impl_->condition_);
     pthread_mutex_unlock(&worker->impl_->mutex_);
   }
-  return THREAD_RETURN(NULL);    // Thread is finished
+  return THREAD_RETURN(NULL);  // Thread is finished
 }
 
 // main thread state control
-static void change_state(VPxWorker *const worker,
-                         VPxWorkerStatus new_status) {
+static void change_state(VPxWorker *const worker, VPxWorkerStatus new_status) {
   // No-op when attempting to change state on a thread that didn't come up.
   // Checking status_ without acquiring the lock first would result in a data
   // race.
@@ -96,7 +95,7 @@ static int reset(VPxWorker *const worker) {
   worker->had_error = 0;
   if (worker->status_ < OK) {
 #if CONFIG_MULTITHREAD
-    worker->impl_ = (VPxWorkerImpl*)vpx_calloc(1, sizeof(*worker->impl_));
+    worker->impl_ = (VPxWorkerImpl *)vpx_calloc(1, sizeof(*worker->impl_));
     if (worker->impl_ == NULL) {
       return 0;
     }
@@ -114,7 +113,7 @@ static int reset(VPxWorker *const worker) {
     if (!ok) {
       pthread_mutex_destroy(&worker->impl_->mutex_);
       pthread_cond_destroy(&worker->impl_->condition_);
- Error:
+    Error:
       vpx_free(worker->impl_);
       worker->impl_ = NULL;
       return 0;
@@ -162,15 +161,14 @@ static void end(VPxWorker *const worker) {
 
 //------------------------------------------------------------------------------
 
-static VPxWorkerInterface g_worker_interface = {
-  init, reset, sync, launch, execute, end
-};
+static VPxWorkerInterface g_worker_interface = { init,   reset,   sync,
+                                                 launch, execute, end };
 
-int vpx_set_worker_interface(const VPxWorkerInterface* const winterface) {
-  if (winterface == NULL ||
-      winterface->init == NULL || winterface->reset == NULL ||
-      winterface->sync == NULL || winterface->launch == NULL ||
-      winterface->execute == NULL || winterface->end == NULL) {
+int vpx_set_worker_interface(const VPxWorkerInterface *const winterface) {
+  if (winterface == NULL || winterface->init == NULL ||
+      winterface->reset == NULL || winterface->sync == NULL ||
+      winterface->launch == NULL || winterface->execute == NULL ||
+      winterface->end == NULL) {
     return 0;
   }
   g_worker_interface = *winterface;
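
To keep the reflowed worker API in view, a hedged usage sketch against the
declarations in vpx_thread.h (the hook, its data, and the include path are
assumptions; error handling is trimmed):

    #include "vpx_util/vpx_thread.h"  /* include path is an assumption */

    /* invented hook: runs on the worker thread; non-zero return = success */
    static int my_hook(void *data1, void *data2) {
      (void)data1;
      (void)data2;
      return 1;
    }

    void run_one_job(void) {
      const VPxWorkerInterface *const iface = vpx_get_worker_interface();
      VPxWorker worker;

      iface->init(&worker);
      if (!iface->reset(&worker)) return;  /* thread failed to come up */
      worker.hook = my_hook;
      worker.data1 = NULL;
      worker.data2 = NULL;
      iface->launch(&worker);  /* my_hook(data1, data2) runs asynchronously */
      iface->sync(&worker);    /* block until done; returns !had_error */
      iface->end(&worker);     /* finish the thread and release resources */
    }
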
diff --git a/vpx_util/vpx_thread.h b/vpx_util/vpx_thread.h
index de63c4da002e10db58701d2436494ce16cb9d560..848fe693ffaf2ce4b9b2ec517958933e18087d06 100644
--- a/vpx_util/vpx_thread.h
+++ b/vpx_util/vpx_thread.h
@@ -29,7 +29,7 @@ extern "C" {
 #if CONFIG_MULTITHREAD
 
 #if defined(_WIN32) && !HAVE_PTHREAD_H
-#include <errno.h>  // NOLINT
+#include <errno.h>    // NOLINT
 #include <process.h>  // NOLINT
 #include <windows.h>  // NOLINT
 typedef HANDLE pthread_t;
@@ -47,22 +47,20 @@ typedef struct {
 #define THREADFN unsigned int __stdcall
 #define THREAD_RETURN(val) (unsigned int)((DWORD_PTR)val)
 
-static INLINE int pthread_create(pthread_t* const thread, const void* attr,
-                                 unsigned int (__stdcall *start)(void*),
-                                 void* arg) {
+static INLINE int pthread_create(pthread_t *const thread, const void *attr,
+                                 unsigned int(__stdcall *start)(void *),
+                                 void *arg) {
   (void)attr;
-  *thread = (pthread_t)_beginthreadex(NULL,   /* void *security */
-                                      0,      /* unsigned stack_size */
-                                      start,
-                                      arg,
-                                      0,      /* unsigned initflag */
-                                      NULL);  /* unsigned *thrdaddr */
+  *thread = (pthread_t)_beginthreadex(NULL,          /* void *security */
+                                      0,             /* unsigned stack_size */
+                                      start, arg, 0, /* unsigned initflag */
+                                      NULL);         /* unsigned *thrdaddr */
   if (*thread == NULL) return 1;
   SetThreadPriority(*thread, THREAD_PRIORITY_ABOVE_NORMAL);
   return 0;
 }
 
-static INLINE int pthread_join(pthread_t thread, void** value_ptr) {
+static INLINE int pthread_join(pthread_t thread, void **value_ptr) {
   (void)value_ptr;
   return (WaitForSingleObject(thread, INFINITE) != WAIT_OBJECT_0 ||
           CloseHandle(thread) == 0);
@@ -70,7 +68,7 @@ static INLINE int pthread_join(pthread_t thread, void** value_ptr) {
 
 // Mutex
 static INLINE int pthread_mutex_init(pthread_mutex_t *const mutex,
-                                     void* mutexattr) {
+                                     void *mutexattr) {
   (void)mutexattr;
   InitializeCriticalSection(mutex);
   return 0;
@@ -105,13 +103,12 @@ static INLINE int pthread_cond_destroy(pthread_cond_t *const condition) {
 }
 
 static INLINE int pthread_cond_init(pthread_cond_t *const condition,
-                                    void* cond_attr) {
+                                    void *cond_attr) {
   (void)cond_attr;
   condition->waiting_sem_ = CreateSemaphore(NULL, 0, MAX_DECODE_THREADS, NULL);
   condition->received_sem_ = CreateSemaphore(NULL, 0, MAX_DECODE_THREADS, NULL);
   condition->signal_event_ = CreateEvent(NULL, FALSE, FALSE, NULL);
-  if (condition->waiting_sem_ == NULL ||
-      condition->received_sem_ == NULL ||
+  if (condition->waiting_sem_ == NULL || condition->received_sem_ == NULL ||
       condition->signal_event_ == NULL) {
     pthread_cond_destroy(condition);
     return 1;
@@ -137,8 +134,7 @@ static INLINE int pthread_cond_wait(pthread_cond_t *const condition,
   int ok;
   // note that there is a consumer available so the signal isn't dropped in
   // pthread_cond_signal
-  if (!ReleaseSemaphore(condition->waiting_sem_, 1, NULL))
-    return 1;
+  if (!ReleaseSemaphore(condition->waiting_sem_, 1, NULL)) return 1;
   // now unlock the mutex so pthread_cond_signal may be issued
   pthread_mutex_unlock(mutex);
   ok = (WaitForSingleObject(condition->signal_event_, INFINITE) ==
@@ -147,24 +143,24 @@ static INLINE int pthread_cond_wait(pthread_cond_t *const condition,
   pthread_mutex_lock(mutex);
   return !ok;
 }
-#else  // _WIN32
-#include <pthread.h> // NOLINT
-# define THREADFN void*
-# define THREAD_RETURN(val) val
+#else                 // _WIN32
+#include <pthread.h>  // NOLINT
+#define THREADFN void *
+#define THREAD_RETURN(val) val
 #endif
 
 #endif  // CONFIG_MULTITHREAD
 
 // State of the worker thread object
 typedef enum {
-  NOT_OK = 0,   // object is unusable
-  OK,           // ready to work
-  WORK          // busy finishing the current task
+  NOT_OK = 0,  // object is unusable
+  OK,          // ready to work
+  WORK         // busy finishing the current task
 } VPxWorkerStatus;
 
 // Function to be called by the worker thread. Takes two opaque pointers as
 // arguments (data1 and data2), and should return false in case of error.
-typedef int (*VPxWorkerHook)(void*, void*);
+typedef int (*VPxWorkerHook)(void *, void *);
 
 // Platform-dependent implementation details for the worker.
 typedef struct VPxWorkerImpl VPxWorkerImpl;
@@ -173,10 +169,10 @@ typedef struct VPxWorkerImpl VPxWorkerImpl;
 typedef struct {
   VPxWorkerImpl *impl_;
   VPxWorkerStatus status_;
-  VPxWorkerHook hook;     // hook to call
-  void *data1;            // first argument passed to 'hook'
-  void *data2;            // second argument passed to 'hook'
-  int had_error;          // return value of the last call to 'hook'
+  VPxWorkerHook hook;  // hook to call
+  void *data1;         // first argument passed to 'hook'
+  void *data2;         // second argument passed to 'hook'
+  int had_error;       // return value of the last call to 'hook'
 } VPxWorker;
 
 // The interface for all thread-worker related functions. All these functions
@@ -217,7 +213,7 @@ const VPxWorkerInterface *vpx_get_worker_interface(void);
 //------------------------------------------------------------------------------
 
 #ifdef __cplusplus
-}    // extern "C"
+}  // extern "C"
 #endif
 
 #endif  // VPX_THREAD_H_
diff --git a/vpxdec.c b/vpxdec.c
index d10fce6ee6a912d705dbad083d8089255c9bf966..14b1ec02dc3a675384a0a9687a78ebe226391472 100644
--- a/vpxdec.c
+++ b/vpxdec.c
@@ -47,53 +47,53 @@ struct VpxDecInputContext {
   struct WebmInputContext *webm_ctx;
 };
 
-static const arg_def_t looparg = ARG_DEF(
-    NULL, "loops", 1, "Number of times to decode the file");
-static const arg_def_t codecarg = ARG_DEF(
-    NULL, "codec", 1, "Codec to use");
-static const arg_def_t use_yv12 = ARG_DEF(
-    NULL, "yv12", 0, "Output raw YV12 frames");
-static const arg_def_t use_i420 = ARG_DEF(
-    NULL, "i420", 0, "Output raw I420 frames");
-static const arg_def_t flipuvarg = ARG_DEF(
-    NULL, "flipuv", 0, "Flip the chroma planes in the output");
-static const arg_def_t rawvideo = ARG_DEF(
-    NULL, "rawvideo", 0, "Output raw YUV frames");
-static const arg_def_t noblitarg = ARG_DEF(
-    NULL, "noblit", 0, "Don't process the decoded frames");
-static const arg_def_t progressarg = ARG_DEF(
-    NULL, "progress", 0, "Show progress after each frame decodes");
-static const arg_def_t limitarg = ARG_DEF(
-    NULL, "limit", 1, "Stop decoding after n frames");
-static const arg_def_t skiparg = ARG_DEF(
-    NULL, "skip", 1, "Skip the first n input frames");
-static const arg_def_t postprocarg = ARG_DEF(
-    NULL, "postproc", 0, "Postprocess decoded frames");
-static const arg_def_t summaryarg = ARG_DEF(
-    NULL, "summary", 0, "Show timing summary");
-static const arg_def_t outputfile = ARG_DEF(
-    "o", "output", 1, "Output file name pattern (see below)");
-static const arg_def_t threadsarg = ARG_DEF(
-    "t", "threads", 1, "Max threads to use");
-static const arg_def_t frameparallelarg = ARG_DEF(
-    NULL, "frame-parallel", 0, "Frame parallel decode");
-static const arg_def_t verbosearg = ARG_DEF(
-    "v", "verbose", 0, "Show version string");
-static const arg_def_t error_concealment = ARG_DEF(
-    NULL, "error-concealment", 0, "Enable decoder error-concealment");
-static const arg_def_t scalearg = ARG_DEF(
-    "S", "scale", 0, "Scale output frames uniformly");
-static const arg_def_t continuearg = ARG_DEF(
-    "k", "keep-going", 0, "(debug) Continue decoding after error");
-static const arg_def_t fb_arg = ARG_DEF(
-    NULL, "frame-buffers", 1, "Number of frame buffers to use");
-static const arg_def_t md5arg = ARG_DEF(
-    NULL, "md5", 0, "Compute the MD5 sum of the decoded frame");
+static const arg_def_t looparg =
+    ARG_DEF(NULL, "loops", 1, "Number of times to decode the file");
+static const arg_def_t codecarg = ARG_DEF(NULL, "codec", 1, "Codec to use");
+static const arg_def_t use_yv12 =
+    ARG_DEF(NULL, "yv12", 0, "Output raw YV12 frames");
+static const arg_def_t use_i420 =
+    ARG_DEF(NULL, "i420", 0, "Output raw I420 frames");
+static const arg_def_t flipuvarg =
+    ARG_DEF(NULL, "flipuv", 0, "Flip the chroma planes in the output");
+static const arg_def_t rawvideo =
+    ARG_DEF(NULL, "rawvideo", 0, "Output raw YUV frames");
+static const arg_def_t noblitarg =
+    ARG_DEF(NULL, "noblit", 0, "Don't process the decoded frames");
+static const arg_def_t progressarg =
+    ARG_DEF(NULL, "progress", 0, "Show progress after each frame decodes");
+static const arg_def_t limitarg =
+    ARG_DEF(NULL, "limit", 1, "Stop decoding after n frames");
+static const arg_def_t skiparg =
+    ARG_DEF(NULL, "skip", 1, "Skip the first n input frames");
+static const arg_def_t postprocarg =
+    ARG_DEF(NULL, "postproc", 0, "Postprocess decoded frames");
+static const arg_def_t summaryarg =
+    ARG_DEF(NULL, "summary", 0, "Show timing summary");
+static const arg_def_t outputfile =
+    ARG_DEF("o", "output", 1, "Output file name pattern (see below)");
+static const arg_def_t threadsarg =
+    ARG_DEF("t", "threads", 1, "Max threads to use");
+static const arg_def_t frameparallelarg =
+    ARG_DEF(NULL, "frame-parallel", 0, "Frame parallel decode");
+static const arg_def_t verbosearg =
+    ARG_DEF("v", "verbose", 0, "Show version string");
+static const arg_def_t error_concealment =
+    ARG_DEF(NULL, "error-concealment", 0, "Enable decoder error-concealment");
+static const arg_def_t scalearg =
+    ARG_DEF("S", "scale", 0, "Scale output frames uniformly");
+static const arg_def_t continuearg =
+    ARG_DEF("k", "keep-going", 0, "(debug) Continue decoding after error");
+static const arg_def_t fb_arg =
+    ARG_DEF(NULL, "frame-buffers", 1, "Number of frame buffers to use");
+static const arg_def_t md5arg =
+    ARG_DEF(NULL, "md5", 0, "Compute the MD5 sum of the decoded frame");
 #if CONFIG_VPX_HIGHBITDEPTH
-static const arg_def_t outbitdeptharg = ARG_DEF(
-    NULL, "output-bit-depth", 1, "Output bit-depth for decoded frames");
+static const arg_def_t outbitdeptharg =
+    ARG_DEF(NULL, "output-bit-depth", 1, "Output bit-depth for decoded frames");
 #endif
 
+/* clang-format off */
 static const arg_def_t *all_args[] = {
   &codecarg, &use_yv12, &use_i420, &flipuvarg, &rawvideo, &noblitarg,
   &progressarg, &limitarg, &skiparg, &postprocarg, &summaryarg, &outputfile,
@@ -104,50 +104,43 @@ static const arg_def_t *all_args[] = {
 #endif
   NULL
 };
-
+/* clang-format on */
 
 #if CONFIG_LIBYUV
 static INLINE int libyuv_scale(vpx_image_t *src, vpx_image_t *dst,
-                                  FilterModeEnum mode) {
+                               FilterModeEnum mode) {
 #if CONFIG_VPX_HIGHBITDEPTH
   if (src->fmt == VPX_IMG_FMT_I42016) {
     assert(dst->fmt == VPX_IMG_FMT_I42016);
-    return I420Scale_16((uint16_t*)src->planes[VPX_PLANE_Y],
-                        src->stride[VPX_PLANE_Y]/2,
-                        (uint16_t*)src->planes[VPX_PLANE_U],
-                        src->stride[VPX_PLANE_U]/2,
-                        (uint16_t*)src->planes[VPX_PLANE_V],
-                        src->stride[VPX_PLANE_V]/2,
-                        src->d_w, src->d_h,
-                        (uint16_t*)dst->planes[VPX_PLANE_Y],
-                        dst->stride[VPX_PLANE_Y]/2,
-                        (uint16_t*)dst->planes[VPX_PLANE_U],
-                        dst->stride[VPX_PLANE_U]/2,
-                        (uint16_t*)dst->planes[VPX_PLANE_V],
-                        dst->stride[VPX_PLANE_V]/2,
-                        dst->d_w, dst->d_h,
-                        mode);
+    return I420Scale_16(
+        (uint16_t *)src->planes[VPX_PLANE_Y], src->stride[VPX_PLANE_Y] / 2,
+        (uint16_t *)src->planes[VPX_PLANE_U], src->stride[VPX_PLANE_U] / 2,
+        (uint16_t *)src->planes[VPX_PLANE_V], src->stride[VPX_PLANE_V] / 2,
+        src->d_w, src->d_h, (uint16_t *)dst->planes[VPX_PLANE_Y],
+        dst->stride[VPX_PLANE_Y] / 2, (uint16_t *)dst->planes[VPX_PLANE_U],
+        dst->stride[VPX_PLANE_U] / 2, (uint16_t *)dst->planes[VPX_PLANE_V],
+        dst->stride[VPX_PLANE_V] / 2, dst->d_w, dst->d_h, mode);
   }
 #endif
   assert(src->fmt == VPX_IMG_FMT_I420);
   assert(dst->fmt == VPX_IMG_FMT_I420);
   return I420Scale(src->planes[VPX_PLANE_Y], src->stride[VPX_PLANE_Y],
                    src->planes[VPX_PLANE_U], src->stride[VPX_PLANE_U],
-                   src->planes[VPX_PLANE_V], src->stride[VPX_PLANE_V],
-                   src->d_w, src->d_h,
-                   dst->planes[VPX_PLANE_Y], dst->stride[VPX_PLANE_Y],
+                   src->planes[VPX_PLANE_V], src->stride[VPX_PLANE_V], src->d_w,
+                   src->d_h, dst->planes[VPX_PLANE_Y], dst->stride[VPX_PLANE_Y],
                    dst->planes[VPX_PLANE_U], dst->stride[VPX_PLANE_U],
-                   dst->planes[VPX_PLANE_V], dst->stride[VPX_PLANE_V],
-                   dst->d_w, dst->d_h,
-                   mode);
+                   dst->planes[VPX_PLANE_V], dst->stride[VPX_PLANE_V], dst->d_w,
+                   dst->d_h, mode);
 }
 #endif
 
 void usage_exit(void) {
   int i;
 
-  fprintf(stderr, "Usage: %s <options> filename\n\n"
-          "Options:\n", exec_name);
+  fprintf(stderr,
+          "Usage: %s <options> filename\n\n"
+          "Options:\n",
+          exec_name);
   arg_show_usage(stderr, all_args);
   fprintf(stderr,
           "\nOutput File Patterns:\n\n"
@@ -162,27 +155,25 @@ void usage_exit(void) {
           "\n\t%%<n> - Frame number, zero padded to <n> places (1..9)"
           "\n\n  Pattern arguments are only supported in conjunction "
           "with the --yv12 and\n  --i420 options. If the -o option is "
-          "not specified, the output will be\n  directed to stdout.\n"
-         );
+          "not specified, the output will be\n  directed to stdout.\n");
   fprintf(stderr, "\nIncluded decoders:\n\n");
 
   for (i = 0; i < get_vpx_decoder_count(); ++i) {
     const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
-    fprintf(stderr, "    %-6s - %s\n",
-            decoder->name, vpx_codec_iface_name(decoder->codec_interface()));
+    fprintf(stderr, "    %-6s - %s\n", decoder->name,
+            vpx_codec_iface_name(decoder->codec_interface()));
   }
 
   exit(EXIT_FAILURE);
 }
 
-static int raw_read_frame(FILE *infile, uint8_t **buffer,
-                          size_t *bytes_read, size_t *buffer_size) {
+static int raw_read_frame(FILE *infile, uint8_t **buffer, size_t *bytes_read,
+                          size_t *buffer_size) {
   char raw_hdr[RAW_FRAME_HDR_SZ];
   size_t frame_size = 0;
 
   if (fread(raw_hdr, RAW_FRAME_HDR_SZ, 1, infile) != 1) {
-    if (!feof(infile))
-      warn("Failed to read RAW frame size\n");
+    if (!feof(infile)) warn("Failed to read RAW frame size\n");
   } else {
     const size_t kCorruptFrameThreshold = 256 * 1024 * 1024;
     const size_t kFrameTooSmallThreshold = 256 * 1024;
@@ -226,17 +217,16 @@ static int read_frame(struct VpxDecInputContext *input, uint8_t **buf,
   switch (input->vpx_input_ctx->file_type) {
 #if CONFIG_WEBM_IO
     case FILE_TYPE_WEBM:
-      return webm_read_frame(input->webm_ctx,
-                             buf, bytes_in_buffer, buffer_size);
+      return webm_read_frame(input->webm_ctx, buf, bytes_in_buffer,
+                             buffer_size);
 #endif
     case FILE_TYPE_RAW:
-      return raw_read_frame(input->vpx_input_ctx->file,
-                            buf, bytes_in_buffer, buffer_size);
+      return raw_read_frame(input->vpx_input_ctx->file, buf, bytes_in_buffer,
+                            buffer_size);
     case FILE_TYPE_IVF:
-      return ivf_read_frame(input->vpx_input_ctx->file,
-                            buf, bytes_in_buffer, buffer_size);
-    default:
-      return 1;
+      return ivf_read_frame(input->vpx_input_ctx->file, buf, bytes_in_buffer,
+                            buffer_size);
+    default: return 1;
   }
 }
 
@@ -249,7 +239,7 @@ static void update_image_md5(const vpx_image_t *img, const int planes[3],
     const unsigned char *buf = img->planes[plane];
     const int stride = img->stride[plane];
     const int w = vpx_img_plane_width(img, plane) *
-                ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
+                  ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
     const int h = vpx_img_plane_height(img, plane);
 
     for (y = 0; y < h; ++y) {
@@ -295,8 +285,8 @@ static int file_is_raw(struct VpxInputContext *input) {
     if (mem_get_le32(buf) < 256 * 1024 * 1024) {
       for (i = 0; i < get_vpx_decoder_count(); ++i) {
         const VpxInterface *const decoder = get_vpx_decoder_by_index(i);
-        if (!vpx_codec_peek_stream_info(decoder->codec_interface(),
-                                        buf + 4, 32 - 4, &si)) {
+        if (!vpx_codec_peek_stream_info(decoder->codec_interface(), buf + 4,
+                                        32 - 4, &si)) {
           is_raw = 1;
           input->fourcc = decoder->fourcc;
           input->width = si.w;
@@ -315,13 +305,13 @@ static int file_is_raw(struct VpxInputContext *input) {
 
 static void show_progress(int frame_in, int frame_out, uint64_t dx_time) {
   fprintf(stderr,
-          "%d decoded frames/%d showed frames in %"PRId64" us (%.2f fps)\r",
+          "%d decoded frames/%d showed frames in %" PRId64 " us (%.2f fps)\r",
           frame_in, frame_out, dx_time,
           (double)frame_out * 1000000.0 / (double)dx_time);
 }
 
 struct ExternalFrameBuffer {
-  uint8_t* data;
+  uint8_t *data;
   size_t size;
   int in_use;
 };
@@ -340,23 +330,19 @@ static int get_vp9_frame_buffer(void *cb_priv, size_t min_size,
   int i;
   struct ExternalFrameBufferList *const ext_fb_list =
       (struct ExternalFrameBufferList *)cb_priv;
-  if (ext_fb_list == NULL)
-    return -1;
+  if (ext_fb_list == NULL) return -1;
 
   // Find a free frame buffer.
   for (i = 0; i < ext_fb_list->num_external_frame_buffers; ++i) {
-    if (!ext_fb_list->ext_fb[i].in_use)
-      break;
+    if (!ext_fb_list->ext_fb[i].in_use) break;
   }
 
-  if (i == ext_fb_list->num_external_frame_buffers)
-    return -1;
+  if (i == ext_fb_list->num_external_frame_buffers) return -1;
 
   if (ext_fb_list->ext_fb[i].size < min_size) {
     free(ext_fb_list->ext_fb[i].data);
     ext_fb_list->ext_fb[i].data = (uint8_t *)calloc(min_size, sizeof(uint8_t));
-    if (!ext_fb_list->ext_fb[i].data)
-      return -1;
+    if (!ext_fb_list->ext_fb[i].data) return -1;
 
     ext_fb_list->ext_fb[i].size = min_size;
   }
@@ -397,47 +383,22 @@ static void generate_filename(const char *pattern, char *out, size_t q_len,
       /* parse the pattern */
       q[q_len - 1] = '\0';
       switch (p[1]) {
-        case 'w':
-          snprintf(q, q_len - 1, "%d", d_w);
-          break;
-        case 'h':
-          snprintf(q, q_len - 1, "%d", d_h);
-          break;
-        case '1':
-          snprintf(q, q_len - 1, "%d", frame_in);
-          break;
-        case '2':
-          snprintf(q, q_len - 1, "%02d", frame_in);
-          break;
-        case '3':
-          snprintf(q, q_len - 1, "%03d", frame_in);
-          break;
-        case '4':
-          snprintf(q, q_len - 1, "%04d", frame_in);
-          break;
-        case '5':
-          snprintf(q, q_len - 1, "%05d", frame_in);
-          break;
-        case '6':
-          snprintf(q, q_len - 1, "%06d", frame_in);
-          break;
-        case '7':
-          snprintf(q, q_len - 1, "%07d", frame_in);
-          break;
-        case '8':
-          snprintf(q, q_len - 1, "%08d", frame_in);
-          break;
-        case '9':
-          snprintf(q, q_len - 1, "%09d", frame_in);
-          break;
-        default:
-          die("Unrecognized pattern %%%c\n", p[1]);
-          break;
+        case 'w': snprintf(q, q_len - 1, "%d", d_w); break;
+        case 'h': snprintf(q, q_len - 1, "%d", d_h); break;
+        case '1': snprintf(q, q_len - 1, "%d", frame_in); break;
+        case '2': snprintf(q, q_len - 1, "%02d", frame_in); break;
+        case '3': snprintf(q, q_len - 1, "%03d", frame_in); break;
+        case '4': snprintf(q, q_len - 1, "%04d", frame_in); break;
+        case '5': snprintf(q, q_len - 1, "%05d", frame_in); break;
+        case '6': snprintf(q, q_len - 1, "%06d", frame_in); break;
+        case '7': snprintf(q, q_len - 1, "%07d", frame_in); break;
+        case '8': snprintf(q, q_len - 1, "%08d", frame_in); break;
+        case '9': snprintf(q, q_len - 1, "%09d", frame_in); break;
+        default: die("Unrecognized pattern %%%c\n", p[1]); break;
       }
 
       pat_len = strlen(q);
-      if (pat_len >= q_len - 1)
-        die("Output filename too long.\n");
+      if (pat_len >= q_len - 1) die("Output filename too long.\n");
       q += pat_len;
       p += 2;
       q_len -= pat_len;
@@ -450,8 +411,7 @@ static void generate_filename(const char *pattern, char *out, size_t q_len,
       else
         copy_len = next_pat - p;
 
-      if (copy_len >= q_len - 1)
-        die("Output filename too long.\n");
+      if (copy_len >= q_len - 1) die("Output filename too long.\n");
 
       memcpy(q, p, copy_len);
       q[copy_len] = '\0';
@@ -469,8 +429,7 @@ static int is_single_file(const char *outfile_pattern) {
     p = strchr(p, '%');
     if (p && p[1] >= '1' && p[1] <= '9')
       return 0;  // pattern contains sequence number, so it's not unique
-    if (p)
-      p++;
+    if (p) p++;
   } while (p);
 
   return 1;
@@ -479,8 +438,7 @@ static int is_single_file(const char *outfile_pattern) {
 static void print_md5(unsigned char digest[16], const char *filename) {
   int i;
 
-  for (i = 0; i < 16; ++i)
-    printf("%02x", digest[i]);
+  for (i = 0; i < 16; ++i) printf("%02x", digest[i]);
   printf("  %s\n", filename);
 }
 
@@ -490,8 +448,7 @@ static FILE *open_outfile(const char *name) {
     return stdout;
   } else {
     FILE *file = fopen(name, "wb");
-    if (!file)
-      fatal("Failed to open output file '%s'", name);
+    if (!file) fatal("Failed to open output file '%s'", name);
     return file;
   }
 }
@@ -500,58 +457,57 @@ static FILE *open_outfile(const char *name) {
 static int img_shifted_realloc_required(const vpx_image_t *img,
                                         const vpx_image_t *shifted,
                                         vpx_img_fmt_t required_fmt) {
-  return img->d_w != shifted->d_w ||
-         img->d_h != shifted->d_h ||
+  return img->d_w != shifted->d_w || img->d_h != shifted->d_h ||
          required_fmt != shifted->fmt;
 }
 #endif
 
 static int main_loop(int argc, const char **argv_) {
-  vpx_codec_ctx_t       decoder;
-  char                  *fn = NULL;
-  int                    i;
-  uint8_t               *buf = NULL;
-  size_t                 bytes_in_buffer = 0, buffer_size = 0;
-  FILE                  *infile;
-  int                    frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0;
-  int                    do_md5 = 0, progress = 0, frame_parallel = 0;
-  int                    stop_after = 0, postproc = 0, summary = 0, quiet = 1;
-  int                    arg_skip = 0;
-  int                    ec_enabled = 0;
-  int                    keep_going = 0;
+  vpx_codec_ctx_t decoder;
+  char *fn = NULL;
+  int i;
+  uint8_t *buf = NULL;
+  size_t bytes_in_buffer = 0, buffer_size = 0;
+  FILE *infile;
+  int frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0;
+  int do_md5 = 0, progress = 0, frame_parallel = 0;
+  int stop_after = 0, postproc = 0, summary = 0, quiet = 1;
+  int arg_skip = 0;
+  int ec_enabled = 0;
+  int keep_going = 0;
   const VpxInterface *interface = NULL;
   const VpxInterface *fourcc_interface = NULL;
   uint64_t dx_time = 0;
-  struct arg               arg;
-  char                   **argv, **argi, **argj;
-
-  int                     single_file;
-  int                     use_y4m = 1;
-  int                     opt_yv12 = 0;
-  int                     opt_i420 = 0;
-  vpx_codec_dec_cfg_t     cfg = {0, 0, 0};
+  struct arg arg;
+  char **argv, **argi, **argj;
+
+  int single_file;
+  int use_y4m = 1;
+  int opt_yv12 = 0;
+  int opt_i420 = 0;
+  vpx_codec_dec_cfg_t cfg = { 0, 0, 0 };
 #if CONFIG_VPX_HIGHBITDEPTH
-  unsigned int            output_bit_depth = 0;
+  unsigned int output_bit_depth = 0;
 #endif
-  int                     frames_corrupted = 0;
-  int                     dec_flags = 0;
-  int                     do_scale = 0;
-  vpx_image_t             *scaled_img = NULL;
+  int frames_corrupted = 0;
+  int dec_flags = 0;
+  int do_scale = 0;
+  vpx_image_t *scaled_img = NULL;
 #if CONFIG_VPX_HIGHBITDEPTH
-  vpx_image_t             *img_shifted = NULL;
+  vpx_image_t *img_shifted = NULL;
 #endif
-  int                     frame_avail, got_data, flush_decoder = 0;
-  int                     num_external_frame_buffers = 0;
-  struct ExternalFrameBufferList ext_fb_list = {0, NULL};
+  int frame_avail, got_data, flush_decoder = 0;
+  int num_external_frame_buffers = 0;
+  struct ExternalFrameBufferList ext_fb_list = { 0, NULL };
 
   const char *outfile_pattern = NULL;
-  char outfile_name[PATH_MAX] = {0};
+  char outfile_name[PATH_MAX] = { 0 };
   FILE *outfile = NULL;
 
   MD5Context md5_ctx;
   unsigned char md5_digest[16];
 
-  struct VpxDecInputContext input = {NULL, NULL};
+  struct VpxDecInputContext input = { NULL, NULL };
   struct VpxInputContext vpx_input_ctx;
 #if CONFIG_WEBM_IO
   struct WebmInputContext webm_ctx;
@@ -683,7 +639,8 @@ static int main_loop(int argc, const char **argv_) {
 
   if (use_y4m && !noblit) {
     if (!single_file) {
-      fprintf(stderr, "YUV4MPEG2 not supported with output patterns,"
+      fprintf(stderr,
+              "YUV4MPEG2 not supported with output patterns,"
               " try --i420 or --yv12 or --rawvideo.\n");
       return EXIT_FAILURE;
     }
@@ -691,7 +648,8 @@ static int main_loop(int argc, const char **argv_) {
 #if CONFIG_WEBM_IO
     if (vpx_input_ctx.file_type == FILE_TYPE_WEBM) {
       if (webm_guess_framerate(input.webm_ctx, input.vpx_input_ctx)) {
-        fprintf(stderr, "Failed to guess framerate -- error parsing "
+        fprintf(stderr,
+                "Failed to guess framerate -- error parsing "
                 "webm file?\n");
         return EXIT_FAILURE;
       }
@@ -705,27 +663,23 @@ static int main_loop(int argc, const char **argv_) {
   else
     interface = fourcc_interface;
 
-  if (!interface)
-    interface = get_vpx_decoder_by_index(0);
+  if (!interface) interface = get_vpx_decoder_by_index(0);
 
   dec_flags = (postproc ? VPX_CODEC_USE_POSTPROC : 0) |
               (ec_enabled ? VPX_CODEC_USE_ERROR_CONCEALMENT : 0) |
               (frame_parallel ? VPX_CODEC_USE_FRAME_THREADING : 0);
-  if (vpx_codec_dec_init(&decoder, interface->codec_interface(),
-                         &cfg, dec_flags)) {
+  if (vpx_codec_dec_init(&decoder, interface->codec_interface(), &cfg,
+                         dec_flags)) {
     fprintf(stderr, "Failed to initialize decoder: %s\n",
             vpx_codec_error(&decoder));
     return EXIT_FAILURE;
   }
 
-  if (!quiet)
-    fprintf(stderr, "%s\n", decoder.name);
+  if (!quiet) fprintf(stderr, "%s\n", decoder.name);
 
-  if (arg_skip)
-    fprintf(stderr, "Skipping first %d frames.\n", arg_skip);
+  if (arg_skip) fprintf(stderr, "Skipping first %d frames.\n", arg_skip);
   while (arg_skip) {
-    if (read_frame(&input, &buf, &bytes_in_buffer, &buffer_size))
-      break;
+    if (read_frame(&input, &buf, &bytes_in_buffer, &buffer_size)) break;
     arg_skip--;
   }
 
@@ -733,9 +687,9 @@ static int main_loop(int argc, const char **argv_) {
     ext_fb_list.num_external_frame_buffers = num_external_frame_buffers;
     ext_fb_list.ext_fb = (struct ExternalFrameBuffer *)calloc(
         num_external_frame_buffers, sizeof(*ext_fb_list.ext_fb));
-    if (vpx_codec_set_frame_buffer_functions(
-            &decoder, get_vp9_frame_buffer, release_vp9_frame_buffer,
-            &ext_fb_list)) {
+    if (vpx_codec_set_frame_buffer_functions(&decoder, get_vp9_frame_buffer,
+                                             release_vp9_frame_buffer,
+                                             &ext_fb_list)) {
       fprintf(stderr, "Failed to configure external frame buffers: %s\n",
               vpx_codec_error(&decoder));
       return EXIT_FAILURE;
@@ -747,10 +701,10 @@ static int main_loop(int argc, const char **argv_) {
 
   /* Decode file */
   while (frame_avail || got_data) {
-    vpx_codec_iter_t  iter = NULL;
-    vpx_image_t    *img;
+    vpx_codec_iter_t iter = NULL;
+    vpx_image_t *img;
     struct vpx_usec_timer timer;
-    int                   corrupted = 0;
+    int corrupted = 0;
 
     frame_avail = 0;
     if (!stop_after || frame_in < stop_after) {
@@ -760,16 +714,14 @@ static int main_loop(int argc, const char **argv_) {
 
         vpx_usec_timer_start(&timer);
 
-        if (vpx_codec_decode(&decoder, buf, (unsigned int)bytes_in_buffer,
-                             NULL, 0)) {
+        if (vpx_codec_decode(&decoder, buf, (unsigned int)bytes_in_buffer, NULL,
+                             0)) {
           const char *detail = vpx_codec_error_detail(&decoder);
-          warn("Failed to decode frame %d: %s",
-               frame_in, vpx_codec_error(&decoder));
+          warn("Failed to decode frame %d: %s", frame_in,
+               vpx_codec_error(&decoder));
 
-          if (detail)
-            warn("Additional information: %s", detail);
-          if (!keep_going)
-            goto fail;
+          if (detail) warn("Additional information: %s", detail);
+          if (!keep_going) goto fail;
         }
 
         vpx_usec_timer_mark(&timer);
@@ -802,17 +754,15 @@ static int main_loop(int argc, const char **argv_) {
     if (!frame_parallel &&
         vpx_codec_control(&decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted)) {
       warn("Failed VP8_GET_FRAME_CORRUPTED: %s", vpx_codec_error(&decoder));
-      if (!keep_going)
-        goto fail;
+      if (!keep_going) goto fail;
     }
     frames_corrupted += corrupted;
 
-    if (progress)
-      show_progress(frame_in, frame_out, dx_time);
+    if (progress) show_progress(frame_in, frame_out, dx_time);
 
     if (!noblit && img) {
-      const int PLANES_YUV[] = {VPX_PLANE_Y, VPX_PLANE_U, VPX_PLANE_V};
-      const int PLANES_YVU[] = {VPX_PLANE_Y, VPX_PLANE_V, VPX_PLANE_U};
+      const int PLANES_YUV[] = { VPX_PLANE_Y, VPX_PLANE_U, VPX_PLANE_V };
+      const int PLANES_YVU[] = { VPX_PLANE_Y, VPX_PLANE_V, VPX_PLANE_U };
       const int *planes = flipuv ? PLANES_YVU : PLANES_YUV;
 
       if (do_scale) {
@@ -836,8 +786,8 @@ static int main_loop(int argc, const char **argv_) {
               render_height = render_size[1];
             }
           }
-          scaled_img = vpx_img_alloc(NULL, img->fmt, render_width,
-                                     render_height, 16);
+          scaled_img =
+              vpx_img_alloc(NULL, img->fmt, render_width, render_height, 16);
           scaled_img->bit_depth = img->bit_depth;
         }
 
@@ -846,7 +796,8 @@ static int main_loop(int argc, const char **argv_) {
           libyuv_scale(img, scaled_img, kFilterBox);
           img = scaled_img;
 #else
-          fprintf(stderr, "Failed  to scale output frame: %s.\n"
+          fprintf(stderr,
+                  "Failed  to scale output frame: %s.\n"
                   "Scaling is disabled in this configuration. "
                   "To enable scaling, configure with --enable-libyuv\n",
                   vpx_codec_error(&decoder));
@@ -861,22 +812,22 @@ static int main_loop(int argc, const char **argv_) {
       }
       // Shift up or down if necessary
       if (output_bit_depth != 0 && output_bit_depth != img->bit_depth) {
-        const vpx_img_fmt_t shifted_fmt = output_bit_depth == 8 ?
-            img->fmt ^ (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) :
-            img->fmt | VPX_IMG_FMT_HIGHBITDEPTH;
+        const vpx_img_fmt_t shifted_fmt =
+            output_bit_depth == 8
+                ? img->fmt ^ (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH)
+                : img->fmt | VPX_IMG_FMT_HIGHBITDEPTH;
         if (img_shifted &&
             img_shifted_realloc_required(img, img_shifted, shifted_fmt)) {
           vpx_img_free(img_shifted);
           img_shifted = NULL;
         }
         if (!img_shifted) {
-          img_shifted = vpx_img_alloc(NULL, shifted_fmt,
-                                      img->d_w, img->d_h, 16);
+          img_shifted =
+              vpx_img_alloc(NULL, shifted_fmt, img->d_w, img->d_h, 16);
           img_shifted->bit_depth = output_bit_depth;
         }
         if (output_bit_depth > img->bit_depth) {
-          vpx_img_upshift(img_shifted, img,
-                          output_bit_depth - img->bit_depth);
+          vpx_img_upshift(img_shifted, img, output_bit_depth - img->bit_depth);
         } else {
           vpx_img_downshift(img_shifted, img,
                             img->bit_depth - output_bit_depth);
@@ -887,7 +838,7 @@ static int main_loop(int argc, const char **argv_) {
 
       if (single_file) {
         if (use_y4m) {
-          char buf[Y4M_BUFFER_SIZE] = {0};
+          char buf[Y4M_BUFFER_SIZE] = { 0 };
           size_t len = 0;
           if (img->fmt == VPX_IMG_FMT_I440 || img->fmt == VPX_IMG_FMT_I44016) {
             fprintf(stderr, "Cannot produce y4m output for 440 sampling.\n");
@@ -895,11 +846,9 @@ static int main_loop(int argc, const char **argv_) {
           }
           if (frame_out == 1) {
             // Y4M file header
-            len = y4m_write_file_header(buf, sizeof(buf),
-                                        vpx_input_ctx.width,
-                                        vpx_input_ctx.height,
-                                        &vpx_input_ctx.framerate,
-                                        img->fmt, img->bit_depth);
+            len = y4m_write_file_header(
+                buf, sizeof(buf), vpx_input_ctx.width, vpx_input_ctx.height,
+                &vpx_input_ctx.framerate, img->fmt, img->bit_depth);
             if (do_md5) {
               MD5Update(&md5_ctx, (md5byte *)buf, (unsigned int)len);
             } else {
@@ -927,7 +876,8 @@ static int main_loop(int argc, const char **argv_) {
             }
             if (opt_yv12) {
               if ((img->fmt != VPX_IMG_FMT_I420 &&
-                   img->fmt != VPX_IMG_FMT_YV12) || img->bit_depth != 8) {
+                   img->fmt != VPX_IMG_FMT_YV12) ||
+                  img->bit_depth != 8) {
                 fprintf(stderr, "Cannot produce yv12 output for bit-stream.\n");
                 goto fail;
               }
@@ -941,8 +891,8 @@ static int main_loop(int argc, const char **argv_) {
           write_image_file(img, planes, outfile);
         }
       } else {
-        generate_filename(outfile_pattern, outfile_name, PATH_MAX,
-                          img->d_w, img->d_h, frame_in);
+        generate_filename(outfile_pattern, outfile_name, PATH_MAX, img->d_w,
+                          img->d_h, frame_in);
         if (do_md5) {
           MD5Init(&md5_ctx);
           update_image_md5(img, planes, &md5_ctx);
@@ -987,8 +937,7 @@ fail:
     webm_free(input.webm_ctx);
 #endif
 
-  if (input.vpx_input_ctx->file_type != FILE_TYPE_WEBM)
-    free(buf);
+  if (input.vpx_input_ctx->file_type != FILE_TYPE_WEBM) free(buf);
 
   if (scaled_img) vpx_img_free(scaled_img);
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -1023,7 +972,6 @@ int main(int argc, const char **argv_) {
     }
   }
   free(argv);
-  for (i = 0; !error && i < loops; i++)
-    error = main_loop(argc, argv_);
+  for (i = 0; !error && i < loops; i++) error = main_loop(argc, argv_);
   return error;
 }
diff --git a/vpxenc.c b/vpxenc.c
index f3c82179801096bcf892cf29a056a24827ac394c..f9ff31c1960d372f66ad089648df4a0457323b9d 100644
--- a/vpxenc.c
+++ b/vpxenc.c
@@ -51,8 +51,7 @@
 #include "./y4minput.h"
 
 /* Swallow warnings about unused results of fread/fwrite */
-static size_t wrap_fread(void *ptr, size_t size, size_t nmemb,
-                         FILE *stream) {
+static size_t wrap_fread(void *ptr, size_t size, size_t nmemb, FILE *stream) {
   return fread(ptr, size, nmemb, stream);
 }
 #define fread wrap_fread
@@ -63,7 +62,6 @@ static size_t wrap_fwrite(const void *ptr, size_t size, size_t nmemb,
 }
 #define fwrite wrap_fwrite
 
-
 static const char *exec_name;
 
 static void warn_or_exit_on_errorv(vpx_codec_ctx_t *ctx, int fatal,
@@ -74,11 +72,9 @@ static void warn_or_exit_on_errorv(vpx_codec_ctx_t *ctx, int fatal,
     vfprintf(stderr, s, ap);
     fprintf(stderr, ": %s\n", vpx_codec_error(ctx));
 
-    if (detail)
-      fprintf(stderr, "    %s\n", detail);
+    if (detail) fprintf(stderr, "    %s\n", detail);
 
-    if (fatal)
-      exit(EXIT_FAILURE);
+    if (fatal) exit(EXIT_FAILURE);
   }
 }
 
@@ -105,8 +101,7 @@ static int read_frame(struct VpxInputContext *input_ctx, vpx_image_t *img) {
   int shortread = 0;
 
   if (input_ctx->file_type == FILE_TYPE_Y4M) {
-    if (y4m_input_fetch_frame(y4m, f, img) < 1)
-      return 0;
+    if (y4m_input_fetch_frame(y4m, f, img) < 1) return 0;
   } else {
     shortread = read_yuv_frame(input_ctx, img);
   }
@@ -128,250 +123,261 @@ static int fourcc_is_ivf(const char detect[4]) {
   return 0;
 }
 
-static const arg_def_t debugmode = ARG_DEF(
-    "D", "debug", 0, "Debug mode (makes output deterministic)");
-static const arg_def_t outputfile = ARG_DEF(
-    "o", "output", 1, "Output filename");
-static const arg_def_t use_yv12 = ARG_DEF(
-    NULL, "yv12", 0, "Input file is YV12 ");
-static const arg_def_t use_i420 = ARG_DEF(
-    NULL, "i420", 0, "Input file is I420 (default)");
-static const arg_def_t use_i422 = ARG_DEF(
-    NULL, "i422", 0, "Input file is I422");
-static const arg_def_t use_i444 = ARG_DEF(
-    NULL, "i444", 0, "Input file is I444");
-static const arg_def_t use_i440 = ARG_DEF(
-    NULL, "i440", 0, "Input file is I440");
-static const arg_def_t codecarg = ARG_DEF(
-    NULL, "codec", 1, "Codec to use");
-static const arg_def_t passes = ARG_DEF(
-    "p", "passes", 1, "Number of passes (1/2)");
-static const arg_def_t pass_arg = ARG_DEF(
-    NULL, "pass", 1, "Pass to execute (1/2)");
-static const arg_def_t fpf_name = ARG_DEF(
-    NULL, "fpf", 1, "First pass statistics file name");
+static const arg_def_t debugmode =
+    ARG_DEF("D", "debug", 0, "Debug mode (makes output deterministic)");
+static const arg_def_t outputfile =
+    ARG_DEF("o", "output", 1, "Output filename");
+static const arg_def_t use_yv12 =
+    ARG_DEF(NULL, "yv12", 0, "Input file is YV12 ");
+static const arg_def_t use_i420 =
+    ARG_DEF(NULL, "i420", 0, "Input file is I420 (default)");
+static const arg_def_t use_i422 =
+    ARG_DEF(NULL, "i422", 0, "Input file is I422");
+static const arg_def_t use_i444 =
+    ARG_DEF(NULL, "i444", 0, "Input file is I444");
+static const arg_def_t use_i440 =
+    ARG_DEF(NULL, "i440", 0, "Input file is I440");
+static const arg_def_t codecarg = ARG_DEF(NULL, "codec", 1, "Codec to use");
+static const arg_def_t passes =
+    ARG_DEF("p", "passes", 1, "Number of passes (1/2)");
+static const arg_def_t pass_arg =
+    ARG_DEF(NULL, "pass", 1, "Pass to execute (1/2)");
+static const arg_def_t fpf_name =
+    ARG_DEF(NULL, "fpf", 1, "First pass statistics file name");
 #if CONFIG_FP_MB_STATS
-static const arg_def_t fpmbf_name = ARG_DEF(
-    NULL, "fpmbf", 1, "First pass block statistics file name");
+static const arg_def_t fpmbf_name =
+    ARG_DEF(NULL, "fpmbf", 1, "First pass block statistics file name");
 #endif
-static const arg_def_t limit = ARG_DEF(
-    NULL, "limit", 1, "Stop encoding after n input frames");
-static const arg_def_t skip = ARG_DEF(
-    NULL, "skip", 1, "Skip the first n input frames");
-static const arg_def_t deadline = ARG_DEF(
-    "d", "deadline", 1, "Deadline per frame (usec)");
-static const arg_def_t best_dl = ARG_DEF(
-    NULL, "best", 0, "Use Best Quality Deadline");
-static const arg_def_t good_dl = ARG_DEF(
-    NULL, "good", 0, "Use Good Quality Deadline");
-static const arg_def_t rt_dl = ARG_DEF(
-    NULL, "rt", 0, "Use Realtime Quality Deadline");
-static const arg_def_t quietarg = ARG_DEF(
-    "q", "quiet", 0, "Do not print encode progress");
-static const arg_def_t verbosearg = ARG_DEF(
-    "v", "verbose", 0, "Show encoder parameters");
-static const arg_def_t psnrarg = ARG_DEF(
-    NULL, "psnr", 0, "Show PSNR in status line");
+static const arg_def_t limit =
+    ARG_DEF(NULL, "limit", 1, "Stop encoding after n input frames");
+static const arg_def_t skip =
+    ARG_DEF(NULL, "skip", 1, "Skip the first n input frames");
+static const arg_def_t deadline =
+    ARG_DEF("d", "deadline", 1, "Deadline per frame (usec)");
+static const arg_def_t best_dl =
+    ARG_DEF(NULL, "best", 0, "Use Best Quality Deadline");
+static const arg_def_t good_dl =
+    ARG_DEF(NULL, "good", 0, "Use Good Quality Deadline");
+static const arg_def_t rt_dl =
+    ARG_DEF(NULL, "rt", 0, "Use Realtime Quality Deadline");
+static const arg_def_t quietarg =
+    ARG_DEF("q", "quiet", 0, "Do not print encode progress");
+static const arg_def_t verbosearg =
+    ARG_DEF("v", "verbose", 0, "Show encoder parameters");
+static const arg_def_t psnrarg =
+    ARG_DEF(NULL, "psnr", 0, "Show PSNR in status line");
 
 static const struct arg_enum_list test_decode_enum[] = {
-  {"off",   TEST_DECODE_OFF},
-  {"fatal", TEST_DECODE_FATAL},
-  {"warn",  TEST_DECODE_WARN},
-  {NULL, 0}
+  { "off", TEST_DECODE_OFF },
+  { "fatal", TEST_DECODE_FATAL },
+  { "warn", TEST_DECODE_WARN },
+  { NULL, 0 }
 };
 static const arg_def_t recontest = ARG_DEF_ENUM(
     NULL, "test-decode", 1, "Test encode/decode mismatch", test_decode_enum);
-static const arg_def_t framerate = ARG_DEF(
-    NULL, "fps", 1, "Stream frame rate (rate/scale)");
-static const arg_def_t use_webm = ARG_DEF(
-    NULL, "webm", 0, "Output WebM (default when WebM IO is enabled)");
-static const arg_def_t use_ivf = ARG_DEF(
-    NULL, "ivf", 0, "Output IVF");
-static const arg_def_t out_part = ARG_DEF(
-    "P", "output-partitions", 0,
-    "Makes encoder output partitions. Requires IVF output!");
-static const arg_def_t q_hist_n = ARG_DEF(
-    NULL, "q-hist", 1, "Show quantizer histogram (n-buckets)");
-static const arg_def_t rate_hist_n = ARG_DEF(
-    NULL, "rate-hist", 1, "Show rate histogram (n-buckets)");
-static const arg_def_t disable_warnings = ARG_DEF(
-    NULL, "disable-warnings", 0,
-    "Disable warnings about potentially incorrect encode settings.");
-static const arg_def_t disable_warning_prompt = ARG_DEF(
-    "y", "disable-warning-prompt", 0,
-    "Display warnings, but do not prompt user to continue.");
+static const arg_def_t framerate =
+    ARG_DEF(NULL, "fps", 1, "Stream frame rate (rate/scale)");
+static const arg_def_t use_webm =
+    ARG_DEF(NULL, "webm", 0, "Output WebM (default when WebM IO is enabled)");
+static const arg_def_t use_ivf = ARG_DEF(NULL, "ivf", 0, "Output IVF");
+static const arg_def_t out_part =
+    ARG_DEF("P", "output-partitions", 0,
+            "Makes encoder output partitions. Requires IVF output!");
+static const arg_def_t q_hist_n =
+    ARG_DEF(NULL, "q-hist", 1, "Show quantizer histogram (n-buckets)");
+static const arg_def_t rate_hist_n =
+    ARG_DEF(NULL, "rate-hist", 1, "Show rate histogram (n-buckets)");
+static const arg_def_t disable_warnings =
+    ARG_DEF(NULL, "disable-warnings", 0,
+            "Disable warnings about potentially incorrect encode settings.");
+static const arg_def_t disable_warning_prompt =
+    ARG_DEF("y", "disable-warning-prompt", 0,
+            "Display warnings, but do not prompt user to continue.");
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static const arg_def_t test16bitinternalarg = ARG_DEF(
     NULL, "test-16bit-internal", 0, "Force use of 16 bit internal buffer");
 #endif
 
-static const arg_def_t *main_args[] = {
-  &debugmode,
-  &outputfile, &codecarg, &passes, &pass_arg, &fpf_name, &limit, &skip,
-  &deadline, &best_dl, &good_dl, &rt_dl,
-  &quietarg, &verbosearg, &psnrarg, &use_webm, &use_ivf, &out_part, &q_hist_n,
-  &rate_hist_n, &disable_warnings, &disable_warning_prompt, &recontest,
-  NULL
-};
-
-static const arg_def_t usage = ARG_DEF(
-    "u", "usage", 1, "Usage profile number to use");
-static const arg_def_t threads = ARG_DEF(
-    "t", "threads", 1, "Max number of threads to use");
-static const arg_def_t profile = ARG_DEF(
-    NULL, "profile", 1, "Bitstream profile number to use");
+static const arg_def_t *main_args[] = { &debugmode,
+                                        &outputfile,
+                                        &codecarg,
+                                        &passes,
+                                        &pass_arg,
+                                        &fpf_name,
+                                        &limit,
+                                        &skip,
+                                        &deadline,
+                                        &best_dl,
+                                        &good_dl,
+                                        &rt_dl,
+                                        &quietarg,
+                                        &verbosearg,
+                                        &psnrarg,
+                                        &use_webm,
+                                        &use_ivf,
+                                        &out_part,
+                                        &q_hist_n,
+                                        &rate_hist_n,
+                                        &disable_warnings,
+                                        &disable_warning_prompt,
+                                        &recontest,
+                                        NULL };
+
+static const arg_def_t usage =
+    ARG_DEF("u", "usage", 1, "Usage profile number to use");
+static const arg_def_t threads =
+    ARG_DEF("t", "threads", 1, "Max number of threads to use");
+static const arg_def_t profile =
+    ARG_DEF(NULL, "profile", 1, "Bitstream profile number to use");
 static const arg_def_t width = ARG_DEF("w", "width", 1, "Frame width");
 static const arg_def_t height = ARG_DEF("h", "height", 1, "Frame height");
 #if CONFIG_WEBM_IO
 static const struct arg_enum_list stereo_mode_enum[] = {
-  {"mono", STEREO_FORMAT_MONO},
-  {"left-right", STEREO_FORMAT_LEFT_RIGHT},
-  {"bottom-top", STEREO_FORMAT_BOTTOM_TOP},
-  {"top-bottom", STEREO_FORMAT_TOP_BOTTOM},
-  {"right-left", STEREO_FORMAT_RIGHT_LEFT},
-  {NULL, 0}
+  { "mono", STEREO_FORMAT_MONO },
+  { "left-right", STEREO_FORMAT_LEFT_RIGHT },
+  { "bottom-top", STEREO_FORMAT_BOTTOM_TOP },
+  { "top-bottom", STEREO_FORMAT_TOP_BOTTOM },
+  { "right-left", STEREO_FORMAT_RIGHT_LEFT },
+  { NULL, 0 }
 };
 static const arg_def_t stereo_mode = ARG_DEF_ENUM(
     NULL, "stereo-mode", 1, "Stereo 3D video format", stereo_mode_enum);
 #endif
 static const arg_def_t timebase = ARG_DEF(
     NULL, "timebase", 1, "Output timestamp precision (fractional seconds)");
-static const arg_def_t error_resilient = ARG_DEF(
-    NULL, "error-resilient", 1, "Enable error resiliency features");
-static const arg_def_t lag_in_frames = ARG_DEF(
-    NULL, "lag-in-frames", 1, "Max number of frames to lag");
-
-static const arg_def_t *global_args[] = {
-  &use_yv12, &use_i420, &use_i422, &use_i444, &use_i440,
-  &usage, &threads, &profile,
-  &width, &height,
+static const arg_def_t error_resilient =
+    ARG_DEF(NULL, "error-resilient", 1, "Enable error resiliency features");
+static const arg_def_t lag_in_frames =
+    ARG_DEF(NULL, "lag-in-frames", 1, "Max number of frames to lag");
+
+static const arg_def_t *global_args[] = { &use_yv12,
+                                          &use_i420,
+                                          &use_i422,
+                                          &use_i444,
+                                          &use_i440,
+                                          &usage,
+                                          &threads,
+                                          &profile,
+                                          &width,
+                                          &height,
 #if CONFIG_WEBM_IO
-  &stereo_mode,
+                                          &stereo_mode,
 #endif
-  &timebase, &framerate,
-  &error_resilient,
+                                          &timebase,
+                                          &framerate,
+                                          &error_resilient,
 #if CONFIG_VPX_HIGHBITDEPTH
-  &test16bitinternalarg,
+                                          &test16bitinternalarg,
 #endif
-  &lag_in_frames, NULL
-};
-
-static const arg_def_t dropframe_thresh = ARG_DEF(
-    NULL, "drop-frame", 1, "Temporal resampling threshold (buf %)");
-static const arg_def_t resize_allowed = ARG_DEF(
-    NULL, "resize-allowed", 1, "Spatial resampling enabled (bool)");
-static const arg_def_t resize_width = ARG_DEF(
-    NULL, "resize-width", 1, "Width of encoded frame");
-static const arg_def_t resize_height = ARG_DEF(
-    NULL, "resize-height", 1, "Height of encoded frame");
-static const arg_def_t resize_up_thresh = ARG_DEF(
-    NULL, "resize-up", 1, "Upscale threshold (buf %)");
-static const arg_def_t resize_down_thresh = ARG_DEF(
-    NULL, "resize-down", 1, "Downscale threshold (buf %)");
-static const struct arg_enum_list end_usage_enum[] = {
-  {"vbr", VPX_VBR},
-  {"cbr", VPX_CBR},
-  {"cq",  VPX_CQ},
-  {"q",   VPX_Q},
-  {NULL, 0}
-};
-static const arg_def_t end_usage = ARG_DEF_ENUM(
-    NULL, "end-usage", 1, "Rate control mode", end_usage_enum);
-static const arg_def_t target_bitrate = ARG_DEF(
-    NULL, "target-bitrate", 1, "Bitrate (kbps)");
-static const arg_def_t min_quantizer = ARG_DEF(
-    NULL, "min-q", 1, "Minimum (best) quantizer");
-static const arg_def_t max_quantizer = ARG_DEF(
-    NULL, "max-q", 1, "Maximum (worst) quantizer");
-static const arg_def_t undershoot_pct = ARG_DEF(
-    NULL, "undershoot-pct", 1, "Datarate undershoot (min) target (%)");
-static const arg_def_t overshoot_pct = ARG_DEF(
-    NULL, "overshoot-pct", 1, "Datarate overshoot (max) target (%)");
-static const arg_def_t buf_sz = ARG_DEF(
-    NULL, "buf-sz", 1, "Client buffer size (ms)");
-static const arg_def_t buf_initial_sz = ARG_DEF(
-    NULL, "buf-initial-sz", 1, "Client initial buffer size (ms)");
-static const arg_def_t buf_optimal_sz = ARG_DEF(
-    NULL, "buf-optimal-sz", 1, "Client optimal buffer size (ms)");
+                                          &lag_in_frames,
+                                          NULL };
+
+static const arg_def_t dropframe_thresh =
+    ARG_DEF(NULL, "drop-frame", 1, "Temporal resampling threshold (buf %)");
+static const arg_def_t resize_allowed =
+    ARG_DEF(NULL, "resize-allowed", 1, "Spatial resampling enabled (bool)");
+static const arg_def_t resize_width =
+    ARG_DEF(NULL, "resize-width", 1, "Width of encoded frame");
+static const arg_def_t resize_height =
+    ARG_DEF(NULL, "resize-height", 1, "Height of encoded frame");
+static const arg_def_t resize_up_thresh =
+    ARG_DEF(NULL, "resize-up", 1, "Upscale threshold (buf %)");
+static const arg_def_t resize_down_thresh =
+    ARG_DEF(NULL, "resize-down", 1, "Downscale threshold (buf %)");
+static const struct arg_enum_list end_usage_enum[] = { { "vbr", VPX_VBR },
+                                                       { "cbr", VPX_CBR },
+                                                       { "cq", VPX_CQ },
+                                                       { "q", VPX_Q },
+                                                       { NULL, 0 } };
+static const arg_def_t end_usage =
+    ARG_DEF_ENUM(NULL, "end-usage", 1, "Rate control mode", end_usage_enum);
+static const arg_def_t target_bitrate =
+    ARG_DEF(NULL, "target-bitrate", 1, "Bitrate (kbps)");
+static const arg_def_t min_quantizer =
+    ARG_DEF(NULL, "min-q", 1, "Minimum (best) quantizer");
+static const arg_def_t max_quantizer =
+    ARG_DEF(NULL, "max-q", 1, "Maximum (worst) quantizer");
+static const arg_def_t undershoot_pct =
+    ARG_DEF(NULL, "undershoot-pct", 1, "Datarate undershoot (min) target (%)");
+static const arg_def_t overshoot_pct =
+    ARG_DEF(NULL, "overshoot-pct", 1, "Datarate overshoot (max) target (%)");
+static const arg_def_t buf_sz =
+    ARG_DEF(NULL, "buf-sz", 1, "Client buffer size (ms)");
+static const arg_def_t buf_initial_sz =
+    ARG_DEF(NULL, "buf-initial-sz", 1, "Client initial buffer size (ms)");
+static const arg_def_t buf_optimal_sz =
+    ARG_DEF(NULL, "buf-optimal-sz", 1, "Client optimal buffer size (ms)");
 static const arg_def_t *rc_args[] = {
-  &dropframe_thresh, &resize_allowed, &resize_width, &resize_height,
-  &resize_up_thresh, &resize_down_thresh, &end_usage, &target_bitrate,
-  &min_quantizer, &max_quantizer, &undershoot_pct, &overshoot_pct, &buf_sz,
-  &buf_initial_sz, &buf_optimal_sz, NULL
+  &dropframe_thresh, &resize_allowed,     &resize_width,   &resize_height,
+  &resize_up_thresh, &resize_down_thresh, &end_usage,      &target_bitrate,
+  &min_quantizer,    &max_quantizer,      &undershoot_pct, &overshoot_pct,
+  &buf_sz,           &buf_initial_sz,     &buf_optimal_sz, NULL
 };
 
-
-static const arg_def_t bias_pct = ARG_DEF(
-    NULL, "bias-pct", 1, "CBR/VBR bias (0=CBR, 100=VBR)");
-static const arg_def_t minsection_pct = ARG_DEF(
-    NULL, "minsection-pct", 1, "GOP min bitrate (% of target)");
-static const arg_def_t maxsection_pct = ARG_DEF(
-    NULL, "maxsection-pct", 1, "GOP max bitrate (% of target)");
-static const arg_def_t *rc_twopass_args[] = {
-  &bias_pct, &minsection_pct, &maxsection_pct, NULL
-};
-
-
-static const arg_def_t kf_min_dist = ARG_DEF(
-    NULL, "kf-min-dist", 1, "Minimum keyframe interval (frames)");
-static const arg_def_t kf_max_dist = ARG_DEF(
-    NULL, "kf-max-dist", 1, "Maximum keyframe interval (frames)");
-static const arg_def_t kf_disabled = ARG_DEF(
-    NULL, "disable-kf", 0, "Disable keyframe placement");
-static const arg_def_t *kf_args[] = {
-  &kf_min_dist, &kf_max_dist, &kf_disabled, NULL
-};
-
-
-static const arg_def_t noise_sens = ARG_DEF(
-    NULL, "noise-sensitivity", 1, "Noise sensitivity (frames to blur)");
-static const arg_def_t sharpness = ARG_DEF(
-    NULL, "sharpness", 1, "Loop filter sharpness (0..7)");
-static const arg_def_t static_thresh = ARG_DEF(
-    NULL, "static-thresh", 1, "Motion detection threshold");
-static const arg_def_t auto_altref = ARG_DEF(
-    NULL, "auto-alt-ref", 1, "Enable automatic alt reference frames");
-static const arg_def_t arnr_maxframes = ARG_DEF(
-    NULL, "arnr-maxframes", 1, "AltRef max frames (0..15)");
-static const arg_def_t arnr_strength = ARG_DEF(
-    NULL, "arnr-strength", 1, "AltRef filter strength (0..6)");
-static const arg_def_t arnr_type = ARG_DEF(
-    NULL, "arnr-type", 1, "AltRef type");
+static const arg_def_t bias_pct =
+    ARG_DEF(NULL, "bias-pct", 1, "CBR/VBR bias (0=CBR, 100=VBR)");
+static const arg_def_t minsection_pct =
+    ARG_DEF(NULL, "minsection-pct", 1, "GOP min bitrate (% of target)");
+static const arg_def_t maxsection_pct =
+    ARG_DEF(NULL, "maxsection-pct", 1, "GOP max bitrate (% of target)");
+static const arg_def_t *rc_twopass_args[] = { &bias_pct, &minsection_pct,
+                                              &maxsection_pct, NULL };
+
+static const arg_def_t kf_min_dist =
+    ARG_DEF(NULL, "kf-min-dist", 1, "Minimum keyframe interval (frames)");
+static const arg_def_t kf_max_dist =
+    ARG_DEF(NULL, "kf-max-dist", 1, "Maximum keyframe interval (frames)");
+static const arg_def_t kf_disabled =
+    ARG_DEF(NULL, "disable-kf", 0, "Disable keyframe placement");
+static const arg_def_t *kf_args[] = { &kf_min_dist, &kf_max_dist, &kf_disabled,
+                                      NULL };
+
+static const arg_def_t noise_sens =
+    ARG_DEF(NULL, "noise-sensitivity", 1, "Noise sensitivity (frames to blur)");
+static const arg_def_t sharpness =
+    ARG_DEF(NULL, "sharpness", 1, "Loop filter sharpness (0..7)");
+static const arg_def_t static_thresh =
+    ARG_DEF(NULL, "static-thresh", 1, "Motion detection threshold");
+static const arg_def_t auto_altref =
+    ARG_DEF(NULL, "auto-alt-ref", 1, "Enable automatic alt reference frames");
+static const arg_def_t arnr_maxframes =
+    ARG_DEF(NULL, "arnr-maxframes", 1, "AltRef max frames (0..15)");
+static const arg_def_t arnr_strength =
+    ARG_DEF(NULL, "arnr-strength", 1, "AltRef filter strength (0..6)");
+static const arg_def_t arnr_type = ARG_DEF(NULL, "arnr-type", 1, "AltRef type");
 static const struct arg_enum_list tuning_enum[] = {
-  {"psnr", VPX_TUNE_PSNR},
-  {"ssim", VPX_TUNE_SSIM},
-  {NULL, 0}
+  { "psnr", VPX_TUNE_PSNR }, { "ssim", VPX_TUNE_SSIM }, { NULL, 0 }
 };
-static const arg_def_t tune_ssim = ARG_DEF_ENUM(
-    NULL, "tune", 1, "Material to favor", tuning_enum);
-static const arg_def_t cq_level = ARG_DEF(
-    NULL, "cq-level", 1, "Constant/Constrained Quality level");
-static const arg_def_t max_intra_rate_pct = ARG_DEF(
-    NULL, "max-intra-rate", 1, "Max I-frame bitrate (pct)");
+static const arg_def_t tune_ssim =
+    ARG_DEF_ENUM(NULL, "tune", 1, "Material to favor", tuning_enum);
+static const arg_def_t cq_level =
+    ARG_DEF(NULL, "cq-level", 1, "Constant/Constrained Quality level");
+static const arg_def_t max_intra_rate_pct =
+    ARG_DEF(NULL, "max-intra-rate", 1, "Max I-frame bitrate (pct)");
 
 #if CONFIG_VP10_ENCODER
-static const arg_def_t cpu_used_vp9 = ARG_DEF(
-    NULL, "cpu-used", 1, "CPU Used (-8..8)");
-static const arg_def_t tile_cols = ARG_DEF(
-    NULL, "tile-columns", 1, "Number of tile columns to use, log2");
-static const arg_def_t tile_rows = ARG_DEF(
-    NULL, "tile-rows", 1, "Number of tile rows to use, log2");
-static const arg_def_t lossless = ARG_DEF(
-    NULL, "lossless", 1, "Lossless mode (0: false (default), 1: true)");
+static const arg_def_t cpu_used_vp9 =
+    ARG_DEF(NULL, "cpu-used", 1, "CPU Used (-8..8)");
+static const arg_def_t tile_cols =
+    ARG_DEF(NULL, "tile-columns", 1, "Number of tile columns to use, log2");
+static const arg_def_t tile_rows =
+    ARG_DEF(NULL, "tile-rows", 1, "Number of tile rows to use, log2");
+static const arg_def_t lossless =
+    ARG_DEF(NULL, "lossless", 1, "Lossless mode (0: false (default), 1: true)");
 static const arg_def_t frame_parallel_decoding = ARG_DEF(
     NULL, "frame-parallel", 1, "Enable frame parallel decodability features");
 static const arg_def_t aq_mode = ARG_DEF(
     NULL, "aq-mode", 1,
     "Adaptive quantization mode (0: off (default), 1: variance 2: complexity, "
     "3: cyclic refresh, 4: equator360)");
-static const arg_def_t frame_periodic_boost = ARG_DEF(
-    NULL, "frame-boost", 1,
-    "Enable frame periodic boost (0: off (default), 1: on)");
+static const arg_def_t frame_periodic_boost =
+    ARG_DEF(NULL, "frame-boost", 1,
+            "Enable frame periodic boost (0: off (default), 1: on)");
 static const arg_def_t gf_cbr_boost_pct = ARG_DEF(
     NULL, "gf-cbr-boost", 1, "Boost for Golden Frame in CBR mode (pct)");
-static const arg_def_t max_inter_rate_pct = ARG_DEF(
-    NULL, "max-inter-rate", 1, "Max P-frame bitrate (pct)");
+static const arg_def_t max_inter_rate_pct =
+    ARG_DEF(NULL, "max-inter-rate", 1, "Max P-frame bitrate (pct)");
 static const arg_def_t min_gf_interval = ARG_DEF(
     NULL, "min-gf-interval", 1,
     "min gf/arf frame interval (default 0, indicating in-built behavior)");
@@ -391,30 +397,27 @@ static const struct arg_enum_list color_space_enum[] = {
   { NULL, 0 }
 };
 
-static const arg_def_t input_color_space = ARG_DEF_ENUM(
-    NULL, "color-space", 1,
-    "The color space of input content:", color_space_enum);
+static const arg_def_t input_color_space =
+    ARG_DEF_ENUM(NULL, "color-space", 1, "The color space of input content:",
+                 color_space_enum);
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static const struct arg_enum_list bitdepth_enum[] = {
-  {"8",  VPX_BITS_8},
-  {"10", VPX_BITS_10},
-  {"12", VPX_BITS_12},
-  {NULL, 0}
+  { "8", VPX_BITS_8 }, { "10", VPX_BITS_10 }, { "12", VPX_BITS_12 }, { NULL, 0 }
 };
 
 static const arg_def_t bitdeptharg = ARG_DEF_ENUM(
     "b", "bit-depth", 1,
     "Bit depth for codec (8 for version <=1, 10 or 12 for version 2)",
     bitdepth_enum);
-static const arg_def_t inbitdeptharg = ARG_DEF(
-    NULL, "input-bit-depth", 1, "Bit depth of input");
+static const arg_def_t inbitdeptharg =
+    ARG_DEF(NULL, "input-bit-depth", 1, "Bit depth of input");
 #endif
 
 static const struct arg_enum_list tune_content_enum[] = {
-  {"default", VPX_CONTENT_DEFAULT},
-  {"screen", VPX_CONTENT_SCREEN},
-  {NULL, 0}
+  { "default", VPX_CONTENT_DEFAULT },
+  { "screen", VPX_CONTENT_SCREEN },
+  { NULL, 0 }
 };
 
 static const arg_def_t tune_content = ARG_DEF_ENUM(
@@ -422,6 +425,7 @@ static const arg_def_t tune_content = ARG_DEF_ENUM(
 #endif
 
 #if CONFIG_VP10_ENCODER
+/* clang-format off */
 static const arg_def_t *vp10_args[] = {
   &cpu_used_vp9, &auto_altref, &sharpness, &static_thresh,
   &tile_cols, &tile_rows, &arnr_maxframes, &arnr_strength, &arnr_type,
@@ -445,6 +449,7 @@ static const int vp10_arg_ctrl_map[] = {
   VP9E_SET_MIN_GF_INTERVAL, VP9E_SET_MAX_GF_INTERVAL,
   0
 };
+/* clang-format on */
 #endif
 
 static const arg_def_t *no_args[] = { NULL };
@@ -470,17 +475,17 @@ void usage_exit(void) {
   fprintf(stderr, "\nVP10 Specific Options:\n");
   arg_show_usage(stderr, vp10_args);
 #endif
-  fprintf(stderr, "\nStream timebase (--timebase):\n"
+  fprintf(stderr,
+          "\nStream timebase (--timebase):\n"
           "  The desired precision of timestamps in the output, expressed\n"
           "  in fractional seconds. Default is 1/1000.\n");
   fprintf(stderr, "\nIncluded encoders:\n\n");
 
   for (i = 0; i < num_encoder; ++i) {
     const VpxInterface *const encoder = get_vpx_encoder_by_index(i);
-    const char* defstr = (i == (num_encoder - 1)) ? "(default)" : "";
-      fprintf(stderr, "    %-6s - %s %s\n",
-              encoder->name, vpx_codec_iface_name(encoder->codec_interface()),
-              defstr);
+    const char *defstr = (i == (num_encoder - 1)) ? "(default)" : "";
+    fprintf(stderr, "    %-6s - %s %s\n", encoder->name,
+            vpx_codec_iface_name(encoder->codec_interface()), defstr);
   }
   fprintf(stderr, "\n        ");
   fprintf(stderr, "Use --codec to switch to a non-default encoder.\n\n");
@@ -488,12 +493,12 @@ void usage_exit(void) {
   exit(EXIT_FAILURE);
 }
 
-#define mmin(a, b)  ((a) < (b) ? (a) : (b))
+#define mmin(a, b) ((a) < (b) ? (a) : (b))
 
 #if CONFIG_VPX_HIGHBITDEPTH
 static void find_mismatch_high(const vpx_image_t *const img1,
-                               const vpx_image_t *const img2,
-                               int yloc[4], int uloc[4], int vloc[4]) {
+                               const vpx_image_t *const img2, int yloc[4],
+                               int uloc[4], int vloc[4]) {
   uint16_t *plane1, *plane2;
   uint32_t stride1, stride2;
   const uint32_t bsize = 64;
@@ -506,10 +511,10 @@ static void find_mismatch_high(const vpx_image_t *const img1,
   int match = 1;
   uint32_t i, j;
   yloc[0] = yloc[1] = yloc[2] = yloc[3] = -1;
-  plane1 = (uint16_t*)img1->planes[VPX_PLANE_Y];
-  plane2 = (uint16_t*)img2->planes[VPX_PLANE_Y];
-  stride1 = img1->stride[VPX_PLANE_Y]/2;
-  stride2 = img2->stride[VPX_PLANE_Y]/2;
+  plane1 = (uint16_t *)img1->planes[VPX_PLANE_Y];
+  plane2 = (uint16_t *)img2->planes[VPX_PLANE_Y];
+  stride1 = img1->stride[VPX_PLANE_Y] / 2;
+  stride2 = img2->stride[VPX_PLANE_Y] / 2;
   for (i = 0, match = 1; match && i < img1->d_h; i += bsize) {
     for (j = 0; match && j < img1->d_w; j += bsize) {
       int k, l;
@@ -532,10 +537,10 @@ static void find_mismatch_high(const vpx_image_t *const img1,
   }
 
   uloc[0] = uloc[1] = uloc[2] = uloc[3] = -1;
-  plane1 = (uint16_t*)img1->planes[VPX_PLANE_U];
-  plane2 = (uint16_t*)img2->planes[VPX_PLANE_U];
-  stride1 = img1->stride[VPX_PLANE_U]/2;
-  stride2 = img2->stride[VPX_PLANE_U]/2;
+  plane1 = (uint16_t *)img1->planes[VPX_PLANE_U];
+  plane2 = (uint16_t *)img2->planes[VPX_PLANE_U];
+  stride1 = img1->stride[VPX_PLANE_U] / 2;
+  stride2 = img2->stride[VPX_PLANE_U] / 2;
   for (i = 0, match = 1; match && i < c_h; i += bsizey) {
     for (j = 0; match && j < c_w; j += bsizex) {
       int k, l;
@@ -558,10 +563,10 @@ static void find_mismatch_high(const vpx_image_t *const img1,
   }
 
   vloc[0] = vloc[1] = vloc[2] = vloc[3] = -1;
-  plane1 = (uint16_t*)img1->planes[VPX_PLANE_V];
-  plane2 = (uint16_t*)img2->planes[VPX_PLANE_V];
-  stride1 = img1->stride[VPX_PLANE_V]/2;
-  stride2 = img2->stride[VPX_PLANE_V]/2;
+  plane1 = (uint16_t *)img1->planes[VPX_PLANE_V];
+  plane2 = (uint16_t *)img2->planes[VPX_PLANE_V];
+  stride1 = img1->stride[VPX_PLANE_V] / 2;
+  stride2 = img2->stride[VPX_PLANE_V] / 2;
   for (i = 0, match = 1; match && i < c_h; i += bsizey) {
     for (j = 0; match && j < c_w; j += bsizex) {
       int k, l;
@@ -586,8 +591,8 @@ static void find_mismatch_high(const vpx_image_t *const img1,
 #endif
 
 static void find_mismatch(const vpx_image_t *const img1,
-                          const vpx_image_t *const img2,
-                          int yloc[4], int uloc[4], int vloc[4]) {
+                          const vpx_image_t *const img2, int yloc[4],
+                          int uloc[4], int vloc[4]) {
   const uint32_t bsize = 64;
   const uint32_t bsizey = bsize >> img1->y_chroma_shift;
   const uint32_t bsizex = bsize >> img1->x_chroma_shift;
@@ -678,8 +683,7 @@ static void find_mismatch(const vpx_image_t *const img1,
 static int compare_img(const vpx_image_t *const img1,
                        const vpx_image_t *const img2) {
   uint32_t l_w = img1->d_w;
-  uint32_t c_w =
-      (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift;
+  uint32_t c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift;
   const uint32_t c_h =
       (img1->d_h + img1->y_chroma_shift) >> img1->y_chroma_shift;
   uint32_t i;
@@ -713,85 +717,80 @@ static int compare_img(const vpx_image_t *const img1,
   return match;
 }
 
-
-#define NELEMENTS(x) (sizeof(x)/sizeof(x[0]))
+#define NELEMENTS(x) (sizeof(x) / sizeof(x[0]))
 #if CONFIG_VP10_ENCODER
 #define ARG_CTRL_CNT_MAX NELEMENTS(vp10_arg_ctrl_map)
 #endif
 
 #if !CONFIG_WEBM_IO
 typedef int stereo_format_t;
-struct EbmlGlobal { int debug; };
+struct EbmlGlobal {
+  int debug;
+};
 #endif
 
 /* Per-stream configuration */
 struct stream_config {
-  struct vpx_codec_enc_cfg  cfg;
-  const char               *out_fn;
-  const char               *stats_fn;
+  struct vpx_codec_enc_cfg cfg;
+  const char *out_fn;
+  const char *stats_fn;
 #if CONFIG_FP_MB_STATS
-  const char               *fpmb_stats_fn;
+  const char *fpmb_stats_fn;
 #endif
-  stereo_format_t           stereo_fmt;
-  int                       arg_ctrls[ARG_CTRL_CNT_MAX][2];
-  int                       arg_ctrl_cnt;
-  int                       write_webm;
-  int                       have_kf_max_dist;
+  stereo_format_t stereo_fmt;
+  int arg_ctrls[ARG_CTRL_CNT_MAX][2];
+  int arg_ctrl_cnt;
+  int write_webm;
+  int have_kf_max_dist;
 #if CONFIG_VPX_HIGHBITDEPTH
   // whether to use 16bit internal buffers
-  int                       use_16bit_internal;
+  int use_16bit_internal;
 #endif
 };
 
-
 struct stream_state {
-  int                       index;
-  struct stream_state      *next;
-  struct stream_config      config;
-  FILE                     *file;
-  struct rate_hist         *rate_hist;
-  struct EbmlGlobal         ebml;
-  uint64_t                  psnr_sse_total;
-  uint64_t                  psnr_samples_total;
-  double                    psnr_totals[4];
-  int                       psnr_count;
-  int                       counts[64];
-  vpx_codec_ctx_t           encoder;
-  unsigned int              frames_out;
-  uint64_t                  cx_time;
-  size_t                    nbytes;
-  stats_io_t                stats;
+  int index;
+  struct stream_state *next;
+  struct stream_config config;
+  FILE *file;
+  struct rate_hist *rate_hist;
+  struct EbmlGlobal ebml;
+  uint64_t psnr_sse_total;
+  uint64_t psnr_samples_total;
+  double psnr_totals[4];
+  int psnr_count;
+  int counts[64];
+  vpx_codec_ctx_t encoder;
+  unsigned int frames_out;
+  uint64_t cx_time;
+  size_t nbytes;
+  stats_io_t stats;
 #if CONFIG_FP_MB_STATS
-  stats_io_t                fpmb_stats;
+  stats_io_t fpmb_stats;
 #endif
-  struct vpx_image         *img;
-  vpx_codec_ctx_t           decoder;
-  int                       mismatch_seen;
+  struct vpx_image *img;
+  vpx_codec_ctx_t decoder;
+  int mismatch_seen;
 };
 
-
-static void validate_positive_rational(const char          *msg,
+static void validate_positive_rational(const char *msg,
                                        struct vpx_rational *rat) {
   if (rat->den < 0) {
     rat->num *= -1;
     rat->den *= -1;
   }
 
-  if (rat->num < 0)
-    die("Error: %s must be positive\n", msg);
+  if (rat->num < 0) die("Error: %s must be positive\n", msg);
 
-  if (!rat->den)
-    die("Error: %s has zero denominator\n", msg);
+  if (!rat->den) die("Error: %s has zero denominator\n", msg);
 }
 
-
 static void parse_global_config(struct VpxEncoderConfig *global, char **argv) {
-  char       **argi, **argj;
-  struct arg   arg;
+  char **argi, **argj;
+  struct arg arg;
   const int num_encoder = get_vpx_encoder_count();
 
-  if (num_encoder < 1)
-    die("Error: no valid encoder available\n");
+  if (num_encoder < 1) die("Error: no valid encoder available\n");
 
   /* Initialize default parameters */
   memset(global, 0, sizeof(*global));
@@ -817,8 +816,7 @@ static void parse_global_config(struct VpxEncoderConfig *global, char **argv) {
       global->pass = arg_parse_uint(&arg);
 
       if (global->pass < 1 || global->pass > 2)
-        die("Error: Invalid pass selected (%d)\n",
-            global->pass);
+        die("Error: Invalid pass selected (%d)\n", global->pass);
     } else if (arg_match(&arg, &usage, argi))
       global->usage = arg_parse_uint(&arg);
     else if (arg_match(&arg, &deadline, argi))
@@ -874,8 +872,8 @@ static void parse_global_config(struct VpxEncoderConfig *global, char **argv) {
   if (global->pass) {
     /* DWIM: Assume the user meant passes=2 if pass=2 is specified */
     if (global->pass > global->passes) {
-      warn("Assuming --pass=%d implies --passes=%d\n",
-           global->pass, global->pass);
+      warn("Assuming --pass=%d implies --passes=%d\n", global->pass,
+           global->pass);
       global->passes = global->pass;
     }
   }
@@ -886,27 +884,26 @@ static void parse_global_config(struct VpxEncoderConfig *global, char **argv) {
     // encoder
     if (global->codec != NULL && global->codec->name != NULL)
       global->passes = (strcmp(global->codec->name, "vp9") == 0 &&
-                        global->deadline != VPX_DL_REALTIME) ? 2 : 1;
+                        global->deadline != VPX_DL_REALTIME)
+                           ? 2
+                           : 1;
 #else
     global->passes = 1;
 #endif
   }
 
-  if (global->deadline == VPX_DL_REALTIME &&
-      global->passes > 1) {
+  if (global->deadline == VPX_DL_REALTIME && global->passes > 1) {
     warn("Enforcing one-pass encoding in realtime mode\n");
     global->passes = 1;
   }
 }
 
-
 static void open_input_file(struct VpxInputContext *input) {
   /* Parse certain options from the input file, if possible */
-  input->file = strcmp(input->filename, "-")
-      ? fopen(input->filename, "rb") : set_binary_mode(stdin);
+  input->file = strcmp(input->filename, "-") ? fopen(input->filename, "rb")
+                                             : set_binary_mode(stdin);
 
-  if (!input->file)
-    fatal("Failed to open input file");
+  if (!input->file) fatal("Failed to open input file");
 
   if (!fseeko(input->file, 0, SEEK_END)) {
     /* Input file is seekable. Figure out how long it is, so we can get
@@ -926,8 +923,7 @@ static void open_input_file(struct VpxInputContext *input) {
   input->detect.buf_read = fread(input->detect.buf, 1, 4, input->file);
   input->detect.position = 0;
 
-  if (input->detect.buf_read == 4
-      && file_is_y4m(input->detect.buf)) {
+  if (input->detect.buf_read == 4 && file_is_y4m(input->detect.buf)) {
     if (y4m_input_open(&input->y4m, input->file, input->detect.buf, 4,
                        input->only_i420) >= 0) {
       input->file_type = FILE_TYPE_Y4M;
@@ -948,11 +944,9 @@ static void open_input_file(struct VpxInputContext *input) {
   }
 }
 
-
 static void close_input_file(struct VpxInputContext *input) {
   fclose(input->file);
-  if (input->file_type == FILE_TYPE_Y4M)
-    y4m_input_close(&input->y4m);
+  if (input->file_type == FILE_TYPE_Y4M) y4m_input_close(&input->y4m);
 }
 
 static struct stream_state *new_stream(struct VpxEncoderConfig *global,
@@ -969,14 +963,12 @@ static struct stream_state *new_stream(struct VpxEncoderConfig *global,
     stream->index++;
     prev->next = stream;
   } else {
-    vpx_codec_err_t  res;
+    vpx_codec_err_t res;
 
     /* Populate encoder configuration */
     res = vpx_codec_enc_config_default(global->codec->codec_interface(),
-                                       &stream->config.cfg,
-                                       global->usage);
-    if (res)
-      fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res));
+                                       &stream->config.cfg, global->usage);
+    if (res) fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res));
 
     /* Change the default timebase to a high enough value so that the
      * encoder will always create strictly increasing timestamps.
@@ -1013,18 +1005,16 @@ static struct stream_state *new_stream(struct VpxEncoderConfig *global,
   return stream;
 }
 
-
 static int parse_stream_params(struct VpxEncoderConfig *global,
-                               struct stream_state  *stream,
-                               char **argv) {
-  char                   **argi, **argj;
-  struct arg               arg;
+                               struct stream_state *stream, char **argv) {
+  char **argi, **argj;
+  struct arg arg;
   static const arg_def_t **ctrl_args = no_args;
-  static const int        *ctrl_args_map = NULL;
-  struct stream_config    *config = &stream->config;
-  int                      eos_mark_found = 0;
+  static const int *ctrl_args_map = NULL;
+  struct stream_config *config = &stream->config;
+  int eos_mark_found = 0;
 #if CONFIG_VPX_HIGHBITDEPTH
-  int                      test_16bit_internal = 0;
+  int test_16bit_internal = 0;
 #endif
 
   // Handle codec specific options
@@ -1129,7 +1119,7 @@ static int parse_stream_params(struct VpxEncoderConfig *global,
     } else if (arg_match(&arg, &buf_optimal_sz, argi)) {
       config->cfg.rc_buf_optimal_sz = arg_parse_uint(&arg);
     } else if (arg_match(&arg, &bias_pct, argi)) {
-        config->cfg.rc_2pass_vbr_bias_pct = arg_parse_uint(&arg);
+      config->cfg.rc_2pass_vbr_bias_pct = arg_parse_uint(&arg);
       if (global->passes < 2)
         warn("option %s ignored in one-pass mode.\n", arg.name);
     } else if (arg_match(&arg, &minsection_pct, argi)) {
@@ -1176,43 +1166,41 @@ static int parse_stream_params(struct VpxEncoderConfig *global,
           if (ctrl_args_map != NULL && j < (int)ARG_CTRL_CNT_MAX) {
             config->arg_ctrls[j][0] = ctrl_args_map[i];
             config->arg_ctrls[j][1] = arg_parse_enum_or_int(&arg);
-            if (j == config->arg_ctrl_cnt)
-              config->arg_ctrl_cnt++;
+            if (j == config->arg_ctrl_cnt) config->arg_ctrl_cnt++;
           }
         }
       }
-      if (!match)
-        argj++;
+      if (!match) argj++;
     }
   }
 #if CONFIG_VPX_HIGHBITDEPTH
   if (strcmp(global->codec->name, "vp9") == 0 ||
       strcmp(global->codec->name, "vp10") == 0) {
-    config->use_16bit_internal = test_16bit_internal |
-                                 (config->cfg.g_profile > 1);
+    config->use_16bit_internal =
+        test_16bit_internal | (config->cfg.g_profile > 1);
   }
 #endif
   return eos_mark_found;
 }
 
-
-#define FOREACH_STREAM(func) \
-  do { \
-    struct stream_state *stream; \
+#define FOREACH_STREAM(func)                                \
+  do {                                                      \
+    struct stream_state *stream;                            \
     for (stream = streams; stream; stream = stream->next) { \
-      func; \
-    } \
+      func;                                                 \
+    }                                                       \
   } while (0)
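
The do/while (0) wrapper is what lets this multi-line macro expand to a single
statement, so call sites compose with unbraced if/else. A short illustration
using names from the surrounding code:

  /* With the wrapper this parses as intended; with bare braces instead, the
     ';' after the expansion would end the if and orphan the else. */
  if (!global.quiet)
    FOREACH_STREAM(stream->frames_out = 0);
  else
    fprintf(stderr, "\n");
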
 
-
 static void validate_stream_config(const struct stream_state *stream,
                                    const struct VpxEncoderConfig *global) {
   const struct stream_state *streami;
   (void)global;
 
   if (!stream->config.cfg.g_w || !stream->config.cfg.g_h)
-    fatal("Stream %d: Specify stream dimensions with --width (-w) "
-          " and --height (-h)", stream->index);
+    fatal(
+        "Stream %d: Specify stream dimensions with --width (-w) "
+        " and --height (-h)",
+        stream->index);
 
   // Check that the codec bit depth is at least the input bit depth.
   if (stream->config.cfg.g_input_bit_depth >
@@ -1259,9 +1247,7 @@ static void validate_stream_config(const struct stream_state *stream,
   }
 }
 
-
-static void set_stream_dimensions(struct stream_state *stream,
-                                  unsigned int w,
+static void set_stream_dimensions(struct stream_state *stream, unsigned int w,
                                   unsigned int h) {
   if (!stream->config.cfg.g_w) {
     if (!stream->config.cfg.g_h)
@@ -1274,7 +1260,6 @@ static void set_stream_dimensions(struct stream_state *stream,
   }
 }
 
-
 static void set_default_kf_interval(struct stream_state *stream,
                                     struct VpxEncoderConfig *global) {
   /* Use a max keyframe interval of 5 seconds, if none was
@@ -1287,7 +1272,7 @@ static void set_default_kf_interval(struct stream_state *stream,
   }
 }
 
-static const char* file_type_to_string(enum VideoFileType t) {
+static const char *file_type_to_string(enum VideoFileType t) {
   switch (t) {
     case FILE_TYPE_RAW: return "RAW";
     case FILE_TYPE_Y4M: return "Y4M";
@@ -1295,7 +1280,7 @@ static const char* file_type_to_string(enum VideoFileType t) {
   }
 }
 
-static const char* image_format_to_string(vpx_img_fmt_t f) {
+static const char *image_format_to_string(vpx_img_fmt_t f) {
   switch (f) {
     case VPX_IMG_FMT_I420: return "I420";
     case VPX_IMG_FMT_I422: return "I422";
@@ -1313,7 +1298,6 @@ static const char* image_format_to_string(vpx_img_fmt_t f) {
 static void show_stream_config(struct stream_state *stream,
                                struct VpxEncoderConfig *global,
                                struct VpxInputContext *input) {
-
 #define SHOW(field) \
   fprintf(stderr, "    %-28s = %d\n", #field, stream->config.cfg.field)
 
@@ -1321,8 +1305,7 @@ static void show_stream_config(struct stream_state *stream,
     fprintf(stderr, "Codec: %s\n",
             vpx_codec_iface_name(global->codec->codec_interface()));
     fprintf(stderr, "Source file: %s File Type: %s Format: %s\n",
-            input->filename,
-            file_type_to_string(input->file_type),
+            input->filename, file_type_to_string(input->file_type),
             image_format_to_string(input->fmt));
   }
   if (stream->next || stream->index)
@@ -1365,20 +1348,17 @@ static void show_stream_config(struct stream_state *stream,
   SHOW(kf_max_dist);
 }
 
-
 static void open_output_file(struct stream_state *stream,
                              struct VpxEncoderConfig *global,
                              const struct VpxRational *pixel_aspect_ratio) {
   const char *fn = stream->config.out_fn;
   const struct vpx_codec_enc_cfg *const cfg = &stream->config.cfg;
 
-  if (cfg->g_pass == VPX_RC_FIRST_PASS)
-    return;
+  if (cfg->g_pass == VPX_RC_FIRST_PASS) return;
 
   stream->file = strcmp(fn, "-") ? fopen(fn, "wb") : set_binary_mode(stdout);
 
-  if (!stream->file)
-    fatal("Failed to open output file");
+  if (!stream->file) fatal("Failed to open output file");
 
   if (stream->config.write_webm && fseek(stream->file, 0, SEEK_CUR))
     fatal("WebM output to pipes not supported.");
@@ -1386,10 +1366,8 @@ static void open_output_file(struct stream_state *stream,
 #if CONFIG_WEBM_IO
   if (stream->config.write_webm) {
     stream->ebml.stream = stream->file;
-    write_webm_file_header(&stream->ebml, cfg,
-                           &global->framerate,
-                           stream->config.stereo_fmt,
-                           global->codec->fourcc,
+    write_webm_file_header(&stream->ebml, cfg, &global->framerate,
+                           stream->config.stereo_fmt, global->codec->fourcc,
                            pixel_aspect_ratio);
   }
 #endif
@@ -1399,13 +1377,11 @@ static void open_output_file(struct stream_state *stream,
   }
 }
 
-
 static void close_output_file(struct stream_state *stream,
                               unsigned int fourcc) {
   const struct vpx_codec_enc_cfg *const cfg = &stream->config.cfg;
 
-  if (cfg->g_pass == VPX_RC_FIRST_PASS)
-    return;
+  if (cfg->g_pass == VPX_RC_FIRST_PASS) return;
 
 #if CONFIG_WEBM_IO
   if (stream->config.write_webm) {
@@ -1415,21 +1391,17 @@ static void close_output_file(struct stream_state *stream,
 
   if (!stream->config.write_webm) {
     if (!fseek(stream->file, 0, SEEK_SET))
-      ivf_write_file_header(stream->file, &stream->config.cfg,
-                            fourcc,
+      ivf_write_file_header(stream->file, &stream->config.cfg, fourcc,
                             stream->frames_out);
   }
 
   fclose(stream->file);
 }
 
-
 static void setup_pass(struct stream_state *stream,
-                       struct VpxEncoderConfig *global,
-                       int pass) {
+                       struct VpxEncoderConfig *global, int pass) {
   if (stream->config.stats_fn) {
-    if (!stats_open_file(&stream->stats, stream->config.stats_fn,
-                         pass))
+    if (!stats_open_file(&stream->stats, stream->config.stats_fn, pass))
       fatal("Failed to open statistics store");
   } else {
     if (!stats_open_mem(&stream->stats, pass))
@@ -1438,8 +1410,8 @@ static void setup_pass(struct stream_state *stream,
 
 #if CONFIG_FP_MB_STATS
   if (stream->config.fpmb_stats_fn) {
-    if (!stats_open_file(&stream->fpmb_stats,
-                         stream->config.fpmb_stats_fn, pass))
+    if (!stats_open_file(&stream->fpmb_stats, stream->config.fpmb_stats_fn,
+                         pass))
       fatal("Failed to open mb statistics store");
   } else {
     if (!stats_open_mem(&stream->fpmb_stats, pass))
@@ -1448,8 +1420,8 @@ static void setup_pass(struct stream_state *stream,
 #endif
 
   stream->config.cfg.g_pass = global->passes == 2
-                              ? pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS
-                            : VPX_RC_ONE_PASS;
+                                  ? pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS
+                                  : VPX_RC_ONE_PASS;
   if (pass) {
     stream->config.cfg.rc_twopass_stats_in = stats_get(&stream->stats);
 #if CONFIG_FP_MB_STATS
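
Spelled out, the reindented ternary above maps the two-pass setup onto the
rate-control enum as follows (an equivalent sketch, not part of the patch):

  /* passes == 1            -> VPX_RC_ONE_PASS
     passes == 2, pass == 0 -> VPX_RC_FIRST_PASS
     passes == 2, pass == 1 -> VPX_RC_LAST_PASS */
  if (global->passes == 2)
    stream->config.cfg.g_pass = pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS;
  else
    stream->config.cfg.g_pass = VPX_RC_ONE_PASS;
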
@@ -1463,7 +1435,6 @@ static void setup_pass(struct stream_state *stream,
   stream->frames_out = 0;
 }
 
-
 static void initialize_encoder(struct stream_state *stream,
                                struct VpxEncoderConfig *global) {
   int i;
@@ -1488,8 +1459,7 @@ static void initialize_encoder(struct stream_state *stream,
     int ctrl = stream->config.arg_ctrls[i][0];
     int value = stream->config.arg_ctrls[i][1];
     if (vpx_codec_control_(&stream->encoder, ctrl, value))
-      fprintf(stderr, "Error: Tried to set control %d = %d\n",
-              ctrl, value);
+      fprintf(stderr, "Error: Tried to set control %d = %d\n", ctrl, value);
 
     ctx_exit_on_error(&stream->encoder, "Failed to control codec");
   }
@@ -1502,23 +1472,21 @@ static void initialize_encoder(struct stream_state *stream,
 #endif
 }
 
-
 static void encode_frame(struct stream_state *stream,
-                         struct VpxEncoderConfig *global,
-                         struct vpx_image *img,
+                         struct VpxEncoderConfig *global, struct vpx_image *img,
                          unsigned int frames_in) {
   vpx_codec_pts_t frame_start, next_frame_start;
   struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
   struct vpx_usec_timer timer;
 
-  frame_start = (cfg->g_timebase.den * (int64_t)(frames_in - 1)
-                 * global->framerate.den)
-                / cfg->g_timebase.num / global->framerate.num;
-  next_frame_start = (cfg->g_timebase.den * (int64_t)(frames_in)
-                      * global->framerate.den)
-                     / cfg->g_timebase.num / global->framerate.num;
+  frame_start =
+      (cfg->g_timebase.den * (int64_t)(frames_in - 1) * global->framerate.den) /
+      cfg->g_timebase.num / global->framerate.num;
+  next_frame_start =
+      (cfg->g_timebase.den * (int64_t)frames_in * global->framerate.den) /
+      cfg->g_timebase.num / global->framerate.num;
 
-  /* Scale if necessary */
+/* Scale if necessary */
 #if CONFIG_VPX_HIGHBITDEPTH
   if (img) {
     if ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) &&
@@ -1529,32 +1497,28 @@ static void encode_frame(struct stream_state *stream,
       }
 #if CONFIG_LIBYUV
       if (!stream->img) {
-        stream->img = vpx_img_alloc(NULL, VPX_IMG_FMT_I42016,
-                                    cfg->g_w, cfg->g_h, 16);
+        stream->img =
+            vpx_img_alloc(NULL, VPX_IMG_FMT_I42016, cfg->g_w, cfg->g_h, 16);
       }
-      I420Scale_16((uint16*)img->planes[VPX_PLANE_Y],
-                   img->stride[VPX_PLANE_Y]/2,
-                   (uint16*)img->planes[VPX_PLANE_U],
-                   img->stride[VPX_PLANE_U]/2,
-                   (uint16*)img->planes[VPX_PLANE_V],
-                   img->stride[VPX_PLANE_V]/2,
-                   img->d_w, img->d_h,
-                   (uint16*)stream->img->planes[VPX_PLANE_Y],
-                   stream->img->stride[VPX_PLANE_Y]/2,
-                   (uint16*)stream->img->planes[VPX_PLANE_U],
-                   stream->img->stride[VPX_PLANE_U]/2,
-                   (uint16*)stream->img->planes[VPX_PLANE_V],
-                   stream->img->stride[VPX_PLANE_V]/2,
-                   stream->img->d_w, stream->img->d_h,
-                   kFilterBox);
+      I420Scale_16(
+          (uint16 *)img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y] / 2,
+          (uint16 *)img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U] / 2,
+          (uint16 *)img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V] / 2,
+          img->d_w, img->d_h, (uint16 *)stream->img->planes[VPX_PLANE_Y],
+          stream->img->stride[VPX_PLANE_Y] / 2,
+          (uint16 *)stream->img->planes[VPX_PLANE_U],
+          stream->img->stride[VPX_PLANE_U] / 2,
+          (uint16 *)stream->img->planes[VPX_PLANE_V],
+          stream->img->stride[VPX_PLANE_V] / 2, stream->img->d_w,
+          stream->img->d_h, kFilterBox);
       img = stream->img;
 #else
-    stream->encoder.err = 1;
-    ctx_exit_on_error(&stream->encoder,
-                      "Stream %d: Failed to encode frame.\n"
-                      "Scaling disabled in this configuration. \n"
-                      "To enable, configure with --enable-libyuv\n",
-                      stream->index);
+      stream->encoder.err = 1;
+      ctx_exit_on_error(&stream->encoder,
+                        "Stream %d: Failed to encode frame.\n"
+                        "Scaling disabled in this configuration. \n"
+                        "To enable, configure with --enable-libyuv\n",
+                        stream->index);
 #endif
     }
   }
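
The pts arithmetic reflowed above computes
frame_start = tb.den * (frames_in - 1) * fps.den / tb.num / fps.num. A worked
example under assumed values (90 kHz timebase, 30000/1001 fps):

  const int64_t tb_den = 90000, tb_num = 1;      /* cfg->g_timebase   */
  const int64_t fps_num = 30000, fps_den = 1001; /* global->framerate */
  /* Second frame (frames_in == 2): 90000 * 1 * 1001 / 1 / 30000 == 3003,
     exactly one 29.97 fps frame duration in 90 kHz ticks. */
  const int64_t frame_start =
      tb_den * (int64_t)(2 - 1) * fps_den / tb_num / fps_num;
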
@@ -1566,20 +1530,16 @@ static void encode_frame(struct stream_state *stream,
     }
 #if CONFIG_LIBYUV
     if (!stream->img)
-      stream->img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420,
-                                  cfg->g_w, cfg->g_h, 16);
-    I420Scale(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
-              img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
-              img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
-              img->d_w, img->d_h,
-              stream->img->planes[VPX_PLANE_Y],
-              stream->img->stride[VPX_PLANE_Y],
-              stream->img->planes[VPX_PLANE_U],
-              stream->img->stride[VPX_PLANE_U],
-              stream->img->planes[VPX_PLANE_V],
-              stream->img->stride[VPX_PLANE_V],
-              stream->img->d_w, stream->img->d_h,
-              kFilterBox);
+      stream->img =
+          vpx_img_alloc(NULL, VPX_IMG_FMT_I420, cfg->g_w, cfg->g_h, 16);
+    I420Scale(
+        img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+        img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+        img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], img->d_w, img->d_h,
+        stream->img->planes[VPX_PLANE_Y], stream->img->stride[VPX_PLANE_Y],
+        stream->img->planes[VPX_PLANE_U], stream->img->stride[VPX_PLANE_U],
+        stream->img->planes[VPX_PLANE_V], stream->img->stride[VPX_PLANE_V],
+        stream->img->d_w, stream->img->d_h, kFilterBox);
     img = stream->img;
 #else
     stream->encoder.err = 1;
@@ -1593,15 +1553,14 @@ static void encode_frame(struct stream_state *stream,
 
   vpx_usec_timer_start(&timer);
   vpx_codec_encode(&stream->encoder, img, frame_start,
-                   (unsigned long)(next_frame_start - frame_start),
-                   0, global->deadline);
+                   (unsigned long)(next_frame_start - frame_start), 0,
+                   global->deadline);
   vpx_usec_timer_mark(&timer);
   stream->cx_time += vpx_usec_timer_elapsed(&timer);
   ctx_exit_on_error(&stream->encoder, "Stream %d: Failed to encode frame",
                     stream->index);
 }
 
-
 static void update_quantizer_histogram(struct stream_state *stream) {
   if (stream->config.cfg.g_pass != VPX_RC_FIRST_PASS) {
     int q;
@@ -1612,10 +1571,8 @@ static void update_quantizer_histogram(struct stream_state *stream) {
   }
 }
 
-
 static void get_cx_data(struct stream_state *stream,
-                        struct VpxEncoderConfig *global,
-                        int *got_data) {
+                        struct VpxEncoderConfig *global, int *got_data) {
   const vpx_codec_cx_pkt_t *pkt;
   const struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
   vpx_codec_iter_t iter = NULL;
@@ -1656,8 +1613,8 @@ static void get_cx_data(struct stream_state *stream,
             }
           }
 
-          (void) fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz,
-                        stream->file);
+          (void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz,
+                       stream->file);
         }
         stream->nbytes += pkt->data.raw.sz;
 
@@ -1678,15 +1635,13 @@ static void get_cx_data(struct stream_state *stream,
         break;
       case VPX_CODEC_STATS_PKT:
         stream->frames_out++;
-        stats_write(&stream->stats,
-                    pkt->data.twopass_stats.buf,
+        stats_write(&stream->stats, pkt->data.twopass_stats.buf,
                     pkt->data.twopass_stats.sz);
         stream->nbytes += pkt->data.raw.sz;
         break;
 #if CONFIG_FP_MB_STATS
       case VPX_CODEC_FPMB_STATS_PKT:
-        stats_write(&stream->fpmb_stats,
-                    pkt->data.firstpass_mb_stats.buf,
+        stats_write(&stream->fpmb_stats, pkt->data.firstpass_mb_stats.buf,
                     pkt->data.firstpass_mb_stats.sz);
         stream->nbytes += pkt->data.raw.sz;
         break;
@@ -1707,19 +1662,16 @@ static void get_cx_data(struct stream_state *stream,
         }
 
         break;
-      default:
-        break;
+      default: break;
     }
   }
 }
 
-
-static void show_psnr(struct stream_state  *stream, double peak) {
+static void show_psnr(struct stream_state *stream, double peak) {
   int i;
   double ovpsnr;
 
-  if (!stream->psnr_count)
-    return;
+  if (!stream->psnr_count) return;
 
   fprintf(stderr, "Stream %d PSNR (Overall/Avg/Y/U/V)", stream->index);
   ovpsnr = sse_to_psnr((double)stream->psnr_samples_total, peak,
@@ -1732,19 +1684,17 @@ static void show_psnr(struct stream_state  *stream, double peak) {
   fprintf(stderr, "\n");
 }
 
-
 static float usec_to_fps(uint64_t usec, unsigned int frames) {
   return (float)(usec > 0 ? frames * 1000000.0 / (float)usec : 0);
 }
 
-static void test_decode(struct stream_state  *stream,
+static void test_decode(struct stream_state *stream,
                         enum TestDecodeFatality fatal,
                         const VpxInterface *codec) {
   vpx_image_t enc_img, dec_img;
   struct vp9_ref_frame ref_enc, ref_dec;
 
-  if (stream->mismatch_seen)
-    return;
+  if (stream->mismatch_seen) return;
 
   ref_enc.idx = 0;
   ref_dec.idx = 0;
@@ -1787,10 +1737,8 @@ static void test_decode(struct stream_state  *stream,
                           " Y[%d, %d] {%d/%d},"
                           " U[%d, %d] {%d/%d},"
                           " V[%d, %d] {%d/%d}",
-                          stream->index, stream->frames_out,
-                          y[0], y[1], y[2], y[3],
-                          u[0], u[1], u[2], u[3],
-                          v[0], v[1], v[2], v[3]);
+                          stream->index, stream->frames_out, y[0], y[1], y[2],
+                          y[3], u[0], u[1], u[2], u[3], v[0], v[1], v[2], v[3]);
     stream->mismatch_seen = stream->frames_out;
   }
 
@@ -1798,7 +1746,6 @@ static void test_decode(struct stream_state  *stream,
   vpx_img_free(&dec_img);
 }
 
-
 static void print_time(const char *label, int64_t etl) {
   int64_t hours;
   int64_t mins;
@@ -1811,14 +1758,13 @@ static void print_time(const char *label, int64_t etl) {
     etl -= mins * 60;
     secs = etl;
 
-    fprintf(stderr, "[%3s %2"PRId64":%02"PRId64":%02"PRId64"] ",
-            label, hours, mins, secs);
+    fprintf(stderr, "[%3s %2" PRId64 ":%02" PRId64 ":%02" PRId64 "] ", label,
+            hours, mins, secs);
   } else {
     fprintf(stderr, "[%3s  unknown] ", label);
   }
 }
 
-
 int main(int argc, const char **argv_) {
   int pass;
   vpx_image_t raw;
@@ -1841,8 +1787,7 @@ int main(int argc, const char **argv_) {
   memset(&input, 0, sizeof(input));
   exec_name = argv_[0];
 
-  if (argc < 3)
-    usage_exit();
+  if (argc < 3) usage_exit();
 
   /* Setup default input stream settings */
   input.framerate.numerator = 30;
@@ -1858,21 +1803,11 @@ int main(int argc, const char **argv_) {
   parse_global_config(&global, argv);
 
   switch (global.color_type) {
-    case I420:
-      input.fmt = VPX_IMG_FMT_I420;
-      break;
-    case I422:
-      input.fmt = VPX_IMG_FMT_I422;
-      break;
-    case I444:
-      input.fmt = VPX_IMG_FMT_I444;
-      break;
-    case I440:
-      input.fmt = VPX_IMG_FMT_I440;
-      break;
-    case YV12:
-      input.fmt = VPX_IMG_FMT_YV12;
-      break;
+    case I420: input.fmt = VPX_IMG_FMT_I420; break;
+    case I422: input.fmt = VPX_IMG_FMT_I422; break;
+    case I444: input.fmt = VPX_IMG_FMT_I444; break;
+    case I440: input.fmt = VPX_IMG_FMT_I440; break;
+    case YV12: input.fmt = VPX_IMG_FMT_YV12; break;
   }
 
   {
@@ -1885,8 +1820,7 @@ int main(int argc, const char **argv_) {
     do {
       stream = new_stream(&global, stream);
       stream_cnt++;
-      if (!streams)
-        streams = stream;
+      if (!streams) streams = stream;
     } while (parse_stream_params(&global, stream, argv));
   }
 
@@ -1895,14 +1829,13 @@ int main(int argc, const char **argv_) {
     if (argi[0][0] == '-' && argi[0][1])
       die("Error: Unrecognized option %s\n", *argi);
 
-  FOREACH_STREAM(check_encoder_config(global.disable_warning_prompt,
-                                      &global, &stream->config.cfg););
+  FOREACH_STREAM(check_encoder_config(global.disable_warning_prompt, &global,
+                                      &stream->config.cfg););
 
   /* Handle non-option arguments */
   input.filename = argv[0];
 
-  if (!input.filename)
-    usage_exit();
+  if (!input.filename) usage_exit();
 
   /* Decide if chroma subsamplings other than 4:2:0 are supported */
   if (global.codec->fourcc == VP9_FOURCC || global.codec->fourcc == VP10_FOURCC)
@@ -1931,8 +1864,9 @@ int main(int argc, const char **argv_) {
 
     /* Update stream configurations from the input file's parameters */
     if (!input.width || !input.height)
-      fatal("Specify stream dimensions with --width (-w) "
-            " and --height (-h)");
+      fatal(
+          "Specify stream dimensions with --width (-w) "
+          " and --height (-h)");
 
     /* If input file does not specify bit-depth but input-bit-depth parameter
      * exists, assume that to be the input bit-depth. However, if the
@@ -1949,9 +1883,8 @@ int main(int argc, const char **argv_) {
       });
       if (input.bit_depth > 8) input.fmt |= VPX_IMG_FMT_HIGHBITDEPTH;
     } else {
-      FOREACH_STREAM({
-        stream->config.cfg.g_input_bit_depth = input.bit_depth;
-      });
+      FOREACH_STREAM(
+          { stream->config.cfg.g_input_bit_depth = input.bit_depth; });
     }
 
     FOREACH_STREAM(set_stream_dimensions(stream, input.width, input.height));
@@ -1961,18 +1894,21 @@ int main(int argc, const char **argv_) {
      * --passes=2, ensure --fpf was set.
      */
     if (global.pass && global.passes == 2)
-      FOREACH_STREAM( {
-      if (!stream->config.stats_fn)
-        die("Stream %d: Must specify --fpf when --pass=%d"
-        " and --passes=2\n", stream->index, global.pass);
-    });
+      FOREACH_STREAM({
+        if (!stream->config.stats_fn)
+          die(
+              "Stream %d: Must specify --fpf when --pass=%d"
+              " and --passes=2\n",
+              stream->index, global.pass);
+      });
 
 #if !CONFIG_WEBM_IO
     FOREACH_STREAM({
       if (stream->config.write_webm) {
         stream->config.write_webm = 0;
-        warn("vpxenc was compiled without WebM container support."
-             "Producing IVF output");
+        warn(
+            "vpxenc was compiled without WebM container support."
+            "Producing IVF output");
       }
     });
 #endif
@@ -2000,14 +1936,13 @@ int main(int argc, const char **argv_) {
       else
         vpx_img_alloc(&raw, input.fmt, input.width, input.height, 32);
 
-      FOREACH_STREAM(stream->rate_hist =
-                         init_rate_histogram(&stream->config.cfg,
-                                             &global.framerate));
+      FOREACH_STREAM(stream->rate_hist = init_rate_histogram(
+                         &stream->config.cfg, &global.framerate));
     }
 
     FOREACH_STREAM(setup_pass(stream, &global, pass));
-    FOREACH_STREAM(open_output_file(stream, &global,
-                                    &input.pixel_aspect_ratio));
+    FOREACH_STREAM(
+        open_output_file(stream, &global, &input.pixel_aspect_ratio));
     FOREACH_STREAM(initialize_encoder(stream, &global));
 
 #if CONFIG_VPX_HIGHBITDEPTH
@@ -2024,7 +1959,7 @@ int main(int argc, const char **argv_) {
           input_shift = 0;
         } else {
           input_shift = (int)stream->config.cfg.g_bit_depth -
-              stream->config.cfg.g_input_bit_depth;
+                        stream->config.cfg.g_input_bit_depth;
         }
       });
     }
@@ -2039,26 +1974,23 @@ int main(int argc, const char **argv_) {
       if (!global.limit || frames_in < global.limit) {
         frame_avail = read_frame(&input, &raw);
 
-        if (frame_avail)
-          frames_in++;
-        seen_frames = frames_in > global.skip_frames ?
-                          frames_in - global.skip_frames : 0;
+        if (frame_avail) frames_in++;
+        seen_frames =
+            frames_in > global.skip_frames ? frames_in - global.skip_frames : 0;
 
         if (!global.quiet) {
           float fps = usec_to_fps(cx_time, seen_frames);
           fprintf(stderr, "\rPass %d/%d ", pass + 1, global.passes);
 
           if (stream_cnt == 1)
-            fprintf(stderr,
-                    "frame %4d/%-4d %7"PRId64"B ",
-                    frames_in, streams->frames_out, (int64_t)streams->nbytes);
+            fprintf(stderr, "frame %4d/%-4d %7" PRId64 "B ", frames_in,
+                    streams->frames_out, (int64_t)streams->nbytes);
           else
             fprintf(stderr, "frame %4d ", frames_in);
 
-          fprintf(stderr, "%7"PRId64" %s %.2f %s ",
+          fprintf(stderr, "%7" PRId64 " %s %.2f %s ",
                   cx_time > 9999999 ? cx_time / 1000 : cx_time,
-                  cx_time > 9999999 ? "ms" : "us",
-                  fps >= 1.0 ? fps : fps * 60,
+                  cx_time > 9999999 ? "ms" : "us", fps >= 1.0 ? fps : fps * 60,
                   fps >= 1.0 ? "fps" : "fpm");
           print_time("ETA", estimated_time_left);
         }
@@ -2089,8 +2021,7 @@ int main(int argc, const char **argv_) {
           FOREACH_STREAM({
             if (stream->config.use_16bit_internal)
               encode_frame(stream, &global,
-                           frame_avail ? frame_to_encode : NULL,
-                           frames_in);
+                           frame_avail ? frame_to_encode : NULL, frames_in);
             else
               assert(0);
           });
@@ -2102,8 +2033,7 @@ int main(int argc, const char **argv_) {
         }
 #else
         vpx_usec_timer_start(&timer);
-        FOREACH_STREAM(encode_frame(stream, &global,
-                                    frame_avail ? &raw : NULL,
+        FOREACH_STREAM(encode_frame(stream, &global, frame_avail ? &raw : NULL,
                                     frames_in));
 #endif
         vpx_usec_timer_mark(&timer);
@@ -2125,8 +2055,8 @@ int main(int argc, const char **argv_) {
             const int64_t frame_in_lagged = (seen_frames - lagged_count) * 1000;
 
             rate = cx_time ? frame_in_lagged * (int64_t)1000000 / cx_time : 0;
-            remaining = 1000 * (global.limit - global.skip_frames
-                                - seen_frames + lagged_count);
+            remaining = 1000 * (global.limit - global.skip_frames -
+                                seen_frames + lagged_count);
           } else {
             const int64_t input_pos = ftello(input.file);
             const int64_t input_pos_lagged = input_pos - lagged_count;
@@ -2136,9 +2066,8 @@ int main(int argc, const char **argv_) {
             remaining = limit - input_pos + lagged_count;
           }
 
-          average_rate = (average_rate <= 0)
-              ? rate
-              : (average_rate * 7 + rate) / 8;
+          average_rate =
+              (average_rate <= 0) ? rate : (average_rate * 7 + rate) / 8;
           estimated_time_left = average_rate ? remaining / average_rate : -1;
         }
 
@@ -2147,23 +2076,23 @@ int main(int argc, const char **argv_) {
       }
 
       fflush(stdout);
-      if (!global.quiet)
-        fprintf(stderr, "\033[K");
+      if (!global.quiet) fprintf(stderr, "\033[K");
     }
 
-    if (stream_cnt > 1)
-      fprintf(stderr, "\n");
+    if (stream_cnt > 1) fprintf(stderr, "\n");
 
     if (!global.quiet) {
-      FOREACH_STREAM(fprintf(stderr,
-          "\rPass %d/%d frame %4d/%-4d %7"PRId64"B %7"PRId64"b/f %7"PRId64"b/s"
-          " %7"PRId64" %s (%.2f fps)\033[K\n",
-          pass + 1,
-          global.passes, frames_in, stream->frames_out, (int64_t)stream->nbytes,
+      FOREACH_STREAM(fprintf(
+          stderr, "\rPass %d/%d frame %4d/%-4d %7" PRId64 "B %7" PRId64
+                  "b/f %7" PRId64 "b/s"
+                  " %7" PRId64 " %s (%.2f fps)\033[K\n",
+          pass + 1, global.passes, frames_in, stream->frames_out,
+          (int64_t)stream->nbytes,
           seen_frames ? (int64_t)(stream->nbytes * 8 / seen_frames) : 0,
-          seen_frames ? (int64_t)stream->nbytes * 8 *
-              (int64_t)global.framerate.num / global.framerate.den /
-              seen_frames : 0,
+          seen_frames
+              ? (int64_t)stream->nbytes * 8 * (int64_t)global.framerate.num /
+                    global.framerate.den / seen_frames
+              : 0,
           stream->cx_time > 9999999 ? stream->cx_time / 1000 : stream->cx_time,
           stream->cx_time > 9999999 ? "ms" : "us",
           usec_to_fps(stream->cx_time, seen_frames)));
@@ -2197,17 +2126,15 @@ int main(int argc, const char **argv_) {
     FOREACH_STREAM(stats_close(&stream->fpmb_stats, global.passes - 1));
 #endif
 
-    if (global.pass)
-      break;
+    if (global.pass) break;
   }
 
   if (global.show_q_hist_buckets)
-    FOREACH_STREAM(show_q_histogram(stream->counts,
-                                    global.show_q_hist_buckets));
+    FOREACH_STREAM(
+        show_q_histogram(stream->counts, global.show_q_hist_buckets));
 
   if (global.show_rate_hist_buckets)
-    FOREACH_STREAM(show_rate_histogram(stream->rate_hist,
-                                       &stream->config.cfg,
+    FOREACH_STREAM(show_rate_histogram(stream->rate_hist, &stream->config.cfg,
                                        global.show_rate_hist_buckets));
   FOREACH_STREAM(destroy_rate_histogram(stream->rate_hist));
 
@@ -2229,8 +2156,7 @@ int main(int argc, const char **argv_) {
 #endif
 
 #if CONFIG_VPX_HIGHBITDEPTH
-  if (allocated_raw_shift)
-    vpx_img_free(&raw_shift);
+  if (allocated_raw_shift) vpx_img_free(&raw_shift);
 #endif
   vpx_img_free(&raw);
   free(argv);
diff --git a/vpxstats.c b/vpxstats.c
index 16728ce09637d33614457ae20b847133ca3ab8db..142e367bb48c440fc7873722cdff8cbfdaaca8d5 100644
--- a/vpxstats.c
+++ b/vpxstats.c
@@ -30,8 +30,7 @@ int stats_open_file(stats_io_t *stats, const char *fpf, int pass) {
 
     stats->file = fopen(fpf, "rb");
 
-    if (stats->file == NULL)
-      fatal("First-pass stats file does not exist!");
+    if (stats->file == NULL) fatal("First-pass stats file does not exist!");
 
     if (fseek(stats->file, 0, SEEK_END))
       fatal("First-pass stats file must be seekable!");
@@ -76,18 +75,17 @@ void stats_close(stats_io_t *stats, int last_pass) {
     fclose(stats->file);
     stats->file = NULL;
   } else {
-    if (stats->pass == last_pass)
-      free(stats->buf.buf);
+    if (stats->pass == last_pass) free(stats->buf.buf);
   }
 }
 
 void stats_write(stats_io_t *stats, const void *pkt, size_t len) {
   if (stats->file) {
-    (void) fwrite(pkt, 1, len, stats->file);
+    (void)fwrite(pkt, 1, len, stats->file);
   } else {
     if (stats->buf.sz + len > stats->buf_alloc_sz) {
-      size_t  new_sz = stats->buf_alloc_sz + 64 * 1024;
-      char   *new_ptr = realloc(stats->buf.buf, new_sz);
+      size_t new_sz = stats->buf_alloc_sz + 64 * 1024;
+      char *new_ptr = realloc(stats->buf.buf, new_sz);
 
       if (new_ptr) {
         stats->buf_ptr = new_ptr + (stats->buf_ptr - (char *)stats->buf.buf);
@@ -104,6 +102,4 @@ void stats_write(stats_io_t *stats, const void *pkt, size_t len) {
   }
 }
 
-vpx_fixed_buf_t stats_get(stats_io_t *stats) {
-  return stats->buf;
-}
+vpx_fixed_buf_t stats_get(stats_io_t *stats) { return stats->buf; }
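
Taken together, vpxstats.c is a small two-pass stats store with two backends:
a file, or a memory buffer grown by realloc in 64 KiB steps as above. A hedged
round-trip sketch using only the functions in this file (the packet contents
are placeholders):

  const char pkt_buf[16] = { 0 }; /* placeholder stats packet */
  stats_io_t io;
  if (!stats_open_mem(&io, 0)) fatal("Failed to open statistics store");
  stats_write(&io, pkt_buf, sizeof(pkt_buf)); /* per VPX_CODEC_STATS_PKT */
  stats_close(&io, 1); /* pass 0 != last pass, so the buffer is kept */

  if (!stats_open_mem(&io, 1)) fatal("Failed to open statistics store");
  vpx_fixed_buf_t fp = stats_get(&io); /* becomes rc_twopass_stats_in */
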
diff --git a/warnings.c b/warnings.c
index 7ac678ab4aabfee4a94ce7916f0580f1205c5b25..a3e4926674b00eb434a478835dbaac2219034766 100644
--- a/warnings.c
+++ b/warnings.c
@@ -47,8 +47,7 @@ static void add_warning(const char *warning_string,
   new_node->warning_string = warning_string;
   new_node->next_warning = NULL;
 
-  while (*node != NULL)
-    node = &(*node)->next_warning;
+  while (*node != NULL) node = &(*node)->next_warning;
 
   *node = new_node;
 }
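
The reflowed while loop above is the pointer-to-pointer append idiom: walking
the address of each next pointer means an empty list needs no special case.
The same idiom in isolation (a generic sketch):

  struct node { struct node *next; };
  static void list_append(struct node **head, struct node *item) {
    item->next = NULL;
    while (*head != NULL) head = &(*head)->next; /* find the NULL slot... */
    *head = item;                                /* ...and fill it in */
  }
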
@@ -78,9 +77,7 @@ static void check_quantizer(int min_q, int max_q,
 }
 
 static void check_lag_in_frames_realtime_deadline(
-    int lag_in_frames,
-    int deadline,
-    struct WarningList *warning_list) {
+    int lag_in_frames, int deadline, struct WarningList *warning_list) {
   if (deadline == VPX_DL_REALTIME && lag_in_frames != 0)
     add_warning(lag_in_frames_with_realtime, warning_list);
 }
@@ -90,26 +87,21 @@ void check_encoder_config(int disable_prompt,
                           const struct vpx_codec_enc_cfg *stream_config) {
   int num_warnings = 0;
   struct WarningListNode *warning = NULL;
-  struct WarningList warning_list = {0};
+  struct WarningList warning_list = { 0 };
 
   check_quantizer(stream_config->rc_min_quantizer,
-                  stream_config->rc_max_quantizer,
-                  &warning_list);
+                  stream_config->rc_max_quantizer, &warning_list);
   check_lag_in_frames_realtime_deadline(stream_config->g_lag_in_frames,
-                                        global_config->deadline,
-                                        &warning_list);
+                                        global_config->deadline, &warning_list);
   /* Count and print warnings. */
-  for (warning = warning_list.warning_node;
-       warning != NULL;
-       warning = warning->next_warning,
-       ++num_warnings) {
+  for (warning = warning_list.warning_node; warning != NULL;
+       warning = warning->next_warning, ++num_warnings) {
     warn(warning->warning_string);
   }
 
   free_warning_list(&warning_list);
 
   if (num_warnings) {
-    if (!disable_prompt && !continue_prompt(num_warnings))
-      exit(EXIT_FAILURE);
+    if (!disable_prompt && !continue_prompt(num_warnings)) exit(EXIT_FAILURE);
   }
 }
diff --git a/webmdec.cc b/webmdec.cc
index f541cfecc1692ebb6894d3ffa5efc42970249c2a..92f61fcdec128af2670de440fcae2d1a31e0ac63 100644
--- a/webmdec.cc
+++ b/webmdec.cc
@@ -21,12 +21,12 @@ namespace {
 void reset(struct WebmInputContext *const webm_ctx) {
   if (webm_ctx->reader != NULL) {
     mkvparser::MkvReader *const reader =
-        reinterpret_cast<mkvparser::MkvReader*>(webm_ctx->reader);
+        reinterpret_cast<mkvparser::MkvReader *>(webm_ctx->reader);
     delete reader;
   }
   if (webm_ctx->segment != NULL) {
     mkvparser::Segment *const segment =
-        reinterpret_cast<mkvparser::Segment*>(webm_ctx->segment);
+        reinterpret_cast<mkvparser::Segment *>(webm_ctx->segment);
     delete segment;
   }
   if (webm_ctx->buffer != NULL) {
@@ -46,7 +46,7 @@ void reset(struct WebmInputContext *const webm_ctx) {
 
 void get_first_cluster(struct WebmInputContext *const webm_ctx) {
   mkvparser::Segment *const segment =
-      reinterpret_cast<mkvparser::Segment*>(webm_ctx->segment);
+      reinterpret_cast<mkvparser::Segment *>(webm_ctx->segment);
   const mkvparser::Cluster *const cluster = segment->GetFirst();
   webm_ctx->cluster = cluster;
 }
@@ -72,7 +72,7 @@ int file_is_webm(struct WebmInputContext *webm_ctx,
     return 0;
   }
 
-  mkvparser::Segment* segment;
+  mkvparser::Segment *segment;
   if (mkvparser::Segment::CreateInstance(reader, pos, segment)) {
     rewind_and_reset(webm_ctx, vpx_ctx);
     return 0;
@@ -84,11 +84,11 @@ int file_is_webm(struct WebmInputContext *webm_ctx,
   }
 
   const mkvparser::Tracks *const tracks = segment->GetTracks();
-  const mkvparser::VideoTrack* video_track = NULL;
+  const mkvparser::VideoTrack *video_track = NULL;
   for (unsigned long i = 0; i < tracks->GetTracksCount(); ++i) {
-    const mkvparser::Track* const track = tracks->GetTrackByIndex(i);
+    const mkvparser::Track *const track = tracks->GetTrackByIndex(i);
     if (track->GetType() == mkvparser::Track::kVideo) {
-      video_track = static_cast<const mkvparser::VideoTrack*>(track);
+      video_track = static_cast<const mkvparser::VideoTrack *>(track);
       webm_ctx->video_track_index = track->GetNumber();
       break;
     }
@@ -120,23 +120,21 @@ int file_is_webm(struct WebmInputContext *webm_ctx,
   return 1;
 }
 
-int webm_read_frame(struct WebmInputContext *webm_ctx,
-                    uint8_t **buffer,
-                    size_t *bytes_in_buffer,
-                    size_t *buffer_size) {
+int webm_read_frame(struct WebmInputContext *webm_ctx, uint8_t **buffer,
+                    size_t *bytes_in_buffer, size_t *buffer_size) {
   // This check is needed for frame parallel decoding, in which case this
   // function could be called even after reaching the end of the input stream.
   if (webm_ctx->reached_eos) {
     return 1;
   }
   mkvparser::Segment *const segment =
-      reinterpret_cast<mkvparser::Segment*>(webm_ctx->segment);
-  const mkvparser::Cluster* cluster =
-      reinterpret_cast<const mkvparser::Cluster*>(webm_ctx->cluster);
+      reinterpret_cast<mkvparser::Segment *>(webm_ctx->segment);
+  const mkvparser::Cluster *cluster =
+      reinterpret_cast<const mkvparser::Cluster *>(webm_ctx->cluster);
   const mkvparser::Block *block =
-      reinterpret_cast<const mkvparser::Block*>(webm_ctx->block);
+      reinterpret_cast<const mkvparser::Block *>(webm_ctx->block);
   const mkvparser::BlockEntry *block_entry =
-      reinterpret_cast<const mkvparser::BlockEntry*>(webm_ctx->block_entry);
+      reinterpret_cast<const mkvparser::BlockEntry *>(webm_ctx->block_entry);
   bool block_entry_eos = false;
   do {
     long status = 0;
@@ -178,11 +176,11 @@ int webm_read_frame(struct WebmInputContext *webm_ctx,
   webm_ctx->block_entry = block_entry;
   webm_ctx->block = block;
 
-  const mkvparser::Block::Frame& frame =
+  const mkvparser::Block::Frame &frame =
       block->GetFrame(webm_ctx->block_frame_index);
   ++webm_ctx->block_frame_index;
   if (frame.len > static_cast<long>(*buffer_size)) {
-    delete[] *buffer;
+    delete[] *buffer;
     *buffer = new uint8_t[frame.len];
     if (*buffer == NULL) {
       return -1;
@@ -195,7 +193,7 @@ int webm_read_frame(struct WebmInputContext *webm_ctx,
   webm_ctx->is_key_frame = block->IsKey();
 
   mkvparser::MkvReader *const reader =
-      reinterpret_cast<mkvparser::MkvReader*>(webm_ctx->reader);
+      reinterpret_cast<mkvparser::MkvReader *>(webm_ctx->reader);
   return frame.Read(reader, *buffer) ? -1 : 0;
 }
 
@@ -226,6 +224,4 @@ int webm_guess_framerate(struct WebmInputContext *webm_ctx,
   return 0;
 }
 
-void webm_free(struct WebmInputContext *webm_ctx) {
-  reset(webm_ctx);
-}
+void webm_free(struct WebmInputContext *webm_ctx) { reset(webm_ctx); }
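
For orientation, a hedged caller-side sketch of this reader: webm_read_frame
grows *buffer on demand (the delete[]/new pair above), so one buffer is reused
across frames. The 0 = frame-read convention is assumed from webmdec.h (see
the TODO there about unifying return codes), and decode_one is a hypothetical
consumer:

  /* webm_ctx was previously set up by file_is_webm(&webm_ctx, &vpx_ctx). */
  uint8_t *buf = NULL; /* owned here, resized inside webm_read_frame */
  size_t bytes = 0, buf_size = 0;
  while (webm_read_frame(&webm_ctx, &buf, &bytes, &buf_size) == 0)
    decode_one(buf, bytes);
  webm_free(&webm_ctx); /* releases the parser state via reset() */
  /* buf was new[]-allocated by the reader, so release it with delete[]. */
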
diff --git a/webmdec.h b/webmdec.h
index 7d1638035523131d667912159f2233c5f8caefb6..3f22bc5d66ceba1cf43b2d27ed01ff24b369da10 100644
--- a/webmdec.h
+++ b/webmdec.h
@@ -55,10 +55,8 @@ int file_is_webm(struct WebmInputContext *webm_ctx,
 //     -1 - Error
 // TODO(vigneshv): Make the return values consistent across all functions in
 // this file.
-int webm_read_frame(struct WebmInputContext *webm_ctx,
-                    uint8_t **buffer,
-                    size_t *bytes_in_buffer,
-                    size_t *buffer_size);
+int webm_read_frame(struct WebmInputContext *webm_ctx, uint8_t **buffer,
+                    size_t *bytes_in_buffer, size_t *buffer_size);
 
 // Guesses the frame rate of the input file based on the container timestamps.
 int webm_guess_framerate(struct WebmInputContext *webm_ctx,
diff --git a/webmenc.cc b/webmenc.cc
index d41e700443c6cf941f61fe9171687508257498c6..c86999d33a2b83eecf764e73045569c2899a3707 100644
--- a/webmenc.cc
+++ b/webmenc.cc
@@ -23,8 +23,7 @@ const int kVideoTrackNumber = 1;
 void write_webm_file_header(struct EbmlGlobal *glob,
                             const vpx_codec_enc_cfg_t *cfg,
                             const struct vpx_rational *fps,
-                            stereo_format_t stereo_fmt,
-                            unsigned int fourcc,
+                            stereo_format_t stereo_fmt, unsigned int fourcc,
                             const struct VpxRational *par) {
   mkvmuxer::MkvWriter *const writer = new mkvmuxer::MkvWriter(glob->stream);
   mkvmuxer::Segment *const segment = new mkvmuxer::Segment();
@@ -43,34 +42,23 @@ void write_webm_file_header(struct EbmlGlobal *glob,
 
   const uint64_t video_track_id =
       segment->AddVideoTrack(static_cast<int>(cfg->g_w),
-                             static_cast<int>(cfg->g_h),
-                             kVideoTrackNumber);
-  mkvmuxer::VideoTrack* const video_track =
-      static_cast<mkvmuxer::VideoTrack*>(
-          segment->GetTrackByNumber(video_track_id));
+                             static_cast<int>(cfg->g_h), kVideoTrackNumber);
+  mkvmuxer::VideoTrack *const video_track = static_cast<mkvmuxer::VideoTrack *>(
+      segment->GetTrackByNumber(video_track_id));
   video_track->SetStereoMode(stereo_fmt);
   const char *codec_id;
   switch (fourcc) {
-  case VP8_FOURCC:
-    codec_id = "V_VP8";
-    break;
-  case VP9_FOURCC:
-    codec_id = "V_VP9";
-    break;
-  case VP10_FOURCC:
-    codec_id = "V_VP10";
-    break;
-  default:
-    codec_id = "V_VP10";
-    break;
+    case VP8_FOURCC: codec_id = "V_VP8"; break;
+    case VP9_FOURCC: codec_id = "V_VP9"; break;
+    case VP10_FOURCC: codec_id = "V_VP10"; break;
+    default: codec_id = "V_VP10"; break;
   }
   video_track->set_codec_id(codec_id);
   if (par->numerator > 1 || par->denominator > 1) {
     // TODO(fgalligan): Add support of DisplayUnit, Display Aspect Ratio type
     // to WebM format.
-    const uint64_t display_width =
-        static_cast<uint64_t>(((cfg->g_w * par->numerator * 1.0) /
-                               par->denominator) + .5);
+    const uint64_t display_width = static_cast<uint64_t>(
+        ((cfg->g_w * par->numerator * 1.0) / par->denominator) + .5);
     video_track->set_display_width(display_width);
     video_track->set_display_height(cfg->g_h);
   }
@@ -81,29 +69,25 @@ void write_webm_file_header(struct EbmlGlobal *glob,
   glob->segment = segment;
 }
 
-void write_webm_block(struct EbmlGlobal *glob,
-                      const vpx_codec_enc_cfg_t *cfg,
+void write_webm_block(struct EbmlGlobal *glob, const vpx_codec_enc_cfg_t *cfg,
                       const vpx_codec_cx_pkt_t *pkt) {
   mkvmuxer::Segment *const segment =
-      reinterpret_cast<mkvmuxer::Segment*>(glob->segment);
-  int64_t pts_ns = pkt->data.frame.pts * 1000000000ll *
-                   cfg->g_timebase.num / cfg->g_timebase.den;
-  if (pts_ns <= glob->last_pts_ns)
-    pts_ns = glob->last_pts_ns + 1000000;
+      reinterpret_cast<mkvmuxer::Segment *>(glob->segment);
+  int64_t pts_ns = pkt->data.frame.pts * 1000000000ll * cfg->g_timebase.num /
+                   cfg->g_timebase.den;
+  if (pts_ns <= glob->last_pts_ns) pts_ns = glob->last_pts_ns + 1000000;
   glob->last_pts_ns = pts_ns;
 
-  segment->AddFrame(static_cast<uint8_t*>(pkt->data.frame.buf),
-                    pkt->data.frame.sz,
-                    kVideoTrackNumber,
-                    pts_ns,
+  segment->AddFrame(static_cast<uint8_t *>(pkt->data.frame.buf),
+                    pkt->data.frame.sz, kVideoTrackNumber, pts_ns,
                     pkt->data.frame.flags & VPX_FRAME_IS_KEY);
 }
 
 void write_webm_file_footer(struct EbmlGlobal *glob) {
   mkvmuxer::MkvWriter *const writer =
-      reinterpret_cast<mkvmuxer::MkvWriter*>(glob->writer);
+      reinterpret_cast<mkvmuxer::MkvWriter *>(glob->writer);
   mkvmuxer::Segment *const segment =
-      reinterpret_cast<mkvmuxer::Segment*>(glob->segment);
+      reinterpret_cast<mkvmuxer::Segment *>(glob->segment);
   segment->Finalize();
   delete segment;
   delete writer;
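
The pts rescaling reflowed above converts codec ticks to WebM nanoseconds and
then bumps by 1 ms whenever a timestamp would not be strictly increasing. A
worked example under assumed values (1/30 s timebase, frame pts of 30 ticks):

  int64_t last_pts_ns = 0;
  const int64_t pts = 30, tb_num = 1, tb_den = 30;
  int64_t pts_ns = pts * 1000000000ll * tb_num / tb_den; /* 1e9 ns = 1 s */
  if (pts_ns <= last_pts_ns) pts_ns = last_pts_ns + 1000000; /* +1 ms floor */
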
diff --git a/webmenc.h b/webmenc.h
index c255d3de6694c9c29594236a67f53f1784fba7f7..87cb7eb35bd6a03cf355aae578d88624969c0e5b 100644
--- a/webmenc.h
+++ b/webmenc.h
@@ -41,12 +41,10 @@ typedef enum stereo_format {
 void write_webm_file_header(struct EbmlGlobal *glob,
                             const vpx_codec_enc_cfg_t *cfg,
                             const struct vpx_rational *fps,
-                            stereo_format_t stereo_fmt,
-                            unsigned int fourcc,
+                            stereo_format_t stereo_fmt, unsigned int fourcc,
                             const struct VpxRational *par);
 
-void write_webm_block(struct EbmlGlobal *glob,
-                      const vpx_codec_enc_cfg_t *cfg,
+void write_webm_block(struct EbmlGlobal *glob, const vpx_codec_enc_cfg_t *cfg,
                       const vpx_codec_cx_pkt_t *pkt);
 
 void write_webm_file_footer(struct EbmlGlobal *glob);
diff --git a/y4menc.c b/y4menc.c
index b647e8dcc5e891a3e57ea637be4dd40102e081c4..e26fcaf6ea30885dbdc01a5c9902817181473d95 100644
--- a/y4menc.c
+++ b/y4menc.c
@@ -17,39 +17,43 @@ int y4m_write_file_header(char *buf, size_t len, int width, int height,
   const char *color;
   switch (bit_depth) {
     case 8:
-      color = fmt == VPX_IMG_FMT_444A ? "C444alpha\n" :
-              fmt == VPX_IMG_FMT_I444 ? "C444\n" :
-              fmt == VPX_IMG_FMT_I422 ? "C422\n" :
-              "C420jpeg\n";
+      color = fmt == VPX_IMG_FMT_444A
+                  ? "C444alpha\n"
+                  : fmt == VPX_IMG_FMT_I444 ? "C444\n" : fmt == VPX_IMG_FMT_I422
+                                                             ? "C422\n"
+                                                             : "C420jpeg\n";
       break;
     case 9:
-      color = fmt == VPX_IMG_FMT_I44416 ? "C444p9 XYSCSS=444P9\n" :
-              fmt == VPX_IMG_FMT_I42216 ? "C422p9 XYSCSS=422P9\n" :
-              "C420p9 XYSCSS=420P9\n";
+      color = fmt == VPX_IMG_FMT_I44416
+                  ? "C444p9 XYSCSS=444P9\n"
+                  : fmt == VPX_IMG_FMT_I42216 ? "C422p9 XYSCSS=422P9\n"
+                                              : "C420p9 XYSCSS=420P9\n";
       break;
     case 10:
-      color = fmt == VPX_IMG_FMT_I44416 ? "C444p10 XYSCSS=444P10\n" :
-              fmt == VPX_IMG_FMT_I42216 ? "C422p10 XYSCSS=422P10\n" :
-              "C420p10 XYSCSS=420P10\n";
+      color = fmt == VPX_IMG_FMT_I44416
+                  ? "C444p10 XYSCSS=444P10\n"
+                  : fmt == VPX_IMG_FMT_I42216 ? "C422p10 XYSCSS=422P10\n"
+                                              : "C420p10 XYSCSS=420P10\n";
       break;
     case 12:
-      color = fmt == VPX_IMG_FMT_I44416 ? "C444p12 XYSCSS=444P12\n" :
-              fmt == VPX_IMG_FMT_I42216 ? "C422p12 XYSCSS=422P12\n" :
-              "C420p12 XYSCSS=420P12\n";
+      color = fmt == VPX_IMG_FMT_I44416
+                  ? "C444p12 XYSCSS=444P12\n"
+                  : fmt == VPX_IMG_FMT_I42216 ? "C422p12 XYSCSS=422P12\n"
+                                              : "C420p12 XYSCSS=420P12\n";
       break;
     case 14:
-      color = fmt == VPX_IMG_FMT_I44416 ? "C444p14 XYSCSS=444P14\n" :
-              fmt == VPX_IMG_FMT_I42216 ? "C422p14 XYSCSS=422P14\n" :
-              "C420p14 XYSCSS=420P14\n";
+      color = fmt == VPX_IMG_FMT_I44416
+                  ? "C444p14 XYSCSS=444P14\n"
+                  : fmt == VPX_IMG_FMT_I42216 ? "C422p14 XYSCSS=422P14\n"
+                                              : "C420p14 XYSCSS=420P14\n";
       break;
     case 16:
-      color = fmt == VPX_IMG_FMT_I44416 ? "C444p16 XYSCSS=444P16\n" :
-              fmt == VPX_IMG_FMT_I42216 ? "C422p16 XYSCSS=422P16\n" :
-              "C420p16 XYSCSS=420P16\n";
+      color = fmt == VPX_IMG_FMT_I44416
+                  ? "C444p16 XYSCSS=444P16\n"
+                  : fmt == VPX_IMG_FMT_I42216 ? "C422p16 XYSCSS=422P16\n"
+                                              : "C420p16 XYSCSS=420P16\n";
       break;
-    default:
-      color = NULL;
-      assert(0);
+    default: color = NULL; assert(0);
   }
   return snprintf(buf, len, "YUV4MPEG2 W%u H%u F%u:%u I%c %s", width, height,
                   framerate->numerator, framerate->denominator, 'p', color);
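
Concretely, for 8-bit 4:2:0 at 30 fps the header comes out as
"YUV4MPEG2 W640 H480 F30:1 Ip C420jpeg" plus the newline carried inside the
color string. A hedged call sketch (parameter order assumed from y4menc.h):

  struct VpxRational fps = { 30, 1 }; /* numerator, denominator */
  char hdr[128];
  y4m_write_file_header(hdr, sizeof(hdr), 640, 480, &fps, VPX_IMG_FMT_I420, 8);
  /* hdr now holds "YUV4MPEG2 W640 H480 F30:1 Ip C420jpeg\n" */
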
diff --git a/y4minput.c b/y4minput.c
index 34ea96d9d57d7f629582bbfa45f2c0afc80a4c19..7de859f7ac1cc09fdfffbba66e334b734e545ec2 100644
--- a/y4minput.c
+++ b/y4minput.c
@@ -25,7 +25,7 @@ static int file_read(void *buf, size_t size, FILE *file) {
   int file_error;
   size_t len = 0;
   do {
-    const size_t n = fread((uint8_t*)buf + len, 1, size - len, file);
+    const size_t n = fread((uint8_t *)buf + len, 1, size - len, file);
     len += n;
     file_error = ferror(file);
     if (file_error) {
@@ -41,83 +41,77 @@ static int file_read(void *buf, size_t size, FILE *file) {
   } while (!feof(file) && len < size && ++retry_count < kMaxRetries);
 
   if (!feof(file) && len != size) {
-    fprintf(stderr, "Error reading file: %u of %u bytes read,"
-                    " error: %d, retries: %d, %d: %s\n",
-            (uint32_t)len, (uint32_t)size, file_error, retry_count,
-            errno, strerror(errno));
+    fprintf(stderr,
+            "Error reading file: %u of %u bytes read,"
+            " error: %d, retries: %d, %d: %s\n",
+            (uint32_t)len, (uint32_t)size, file_error, retry_count, errno,
+            strerror(errno));
   }
   return len == size;
 }
 
 static int y4m_parse_tags(y4m_input *_y4m, char *_tags) {
-  int   got_w;
-  int   got_h;
-  int   got_fps;
-  int   got_interlace;
-  int   got_par;
-  int   got_chroma;
+  int got_w;
+  int got_h;
+  int got_fps;
+  int got_interlace;
+  int got_par;
+  int got_chroma;
   char *p;
   char *q;
   got_w = got_h = got_fps = got_interlace = got_par = got_chroma = 0;
   for (p = _tags;; p = q) {
     /*Skip any leading spaces.*/
-    while (*p == ' ')p++;
+    while (*p == ' ') p++;
     /*If that's all we have, stop.*/
-    if (p[0] == '\0')break;
+    if (p[0] == '\0') break;
     /*Find the end of this tag.*/
-    for (q = p + 1; *q != '\0' && *q != ' '; q++);
+    for (q = p + 1; *q != '\0' && *q != ' '; q++) {
+    }
     /*Process the tag.*/
     switch (p[0]) {
       case 'W': {
-        if (sscanf(p + 1, "%d", &_y4m->pic_w) != 1)return -1;
+        if (sscanf(p + 1, "%d", &_y4m->pic_w) != 1) return -1;
         got_w = 1;
-      }
-      break;
+      } break;
       case 'H': {
-        if (sscanf(p + 1, "%d", &_y4m->pic_h) != 1)return -1;
+        if (sscanf(p + 1, "%d", &_y4m->pic_h) != 1) return -1;
         got_h = 1;
-      }
-      break;
+      } break;
       case 'F': {
         if (sscanf(p + 1, "%d:%d", &_y4m->fps_n, &_y4m->fps_d) != 2) {
           return -1;
         }
         got_fps = 1;
-      }
-      break;
+      } break;
       case 'I': {
         _y4m->interlace = p[1];
         got_interlace = 1;
-      }
-      break;
+      } break;
       case 'A': {
         if (sscanf(p + 1, "%d:%d", &_y4m->par_n, &_y4m->par_d) != 2) {
           return -1;
         }
         got_par = 1;
-      }
-      break;
+      } break;
       case 'C': {
-        if (q - p > 16)return -1;
+        if (q - p > 16) return -1;
         memcpy(_y4m->chroma_type, p + 1, q - p - 1);
         _y4m->chroma_type[q - p - 1] = '\0';
         got_chroma = 1;
-      }
-      break;
-      /*Ignore unknown tags.*/
+      } break;
+        /*Ignore unknown tags.*/
     }
   }
-  if (!got_w || !got_h || !got_fps)return -1;
-  if (!got_interlace)_y4m->interlace = '?';
-  if (!got_par)_y4m->par_n = _y4m->par_d = 0;
+  if (!got_w || !got_h || !got_fps) return -1;
+  if (!got_interlace) _y4m->interlace = '?';
+  if (!got_par) _y4m->par_n = _y4m->par_d = 0;
   /*Chroma-type is not specified in older files, e.g., those generated by
      mplayer.*/
-  if (!got_chroma)strcpy(_y4m->chroma_type, "420");
+  if (!got_chroma) strcpy(_y4m->chroma_type, "420");
   return 0;
 }
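
So a Y4M stream header is consumed tag by tag. For example (y4m_parse_tags is
static to this file, so this is an in-file illustration with assumed values):

  y4m_input y4m;
  char tags[] = "W352 H288 F30000:1001 Ip A128:117 C420jpeg";
  if (y4m_parse_tags(&y4m, tags) == 0) {
    /* pic_w == 352, pic_h == 288, fps == 30000:1001, interlace == 'p',
       par == 128:117, chroma_type == "420jpeg"; omitting I, A or C would
       default them to '?', 0:0 and "420" respectively. */
  }
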
 
-
-
 /*All anti-aliasing filters in the following conversion functions are based on
    one of two window functions:
   The 6-tap Lanczos window (for down-sampling and shifts):
@@ -140,9 +134,9 @@ static int y4m_parse_tags(y4m_input *_y4m, char *_tags) {
    have these steps pipelined, for less memory consumption and better cache
    performance, but we do them separately for simplicity.*/
 
-#define OC_MINI(_a,_b)      ((_a)>(_b)?(_b):(_a))
-#define OC_MAXI(_a,_b)      ((_a)<(_b)?(_b):(_a))
-#define OC_CLAMPI(_a,_b,_c) (OC_MAXI(_a,OC_MINI(_b,_c)))
+#define OC_MINI(_a, _b) ((_a) > (_b) ? (_b) : (_a))
+#define OC_MAXI(_a, _b) ((_a) < (_b) ? (_b) : (_a))
+#define OC_CLAMPI(_a, _b, _c) (OC_MAXI(_a, OC_MINI(_b, _c)))
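
Note that the Lanczos taps used below (4, -17, 114, 35, -9, 1) sum to 128, so
the recurring (acc + 64) >> 7 is a round-to-nearest division by 128, and
OC_CLAMPI pins the result to [0, 255]. A flat-input sanity check (sketch):

  /* 4 - 17 + 114 + 35 - 9 + 1 == 128: the filter has unity DC gain. */
  const int acc = (4 - 17 + 114 + 35 - 9 + 1) * 80; /* all-80 neighborhood */
  const int out = OC_CLAMPI(0, (acc + 64) >> 7, 255); /* == 80, preserved */
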
 
 /*420jpeg chroma samples are sited like:
   Y-------Y-------Y-------Y-------
@@ -186,25 +180,36 @@ static int y4m_parse_tags(y4m_input *_y4m, char *_tags) {
    lines, and they are vertically co-sited with the luma samples in both the
    mpeg2 and jpeg cases (thus requiring no vertical resampling).*/
 static void y4m_42xmpeg2_42xjpeg_helper(unsigned char *_dst,
-                                        const unsigned char *_src, int _c_w, int _c_h) {
+                                        const unsigned char *_src, int _c_w,
+                                        int _c_h) {
   int y;
   int x;
   for (y = 0; y < _c_h; y++) {
     /*Filter: [4 -17 114 35 -9 1]/128, derived from a 6-tap Lanczos
        window.*/
     for (x = 0; x < OC_MINI(_c_w, 2); x++) {
-      _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[0] - 17 * _src[OC_MAXI(x - 1, 0)] +
-                                             114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] +
-                                             _src[OC_MINI(x + 3, _c_w - 1)] + 64) >> 7, 255);
+      _dst[x] = (unsigned char)OC_CLAMPI(
+          0, (4 * _src[0] - 17 * _src[OC_MAXI(x - 1, 0)] + 114 * _src[x] +
+              35 * _src[OC_MINI(x + 1, _c_w - 1)] -
+              9 * _src[OC_MINI(x + 2, _c_w - 1)] +
+              _src[OC_MINI(x + 3, _c_w - 1)] + 64) >>
+                 7,
+          255);
     }
     for (; x < _c_w - 3; x++) {
-      _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] +
-                                             114 * _src[x] + 35 * _src[x + 1] - 9 * _src[x + 2] + _src[x + 3] + 64) >> 7, 255);
+      _dst[x] = (unsigned char)OC_CLAMPI(
+          0, (4 * _src[x - 2] - 17 * _src[x - 1] + 114 * _src[x] +
+              35 * _src[x + 1] - 9 * _src[x + 2] + _src[x + 3] + 64) >>
+                 7,
+          255);
     }
     for (; x < _c_w; x++) {
-      _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] +
-                                             114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] +
-                                             _src[_c_w - 1] + 64) >> 7, 255);
+      _dst[x] = (unsigned char)OC_CLAMPI(
+          0, (4 * _src[x - 2] - 17 * _src[x - 1] + 114 * _src[x] +
+              35 * _src[OC_MINI(x + 1, _c_w - 1)] -
+              9 * _src[OC_MINI(x + 2, _c_w - 1)] + _src[_c_w - 1] + 64) >>
+                 7,
+          255);
     }
     _dst += _c_w;
     _src += _c_w;
@@ -277,12 +282,12 @@ static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
 static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
                                          unsigned char *_aux) {
   unsigned char *tmp;
-  int            c_w;
-  int            c_h;
-  int            c_sz;
-  int            pli;
-  int            y;
-  int            x;
+  int c_w;
+  int c_h;
+  int c_sz;
+  int pli;
+  int y;
+  int x;
   /*Skip past the luma data.*/
   _dst += _y4m->pic_w * _y4m->pic_h;
   /*Compute the size of each chroma plane.*/
@@ -302,53 +307,73 @@ static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
           This is the same filter used above, but in the other order.*/
         for (x = 0; x < c_w; x++) {
           for (y = 0; y < OC_MINI(c_h, 3); y++) {
-            _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[0]
-                                                         - 9 * tmp[OC_MAXI(y - 2, 0) * c_w] + 35 * tmp[OC_MAXI(y - 1, 0) * c_w]
-                                                         + 114 * tmp[y * c_w] - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w]
-                                                         + 4 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + 64) >> 7, 255);
+            _dst[y * c_w] = (unsigned char)OC_CLAMPI(
+                0, (tmp[0] - 9 * tmp[OC_MAXI(y - 2, 0) * c_w] +
+                    35 * tmp[OC_MAXI(y - 1, 0) * c_w] + 114 * tmp[y * c_w] -
+                    17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] +
+                    4 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + 64) >>
+                       7,
+                255);
           }
           for (; y < c_h - 2; y++) {
-            _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w]
-                                                         - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w]
-                                                         - 17 * tmp[(y + 1) * c_w] + 4 * tmp[(y + 2) * c_w] + 64) >> 7, 255);
+            _dst[y * c_w] = (unsigned char)OC_CLAMPI(
+                0, (tmp[(y - 3) * c_w] - 9 * tmp[(y - 2) * c_w] +
+                    35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] -
+                    17 * tmp[(y + 1) * c_w] + 4 * tmp[(y + 2) * c_w] + 64) >>
+                       7,
+                255);
           }
           for (; y < c_h; y++) {
-            _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w]
-                                                         - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w]
-                                                         - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] + 4 * tmp[(c_h - 1) * c_w] + 64) >> 7, 255);
+            _dst[y * c_w] = (unsigned char)OC_CLAMPI(
+                0, (tmp[(y - 3) * c_w] - 9 * tmp[(y - 2) * c_w] +
+                    35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] -
+                    17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] +
+                    4 * tmp[(c_h - 1) * c_w] + 64) >>
+                       7,
+                255);
           }
           _dst++;
           tmp++;
         }
         _dst += c_sz - c_w;
         tmp -= c_w;
-      }
-      break;
+      } break;
       case 2: {
         /*Slide C_r down a quarter-pel.
           This is the same as the horizontal filter.*/
         for (x = 0; x < c_w; x++) {
           for (y = 0; y < OC_MINI(c_h, 2); y++) {
-            _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[0]
-                                                         - 17 * tmp[OC_MAXI(y - 1, 0) * c_w] + 114 * tmp[y * c_w]
-                                                         + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w]
-                                                         + tmp[OC_MINI(y + 3, c_h - 1) * c_w] + 64) >> 7, 255);
+            _dst[y * c_w] = (unsigned char)OC_CLAMPI(
+                0,
+                (4 * tmp[0] - 17 * tmp[OC_MAXI(y - 1, 0) * c_w] +
+                 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] -
+                 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] +
+                 tmp[OC_MINI(y + 3, c_h - 1) * c_w] + 64) >>
+                    7,
+                255);
           }
           for (; y < c_h - 3; y++) {
-            _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w]
-                                                         - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[(y + 1) * c_w]
-                                                         - 9 * tmp[(y + 2) * c_w] + tmp[(y + 3) * c_w] + 64) >> 7, 255);
+            _dst[y * c_w] = (unsigned char)OC_CLAMPI(
+                0, (4 * tmp[(y - 2) * c_w] - 17 * tmp[(y - 1) * c_w] +
+                    114 * tmp[y * c_w] + 35 * tmp[(y + 1) * c_w] -
+                    9 * tmp[(y + 2) * c_w] + tmp[(y + 3) * c_w] + 64) >>
+                       7,
+                255);
           }
           for (; y < c_h; y++) {
-            _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w]
-                                                         - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w]
-                                                         - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + tmp[(c_h - 1) * c_w] + 64) >> 7, 255);
+            _dst[y * c_w] = (unsigned char)OC_CLAMPI(
+                0,
+                (4 * tmp[(y - 2) * c_w] - 17 * tmp[(y - 1) * c_w] +
+                 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] -
+                 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + tmp[(c_h - 1) * c_w] +
+                 64) >>
+                    7,
+                255);
           }
           _dst++;
           tmp++;
         }
-      }
-      break;
+      } break;
     }
     /*For actual interlaced material, this would have to be done separately on
        each field, and the shift amounts would be different.
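/*Aside (illustrative sketch, not part of the patch): the two cases above are
  vertical counterparts of the same kernel; C_r slides down a quarter pel
  with the forward taps [4 -17 114 35 -9 1]/128, while the preceding case
  slides C_b up with the time-reversed taps [1 -9 35 114 -17 4]/128.  A
  hypothetical single-sample form of the upward interior case:*/
static unsigned char filt_qpel_up(const unsigned char *s, int stride) {
  int v = (s[-3 * stride] - 9 * s[-2 * stride] + 35 * s[-stride] +
           114 * s[0] - 17 * s[stride] + 4 * s[2 * stride] + 64) >> 7;
  return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
}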
@@ -363,27 +388,37 @@ static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
 /*Perform vertical filtering to reduce a single plane from 4:2:2 to 4:2:0.
  This is used as a helper by several conversion routines.*/
 static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
-                                       const unsigned char *_src, int _c_w, int _c_h) {
+                                       const unsigned char *_src, int _c_w,
+                                       int _c_h) {
   int y;
   int x;
   /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
   for (x = 0; x < _c_w; x++) {
     for (y = 0; y < OC_MINI(_c_h, 2); y += 2) {
-      _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (64 * _src[0]
-                                          + 78 * _src[OC_MINI(1, _c_h - 1) * _c_w]
-                                          - 17 * _src[OC_MINI(2, _c_h - 1) * _c_w]
-                                          + 3 * _src[OC_MINI(3, _c_h - 1) * _c_w] + 64) >> 7, 255);
+      _dst[(y >> 1) * _c_w] =
+          OC_CLAMPI(0, (64 * _src[0] + 78 * _src[OC_MINI(1, _c_h - 1) * _c_w] -
+                        17 * _src[OC_MINI(2, _c_h - 1) * _c_w] +
+                        3 * _src[OC_MINI(3, _c_h - 1) * _c_w] + 64) >>
+                           7,
+                    255);
     }
     for (; y < _c_h - 3; y += 2) {
-      _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] + _src[(y + 3) * _c_w])
-                                          - 17 * (_src[(y - 1) * _c_w] + _src[(y + 2) * _c_w])
-                                          + 78 * (_src[y * _c_w] + _src[(y + 1) * _c_w]) + 64) >> 7, 255);
+      _dst[(y >> 1) * _c_w] =
+          OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] + _src[(y + 3) * _c_w]) -
+                        17 * (_src[(y - 1) * _c_w] + _src[(y + 2) * _c_w]) +
+                        78 * (_src[y * _c_w] + _src[(y + 1) * _c_w]) + 64) >>
+                           7,
+                    255);
     }
     for (; y < _c_h; y += 2) {
-      _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w]
-                                               + _src[(_c_h - 1) * _c_w]) - 17 * (_src[(y - 1) * _c_w]
-                                                                                  + _src[OC_MINI(y + 2, _c_h - 1) * _c_w])
-                                          + 78 * (_src[y * _c_w] + _src[OC_MINI(y + 1, _c_h - 1) * _c_w]) + 64) >> 7, 255);
+      _dst[(y >> 1) * _c_w] = OC_CLAMPI(
+          0,
+          (3 * (_src[(y - 2) * _c_w] + _src[(_c_h - 1) * _c_w]) -
+           17 * (_src[(y - 1) * _c_w] + _src[OC_MINI(y + 2, _c_h - 1) * _c_w]) +
+           78 * (_src[y * _c_w] + _src[OC_MINI(y + 1, _c_h - 1) * _c_w]) +
+           64) >>
+              7,
+          255);
     }
     _src++;
     _dst++;
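/*Aside (illustrative sketch, not part of the patch): the interior loop above
  pairs source rows symmetrically about the midpoint between rows y and y+1,
  so each decimated sample lands exactly halfway between the two rows it
  replaces.  A hypothetical single-sample form:*/
static unsigned char lanczos_2to1(const unsigned char *s, int stride) {
  /*[3 -17 78 78 -17 3]/128 applied to rows y-2..y+3; s points at row y.*/
  int v = (3 * (s[-2 * stride] + s[3 * stride]) -
           17 * (s[-stride] + s[2 * stride]) +
           78 * (s[0] + s[stride]) + 64) >> 7;
  return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
}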
@@ -496,12 +531,12 @@ static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m, unsigned char *_dst,
 static void y4m_convert_422_420jpeg(y4m_input *_y4m, unsigned char *_dst,
                                     unsigned char *_aux) {
   unsigned char *tmp;
-  int            c_w;
-  int            c_h;
-  int            c_sz;
-  int            dst_c_h;
-  int            dst_c_sz;
-  int            pli;
+  int c_w;
+  int c_h;
+  int c_sz;
+  int dst_c_h;
+  int dst_c_sz;
+  int pli;
   /*Skip past the luma data.*/
   _dst += _y4m->pic_w * _y4m->pic_h;
   /*Compute the size of each chroma plane.*/
@@ -568,16 +603,16 @@ static void y4m_convert_422_420jpeg(y4m_input *_y4m, unsigned char *_dst,
 static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst,
                                     unsigned char *_aux) {
   unsigned char *tmp;
-  int            c_w;
-  int            c_h;
-  int            c_sz;
-  int            dst_c_w;
-  int            dst_c_h;
-  int            dst_c_sz;
-  int            tmp_sz;
-  int            pli;
-  int            y;
-  int            x;
+  int c_w;
+  int c_h;
+  int c_sz;
+  int dst_c_w;
+  int dst_c_h;
+  int dst_c_sz;
+  int tmp_sz;
+  int pli;
+  int y;
+  int x;
   /*Skip past the luma data.*/
   _dst += _y4m->pic_w * _y4m->pic_h;
   /*Compute the size of each chroma plane.*/
@@ -598,23 +633,42 @@ static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst,
       /*Filters: [1 110 18 -1]/128 and [-3 50 86 -5]/128, both derived from a
          4-tap Mitchell window.*/
       for (x = 0; x < OC_MINI(c_w, 1); x++) {
-        tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (111 * _aux[0]
-                                                   + 18 * _aux[OC_MINI(1, c_w - 1)] - _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255);
-        tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (47 * _aux[0]
-                                                       + 86 * _aux[OC_MINI(1, c_w - 1)] - 5 * _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255);
+        tmp[x << 1] = (unsigned char)OC_CLAMPI(
+            0, (111 * _aux[0] + 18 * _aux[OC_MINI(1, c_w - 1)] -
+                _aux[OC_MINI(2, c_w - 1)] + 64) >>
+                   7,
+            255);
+        tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(
+            0, (47 * _aux[0] + 86 * _aux[OC_MINI(1, c_w - 1)] -
+                5 * _aux[OC_MINI(2, c_w - 1)] + 64) >>
+                   7,
+            255);
       }
       for (; x < c_w - 2; x++) {
-        tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x]
-                                                   + 18 * _aux[x + 1] - _aux[x + 2] + 64) >> 7, 255);
-        tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x]
-                                                       + 86 * _aux[x + 1] - 5 * _aux[x + 2] + 64) >> 7, 255);
+        tmp[x << 1] =
+            (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x] +
+                                         18 * _aux[x + 1] - _aux[x + 2] + 64) >>
+                                            7,
+                                     255);
+        tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(
+            0, (-3 * _aux[x - 1] + 50 * _aux[x] + 86 * _aux[x + 1] -
+                5 * _aux[x + 2] + 64) >>
+                   7,
+            255);
       }
       for (; x < c_w; x++) {
-        tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x]
-                                                   + 18 * _aux[OC_MINI(x + 1, c_w - 1)] - _aux[c_w - 1] + 64) >> 7, 255);
+        tmp[x << 1] = (unsigned char)OC_CLAMPI(
+            0, (_aux[x - 1] + 110 * _aux[x] +
+                18 * _aux[OC_MINI(x + 1, c_w - 1)] - _aux[c_w - 1] + 64) >>
+                   7,
+            255);
         if ((x << 1 | 1) < dst_c_w) {
-          tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x]
-                                                         + 86 * _aux[OC_MINI(x + 1, c_w - 1)] - 5 * _aux[c_w - 1] + 64) >> 7, 255);
+          tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(
+              0,
+              (-3 * _aux[x - 1] + 50 * _aux[x] +
+               86 * _aux[OC_MINI(x + 1, c_w - 1)] - 5 * _aux[c_w - 1] + 64) >>
+                  7,
+              255);
         }
       }
       tmp += dst_c_w;
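/*Aside (illustrative sketch, not part of the patch): each 4:1:1 chroma
  sample expands to two outputs, one a quarter pel to either side; at the
  left edge the x-1 tap is clamped onto the center sample, which is where
  the folded coefficients 111 (= 1 + 110) and 47 (= -3 + 50) above come
  from.  A hypothetical interior-sample form:*/
static void mitchell_1to2(const unsigned char *s, unsigned char *d) {
  int a = (s[-1] + 110 * s[0] + 18 * s[1] - s[2] + 64) >> 7;
  int b = (-3 * s[-1] + 50 * s[0] + 86 * s[1] - 5 * s[2] + 64) >> 7;
  d[0] = (unsigned char)(a < 0 ? 0 : a > 255 ? 255 : a);
  d[1] = (unsigned char)(b < 0 ? 0 : b > 255 ? 255 : b);
}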
@@ -631,16 +685,16 @@ static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst,
 static void y4m_convert_444_420jpeg(y4m_input *_y4m, unsigned char *_dst,
                                     unsigned char *_aux) {
   unsigned char *tmp;
-  int            c_w;
-  int            c_h;
-  int            c_sz;
-  int            dst_c_w;
-  int            dst_c_h;
-  int            dst_c_sz;
-  int            tmp_sz;
-  int            pli;
-  int            y;
-  int            x;
+  int c_w;
+  int c_h;
+  int c_sz;
+  int dst_c_w;
+  int dst_c_h;
+  int dst_c_sz;
+  int tmp_sz;
+  int pli;
+  int y;
+  int x;
   /*Skip past the luma data.*/
   _dst += _y4m->pic_w * _y4m->pic_h;
   /*Compute the size of each chroma plane.*/
@@ -656,18 +710,27 @@ static void y4m_convert_444_420jpeg(y4m_input *_y4m, unsigned char *_dst,
     /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
     for (y = 0; y < c_h; y++) {
       for (x = 0; x < OC_MINI(c_w, 2); x += 2) {
-        tmp[x >> 1] = OC_CLAMPI(0, (64 * _aux[0] + 78 * _aux[OC_MINI(1, c_w - 1)]
-                                    - 17 * _aux[OC_MINI(2, c_w - 1)]
-                                    + 3 * _aux[OC_MINI(3, c_w - 1)] + 64) >> 7, 255);
+        tmp[x >> 1] =
+            OC_CLAMPI(0, (64 * _aux[0] + 78 * _aux[OC_MINI(1, c_w - 1)] -
+                          17 * _aux[OC_MINI(2, c_w - 1)] +
+                          3 * _aux[OC_MINI(3, c_w - 1)] + 64) >>
+                             7,
+                      255);
       }
       for (; x < c_w - 3; x += 2) {
-        tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[x + 3])
-                                    - 17 * (_aux[x - 1] + _aux[x + 2]) + 78 * (_aux[x] + _aux[x + 1]) + 64) >> 7, 255);
+        tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[x + 3]) -
+                                    17 * (_aux[x - 1] + _aux[x + 2]) +
+                                    78 * (_aux[x] + _aux[x + 1]) + 64) >>
+                                       7,
+                                255);
       }
       for (; x < c_w; x += 2) {
-        tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[c_w - 1]) -
-                                    17 * (_aux[x - 1] + _aux[OC_MINI(x + 2, c_w - 1)]) +
-                                    78 * (_aux[x] + _aux[OC_MINI(x + 1, c_w - 1)]) + 64) >> 7, 255);
+        tmp[x >> 1] = OC_CLAMPI(
+            0, (3 * (_aux[x - 2] + _aux[c_w - 1]) -
+                17 * (_aux[x - 1] + _aux[OC_MINI(x + 2, c_w - 1)]) +
+                78 * (_aux[x] + _aux[OC_MINI(x + 1, c_w - 1)]) + 64) >>
+                   7,
+            255);
       }
       tmp += dst_c_w;
       _aux += c_w;
@@ -700,9 +763,9 @@ static void y4m_convert_null(y4m_input *_y4m, unsigned char *_dst,
 
 int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
                    int only_420) {
-  char buffer[80] = {0};
-  int  ret;
-  int  i;
+  char buffer[80] = { 0 };
+  int ret;
+  int i;
   /*Read until newline, or 80 cols, whichever happens first.*/
   for (i = 0; i < 79; i++) {
     if (_nskip > 0) {
@@ -711,10 +774,10 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
     } else {
       if (!file_read(buffer + i, 1, _fin)) return -1;
     }
-    if (buffer[i] == '\n')break;
+    if (buffer[i] == '\n') break;
   }
   /*We skipped too much header data.*/
-  if (_nskip > 0)return -1;
+  if (_nskip > 0) return -1;
   if (i == 79) {
     fprintf(stderr, "Error parsing header; not a YUV4MPEG2 file?\n");
     return -1;
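/*Aside (informational, not part of the patch): the stream header read here
  is a single line of the form
    YUV4MPEG2 W<width> H<height> F<fps_n>:<fps_d> I<interlace> A<par_n>:<par_d> C<chroma>
  terminated by '\n'; the 79-byte cap above bounds how much of it is
  accepted before parsing is abandoned.*/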
@@ -733,10 +796,12 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
     return ret;
   }
   if (_y4m->interlace == '?') {
-    fprintf(stderr, "Warning: Input video interlacing format unknown; "
+    fprintf(stderr,
+            "Warning: Input video interlacing format unknown; "
             "assuming progressive scan.\n");
   } else if (_y4m->interlace != 'p') {
-    fprintf(stderr, "Input video is interlaced; "
+    fprintf(stderr,
+            "Input video is interlaced; "
             "only progressive scan is handled.\n");
     return -1;
   }
@@ -745,9 +810,11 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
   _y4m->bit_depth = 8;
   if (strcmp(_y4m->chroma_type, "420") == 0 ||
       strcmp(_y4m->chroma_type, "420jpeg") == 0) {
-    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
-    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h
-                            + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
+        _y4m->dst_c_dec_v = 2;
+    _y4m->dst_buf_read_sz =
+        _y4m->pic_w * _y4m->pic_h +
+        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
     /* Natively supported: no conversion required. */
     _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
     _y4m->convert = y4m_convert_null;
@@ -756,9 +823,9 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
     _y4m->dst_c_dec_h = 2;
     _y4m->src_c_dec_v = 2;
     _y4m->dst_c_dec_v = 2;
-    _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h +
-                                 2 * ((_y4m->pic_w + 1) / 2) *
-                                 ((_y4m->pic_h + 1) / 2));
+    _y4m->dst_buf_read_sz =
+        2 * (_y4m->pic_w * _y4m->pic_h +
+             2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2));
     /* Natively supported: no conversion required. */
     _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
     _y4m->convert = y4m_convert_null;
@@ -774,9 +841,9 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
     _y4m->dst_c_dec_h = 2;
     _y4m->src_c_dec_v = 2;
     _y4m->dst_c_dec_v = 2;
-    _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h +
-                                 2 * ((_y4m->pic_w + 1) / 2) *
-                                 ((_y4m->pic_h + 1) / 2));
+    _y4m->dst_buf_read_sz =
+        2 * (_y4m->pic_w * _y4m->pic_h +
+             2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2));
     /* Natively supported: no conversion required. */
     _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
     _y4m->convert = y4m_convert_null;
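/*Aside (informational, not part of the patch): the leading factor of 2 in
  the dst_buf_read_sz expressions above appears to be bytes per sample
  rather than an extra plane; these branches cover inputs whose bit depth
  exceeds 8, where every luma and chroma sample occupies two bytes (see
  bytes_per_sample in y4m_input_fetch_frame below).*/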
@@ -788,20 +855,23 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
       return -1;
     }
   } else if (strcmp(_y4m->chroma_type, "420mpeg2") == 0) {
-    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
+    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
+        _y4m->dst_c_dec_v = 2;
     _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
     /*Chroma filter required: read into the aux buf first.*/
     _y4m->aux_buf_sz = _y4m->aux_buf_read_sz =
-                         2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
     _y4m->convert = y4m_convert_42xmpeg2_42xjpeg;
   } else if (strcmp(_y4m->chroma_type, "420paldv") == 0) {
-    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
+    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
+        _y4m->dst_c_dec_v = 2;
     _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
     /*Chroma filter required: read into the aux buf first.
       We need to make two filter passes, so we need some extra space in the
        aux buffer.*/
     _y4m->aux_buf_sz = 3 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
-    _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+    _y4m->aux_buf_read_sz =
+        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
     _y4m->convert = y4m_convert_42xpaldv_42xjpeg;
   } else if (strcmp(_y4m->chroma_type, "422jpeg") == 0) {
     _y4m->src_c_dec_h = _y4m->dst_c_dec_h = 2;
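/*Aside (informational, not part of the patch): in the 420paldv branch above,
  aux_buf_read_sz covers the two chroma planes as read from the stream
  (2 * ((w + 1) / 2) * ((h + 1) / 2) bytes), and aux_buf_sz adds one more
  plane of scratch, giving the factor of 3: one filter pass stages its
  intermediate result in the extra plane for the other pass to consume.*/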
@@ -809,7 +879,8 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
     _y4m->dst_c_dec_v = 2;
     _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
     /*Chroma filter required: read into the aux buf first.*/
-    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz =
+        2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
     _y4m->convert = y4m_convert_422jpeg_420jpeg;
   } else if (strcmp(_y4m->chroma_type, "422") == 0) {
     _y4m->src_c_dec_h = 2;
@@ -822,16 +893,16 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
         We need to make two filter passes, so we need some extra space in the
          aux buffer.*/
       _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
-      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
-          ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+      _y4m->aux_buf_sz =
+          _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
       _y4m->convert = y4m_convert_422_420jpeg;
     } else {
       _y4m->vpx_fmt = VPX_IMG_FMT_I422;
       _y4m->bps = 16;
       _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
       _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
-      _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h
-                              + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+      _y4m->dst_buf_read_sz =
+          _y4m->pic_w * _y4m->pic_h + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
       /*Natively supported: no conversion required.*/
       _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
       _y4m->convert = y4m_convert_null;
@@ -878,7 +949,8 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
       We need to make two filter passes, so we need some extra space in the
        aux buffer.*/
     _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 3) / 4) * _y4m->pic_h;
-    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+    _y4m->aux_buf_sz =
+        _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
     _y4m->convert = y4m_convert_411_420jpeg;
   } else if (strcmp(_y4m->chroma_type, "444") == 0) {
     _y4m->src_c_dec_h = 1;
@@ -891,8 +963,8 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
         We need to make two filter passes, so we need some extra space in the
          aux buffer.*/
       _y4m->aux_buf_read_sz = 2 * _y4m->pic_w * _y4m->pic_h;
-      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
-          ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+      _y4m->aux_buf_sz =
+          _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
       _y4m->convert = y4m_convert_444_420jpeg;
     } else {
       _y4m->vpx_fmt = VPX_IMG_FMT_I444;
@@ -971,9 +1043,10 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
   }
   /*The size of the final frame buffers is always computed from the
      destination chroma decimation type.*/
-  _y4m->dst_buf_sz = _y4m->pic_w * _y4m->pic_h
-                     + 2 * ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
-                     ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
+  _y4m->dst_buf_sz =
+      _y4m->pic_w * _y4m->pic_h +
+      2 * ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
+          ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
   if (_y4m->bit_depth == 8)
     _y4m->dst_buf = (unsigned char *)malloc(_y4m->dst_buf_sz);
   else
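/*Aside (informational, not part of the patch): for a 1920x1080 4:2:0 frame
  (dst_c_dec_h == dst_c_dec_v == 2) the expression above gives
  1920 * 1080 + 2 * (960 * 540) = 3110400 bytes, the familiar w * h * 3 / 2;
  the rounded-up divisions keep odd dimensions safe.*/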
@@ -991,11 +1064,11 @@ void y4m_input_close(y4m_input *_y4m) {
 
 int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) {
   char frame[6];
-  int  pic_sz;
-  int  c_w;
-  int  c_h;
-  int  c_sz;
-  int  bytes_per_sample = _y4m->bit_depth > 8 ? 2 : 1;
+  int pic_sz;
+  int c_w;
+  int c_h;
+  int c_sz;
+  int bytes_per_sample = _y4m->bit_depth > 8 ? 2 : 1;
   /*Read and skip the frame header.*/
   if (!file_read(frame, 6, _fin)) return 0;
   if (memcmp(frame, "FRAME", 5)) {
@@ -1004,8 +1077,9 @@ int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) {
   }
   if (frame[5] != '\n') {
     char c;
-    int  j;
-    for (j = 0; j < 79 && file_read(&c, 1, _fin) && c != '\n'; j++) {}
+    int j;
+    for (j = 0; j < 79 && file_read(&c, 1, _fin) && c != '\n'; j++) {
+    }
     if (j == 79) {
       fprintf(stderr, "Error parsing Y4M frame header\n");
       return -1;
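/*Aside (informational, not part of the patch): in Y4M every picture is
  preceded by a literal "FRAME" marker, optional per-frame parameters, and a
  terminating '\n'; the loop above skips up to 79 bytes of such parameters
  before giving up.*/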
diff --git a/y4minput.h b/y4minput.h
index 356cebbcf0aadabe4b86f5e7f645251aa1fa7479..9e69ceb835a8861620e7b822d415225c9538b3c9 100644
--- a/y4minput.h
+++ b/y4minput.h
@@ -14,52 +14,46 @@
 #ifndef Y4MINPUT_H_
 #define Y4MINPUT_H_
 
-# include <stdio.h>
-# include "vpx/vpx_image.h"
+#include <stdio.h>
+#include "vpx/vpx_image.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-
-
 typedef struct y4m_input y4m_input;
 
-
-
 /*The function used to perform chroma conversion.*/
-typedef void (*y4m_convert_func)(y4m_input *_y4m,
-                                 unsigned char *_dst, unsigned char *_src);
-
-
+typedef void (*y4m_convert_func)(y4m_input *_y4m, unsigned char *_dst,
+                                 unsigned char *_src);
 
 struct y4m_input {
-  int               pic_w;
-  int               pic_h;
-  int               fps_n;
-  int               fps_d;
-  int               par_n;
-  int               par_d;
-  char              interlace;
-  int               src_c_dec_h;
-  int               src_c_dec_v;
-  int               dst_c_dec_h;
-  int               dst_c_dec_v;
-  char              chroma_type[16];
+  int pic_w;
+  int pic_h;
+  int fps_n;
+  int fps_d;
+  int par_n;
+  int par_d;
+  char interlace;
+  int src_c_dec_h;
+  int src_c_dec_v;
+  int dst_c_dec_h;
+  int dst_c_dec_v;
+  char chroma_type[16];
   /*The size of each converted frame buffer.*/
-  size_t            dst_buf_sz;
+  size_t dst_buf_sz;
   /*The amount to read directly into the converted frame buffer.*/
-  size_t            dst_buf_read_sz;
+  size_t dst_buf_read_sz;
   /*The size of the auxiliary buffer.*/
-  size_t            aux_buf_sz;
+  size_t aux_buf_sz;
   /*The amount to read into the auxiliary buffer.*/
-  size_t            aux_buf_read_sz;
-  y4m_convert_func  convert;
-  unsigned char    *dst_buf;
-  unsigned char    *aux_buf;
-  enum vpx_img_fmt  vpx_fmt;
-  int               bps;
-  unsigned int      bit_depth;
+  size_t aux_buf_read_sz;
+  y4m_convert_func convert;
+  unsigned char *dst_buf;
+  unsigned char *aux_buf;
+  enum vpx_img_fmt vpx_fmt;
+  int bps;
+  unsigned int bit_depth;
 };
 
 int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,