diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index efbe2365b633d5cc883ce6f33cca44674c31be79..91384c73f7973840d3bf26f2a44733dbb26e222a 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -1071,8 +1071,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
 
         error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
 
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
-
         vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
         rate += rateuv;
 
@@ -1139,8 +1137,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
         else
             Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
 
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
-
         if (Error4x4 < Error16x16)
         {
             x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
@@ -1237,8 +1233,6 @@ int vp8cx_encode_inter_macroblock
 
     if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
     {
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
-
         vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
 
         if (xd->mode_info_context->mbmi.mode == B_PRED)
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index af80857d231063e747919e2a4d9f2da882d26c0e..6611e0077ba591182175b7e841603dc170fffcb0 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -53,8 +53,6 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK
 
     x->quantize_b(be, b);
 
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!b->eob);
-
     vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
 
     RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -70,8 +68,6 @@ void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BL
 
     x->quantize_b(be, b);
 
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!b->eob);
-
     IDCT_INVOKE(&rtcd->common->idct, idct16)(b->dqcoeff, b->diff, 32);
 
     RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -157,8 +153,6 @@ void vp8_encode_intra16x16mbyrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
 
     vp8_transform_intra_mby(x);
 
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
-
     vp8_quantize_mby(x);
 
     vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index 63cdf3c1a35db6b41991b51f6a0e843b0ac46b8c..1f9568902b56814277ea193c8814f2eee1163d6d 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -547,35 +547,6 @@ void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
 }
 
 
-
-static void vp8_find_mb_skip_coef(MACROBLOCK *x)
-{
-    int i;
-
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
-
-    if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
-    {
-        for (i = 0; i < 16; i++)
-        {
-            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (x->e_mbd.block[i].eob < 2);
-        }
-
-        for (i = 16; i < 25; i++)
-        {
-            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
-        }
-    }
-    else
-    {
-        for (i = 0; i < 24; i++)
-        {
-            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
-        }
-    }
-}
-
-
 void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
 {
     int b;
@@ -663,10 +634,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
 
 #if !(CONFIG_REALTIME_ONLY)
     if (x->optimize && x->rddiv > 1)
-    {
         vp8_optimize_mb(x, rtcd);
-        vp8_find_mb_skip_coef(x);
-    }
 #endif
 
     vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 1a8c8ebb795d183e3ef26fc785939a95069ab672..6cc224494db848be96c65873c1f293c104b25a2f 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -273,17 +273,10 @@ void vp8_quantize_mby(MACROBLOCK *x)
         && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
 
     for (i = 0; i < 16; i++)
-    {
         x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &=
-            (x->e_mbd.block[i].eob <= has_2nd_order);
-    }
 
     if(has_2nd_order)
-    {
         x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[24].eob);
-    }
 }
 
 void vp8_quantize_mb(MACROBLOCK *x)
@@ -292,13 +285,8 @@ void vp8_quantize_mb(MACROBLOCK *x)
     int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED
         && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
 
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
     for (i = 0; i < 24+has_2nd_order; i++)
-    {
         x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &=
-            (x->e_mbd.block[i].eob <= (has_2nd_order && i<16));
-    }
 }
 
 
@@ -307,8 +295,5 @@ void vp8_quantize_mbuv(MACROBLOCK *x)
     int i;
 
     for (i = 16; i < 24; i++)
-    {
         x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
-    }
 }
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index 0e86f28dfdd7a02537ee35b95e670d929690d0b2..e4da833791d49829704a29aa72a77da9fe4bccbe 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -198,6 +198,32 @@ static void tokenize1st_order_b
 
 }
 
+
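+/* A macroblock is skippable if no block will emit tokens: with a Y2
+ * block, each luma block may hold at most its DC (eob < 2), coded via
+ * Y2; all other blocks, including Y2, must have eob == 0.  Without Y2
+ * (B_PRED/SPLITMV), all 24 first-order blocks must have eob == 0. */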
+static int mb_is_skippable(MACROBLOCKD *x)
+{
+    int has_y2_block;
+    int skip = 1;
+    int i = 0;
+
+    has_y2_block = (x->mode_info_context->mbmi.mode != B_PRED
+                    && x->mode_info_context->mbmi.mode != SPLITMV);
+    if (has_y2_block)
+    {
+        for (i = 0; i < 16; i++)
+            skip &= (x->block[i].eob < 2);
+    }
+
+    for (; i < 24 + has_y2_block; i++)
+        skip &= (!x->block[i].eob);
+
+    return skip;
+}
+
+
 void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
 {
     ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
@@ -223,6 +249,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
 
 #if 1
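+    /* Compute the skip flag from each block's final eob. */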
 
+    x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(x);
     if (x->mode_info_context->mbmi.mb_skip_coeff)
     {
 
@@ -247,35 +275,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
     cpi->skip_false_count++;
 #endif
 #if 0
-
-    if (x->mbmi.mode == B_PRED || x->mbmi.mode == SPLITMV)
-    {
-        int i, skip = 1;
-
-        for (i = 0; i < 24; i++)
-            skip &= (!x->block[i].eob);
-
-        if (skip != x->mbmi.mb_skip_coeff)
-            skip += 0;
-
-        x->mbmi.mb_skip_coeff = skip;
-    }
-    else
-    {
-        int i, skip = 1;
-
-        for (i = 0; i < 16; i++)
-            skip &= (x->block[i].eob < 2);
-
-        for (i = 16; i < 25; i++)
-            skip &= (!x->block[i].eob);
-
-        if (skip != x->mbmi.mb_skip_coeff)
-            skip += 0;
-
-        x->mbmi.mb_skip_coeff = skip;
-    }
-
     vpx_memcpy(cpi->coef_counts_backup, cpi->coef_counts, sizeof(cpi->coef_counts));
 #endif