shithub: libvpx

ref: 4aec440682f74becb94bf39e3b8909ce26e10025
parent: 0810a2d8bcb31db7c735053c0d9693142eec172c
parent: f58011ada5e3d28d9acf078657f8117f0960d240
author: James Zern <jzern@google.com>
date: Wed Apr 29 15:01:48 EDT 2015

Merge changes from topic 'vpx_mem-cruft'

* changes:
  vpx_mem: remove vpx_memset
  vpx_mem: remove vpx_memcpy
  vpx_mem: remove vpx_memmove
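
For context, the wrappers removed here had the same signatures as their <string.h> counterparts, which is why every call site in the diff below is replaced one-for-one. The following is a minimal sketch of what such wrappers look like in a plain forwarding build; it is an illustration only, not the actual removed code (the real definitions lived in vpx_mem/):

    /* Hypothetical sketch of the removed wrappers. The one-for-one call-site
     * replacements in this patch imply they matched the standard signatures
     * and simply forwarded to the C library. */
    #include <string.h>

    void *vpx_memcpy(void *dest, const void *src, size_t length)
    {
        return memcpy(dest, src, length);
    }

    void *vpx_memset(void *dest, int val, size_t length)
    {
        return memset(dest, val, length);
    }

    void *vpx_memmove(void *dest, const void *src, size_t length)
    {
        return memmove(dest, src, length);
    }

With forwarding wrappers like these, dropping the vpx_ prefix at each call site is behavior-preserving, which is all the hunks below do.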

--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -398,9 +398,9 @@
   }
 
   void CopyOutputToRef() {
-    vpx_memcpy(output_ref_, output_, kOutputBufferSize);
+    memcpy(output_ref_, output_, kOutputBufferSize);
 #if CONFIG_VP9_HIGHBITDEPTH
-    vpx_memcpy(output16_ref_, output16_, kOutputBufferSize);
+    memcpy(output16_ref_, output16_, kOutputBufferSize);
 #endif
   }
 
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -502,11 +502,11 @@
       fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
 
       // clear reconstructed pixel buffers
-      vpx_memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
-      vpx_memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+      memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+      memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
 #if CONFIG_VP9_HIGHBITDEPTH
-      vpx_memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
-      vpx_memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+      memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+      memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
 #endif
 
       // quantization with maximum allowed step sizes
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -280,8 +280,8 @@
   // function of sharpness_lvl and the loopfilter lvl as:
   // block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4));
   // ...
-  // vpx_memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit),
-  //            SIMD_WIDTH);
+  // memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit),
+  //        SIMD_WIDTH);
   // This means that the largest value for mblim will occur when sharpness_lvl
   // is equal to 0, and lvl is equal to its greatest value (MAX_LOOP_FILTER).
   // In this case block_inside_limit will be equal to MAX_LOOP_FILTER and
--- a/test/pp_filter_test.cc
+++ b/test/pp_filter_test.cc
@@ -63,12 +63,12 @@
   uint8_t *const dst_image_ptr = dst_image + 8;
   uint8_t *const flimits =
       reinterpret_cast<uint8_t *>(vpx_memalign(16, block_width));
-  (void)vpx_memset(flimits, 255, block_width);
+  (void)memset(flimits, 255, block_width);
 
   // Initialize pixels in the input:
   //   block pixels to value 1,
   //   border pixels to value 10.
-  (void)vpx_memset(src_image, 10, input_size);
+  (void)memset(src_image, 10, input_size);
   uint8_t *pixel_ptr = src_image_ptr;
   for (int i = 0; i < block_height; ++i) {
     for (int j = 0; j < block_width; ++j) {
@@ -78,7 +78,7 @@
   }
 
   // Initialize pixels in the output to 99.
-  (void)vpx_memset(dst_image, 99, output_size);
+  (void)memset(dst_image, 99, output_size);
 
   ASM_REGISTER_STATE_CHECK(
       GetParam()(src_image_ptr, dst_image_ptr, input_stride,
--- a/test/quantize_test.cc
+++ b/test/quantize_test.cc
@@ -56,7 +56,7 @@
 
     // The full configuration is necessary to generate the quantization tables.
     VP8_CONFIG vp8_config;
-    vpx_memset(&vp8_config, 0, sizeof(vp8_config));
+    memset(&vp8_config, 0, sizeof(vp8_config));
 
     vp8_comp_ = vp8_create_compressor(&vp8_config);
 
@@ -69,8 +69,7 @@
     // Copy macroblockd from the reference to get pre-set-up dequant values.
     macroblockd_dst_ = reinterpret_cast<MACROBLOCKD *>(
         vpx_memalign(32, sizeof(*macroblockd_dst_)));
-    vpx_memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd,
-               sizeof(*macroblockd_dst_));
+    memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd, sizeof(*macroblockd_dst_));
     // Fix block pointers - currently they point to the blocks in the reference
     // structure.
     vp8_setup_block_dptrs(macroblockd_dst_);
@@ -79,8 +78,7 @@
   void UpdateQuantizer(int q) {
     vp8_set_quantizer(vp8_comp_, q);
 
-    vpx_memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd,
-               sizeof(*macroblockd_dst_));
+    memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd, sizeof(*macroblockd_dst_));
     vp8_setup_block_dptrs(macroblockd_dst_);
   }
 
--- a/test/set_roi.cc
+++ b/test/set_roi.cc
@@ -53,7 +53,7 @@
   cpi.common.mb_rows = 240 >> 4;
   cpi.common.mb_cols = 320 >> 4;
   const int mbs = (cpi.common.mb_rows * cpi.common.mb_cols);
-  vpx_memset(cpi.segment_feature_data, 0, sizeof(cpi.segment_feature_data));
+  memset(cpi.segment_feature_data, 0, sizeof(cpi.segment_feature_data));
 
   // Segment map
   cpi.segmentation_map = reinterpret_cast<unsigned char *>(vpx_calloc(mbs, 1));
@@ -61,9 +61,9 @@
   // Allocate memory for the source memory map.
   unsigned char *roi_map =
     reinterpret_cast<unsigned char *>(vpx_calloc(mbs, 1));
-  vpx_memset(&roi_map[mbs >> 2], 1, (mbs >> 2));
-  vpx_memset(&roi_map[mbs >> 1], 2, (mbs >> 2));
-  vpx_memset(&roi_map[mbs -(mbs >> 2)], 3, (mbs >> 2));
+  memset(&roi_map[mbs >> 2], 1, (mbs >> 2));
+  memset(&roi_map[mbs >> 1], 2, (mbs >> 2));
+  memset(&roi_map[mbs -(mbs >> 2)], 3, (mbs >> 2));
 
   // Do a test call with valid parameters.
   int roi_retval = vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows,
--- a/test/vpx_scale_test.cc
+++ b/test/vpx_scale_test.cc
@@ -33,10 +33,10 @@
   void ResetImage(int width, int height) {
     width_ = width;
     height_ = height;
-    vpx_memset(&img_, 0, sizeof(img_));
+    memset(&img_, 0, sizeof(img_));
     ASSERT_EQ(0, vp8_yv12_alloc_frame_buffer(&img_, width_, height_,
                                              VP8BORDERINPIXELS));
-    vpx_memset(img_.buffer_alloc, kBufFiller, img_.frame_size);
+    memset(img_.buffer_alloc, kBufFiller, img_.frame_size);
     FillPlane(img_.y_buffer, img_.y_crop_width, img_.y_crop_height,
               img_.y_stride);
     FillPlane(img_.u_buffer, img_.uv_crop_width, img_.uv_crop_height,
@@ -44,15 +44,15 @@
     FillPlane(img_.v_buffer, img_.uv_crop_width, img_.uv_crop_height,
               img_.uv_stride);
 
-    vpx_memset(&ref_img_, 0, sizeof(ref_img_));
+    memset(&ref_img_, 0, sizeof(ref_img_));
     ASSERT_EQ(0, vp8_yv12_alloc_frame_buffer(&ref_img_, width_, height_,
                                              VP8BORDERINPIXELS));
-    vpx_memset(ref_img_.buffer_alloc, kBufFiller, ref_img_.frame_size);
+    memset(ref_img_.buffer_alloc, kBufFiller, ref_img_.frame_size);
 
-    vpx_memset(&cpy_img_, 0, sizeof(cpy_img_));
+    memset(&cpy_img_, 0, sizeof(cpy_img_));
     ASSERT_EQ(0, vp8_yv12_alloc_frame_buffer(&cpy_img_, width_, height_,
                                              VP8BORDERINPIXELS));
-    vpx_memset(cpy_img_.buffer_alloc, kBufFiller, cpy_img_.frame_size);
+    memset(cpy_img_.buffer_alloc, kBufFiller, cpy_img_.frame_size);
     ReferenceCopyFrame();
   }
 
@@ -87,8 +87,8 @@
 
     // Fill the border pixels from the nearest image pixel.
     for (int y = 0; y < crop_height; ++y) {
-      vpx_memset(left, left[padding], padding);
-      vpx_memset(right, right[-1], right_extend);
+      memset(left, left[padding], padding);
+      memset(right, right[-1], right_extend);
       left += stride;
       right += stride;
     }
@@ -101,13 +101,13 @@
 
     // The first row was already extended to the left and right. Copy it up.
     for (int y = 0; y < padding; ++y) {
-      vpx_memcpy(top, left, extend_width);
+      memcpy(top, left, extend_width);
       top += stride;
     }
 
     uint8_t *bottom = left + (crop_height * stride);
     for (int y = 0; y <  bottom_extend; ++y) {
-      vpx_memcpy(bottom, left + (crop_height - 1) * stride, extend_width);
+      memcpy(bottom, left + (crop_height - 1) * stride, extend_width);
       bottom += stride;
     }
   }
--- a/vp8/common/alloccommon.c
+++ b/vp8/common/alloccommon.c
@@ -103,9 +103,9 @@
         goto allocation_fail;
 
     oci->post_proc_buffer_int_used = 0;
-    vpx_memset(&oci->postproc_state, 0, sizeof(oci->postproc_state));
-    vpx_memset(oci->post_proc_buffer.buffer_alloc, 128,
-               oci->post_proc_buffer.frame_size);
+    memset(&oci->postproc_state, 0, sizeof(oci->postproc_state));
+    memset(oci->post_proc_buffer.buffer_alloc, 128,
+           oci->post_proc_buffer.frame_size);
 
     /* Allocate buffer to store post-processing filter coefficients.
      *
@@ -176,7 +176,7 @@
     oci->clamp_type = RECON_CLAMP_REQUIRED;
 
     /* Initialize reference frame sign bias structure to defaults */
-    vpx_memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
+    memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
 
     /* Default disable buffer to buffer copying */
     oci->copy_buffer_to_gf = 0;
--- a/vp8/common/arm/armv6/dequant_idct_v6.asm
+++ b/vp8/common/arm/armv6/dequant_idct_v6.asm
@@ -165,7 +165,7 @@
     str     r1, [r2], r12           ; store output to dst
     bne     vp8_dequant_idct_loop2_v6
 
-; vpx_memset
+; memset
     sub     r0, r0, #32
     add     sp, sp, #4
 
--- a/vp8/common/common.h
+++ b/vp8/common/common.h
@@ -29,7 +29,7 @@
 
 #define vp8_copy( Dest, Src) { \
         assert( sizeof( Dest) == sizeof( Src)); \
-        vpx_memcpy( Dest, Src, sizeof( Src)); \
+        memcpy( Dest, Src, sizeof( Src)); \
     }
 
 /* Use this for variably-sized arrays. */
@@ -36,12 +36,12 @@
 
 #define vp8_copy_array( Dest, Src, N) { \
         assert( sizeof( *Dest) == sizeof( *Src)); \
-        vpx_memcpy( Dest, Src, N * sizeof( *Src)); \
+        memcpy( Dest, Src, N * sizeof( *Src)); \
     }
 
-#define vp8_zero( Dest)  vpx_memset( &Dest, 0, sizeof( Dest));
+#define vp8_zero( Dest)  memset( &Dest, 0, sizeof( Dest));
 
-#define vp8_zero_array( Dest, N)  vpx_memset( Dest, 0, N * sizeof( *Dest));
+#define vp8_zero_array( Dest, N)  memset( Dest, 0, N * sizeof( *Dest));
 
 
 #ifdef __cplusplus
--- a/vp8/common/dequantize.c
+++ b/vp8/common/dequantize.c
@@ -38,6 +38,6 @@
 
     vp8_short_idct4x4llm_c(input, dest, stride, dest, stride);
 
-    vpx_memset(input, 0, 32);
+    memset(input, 0, 32);
 
 }
--- a/vp8/common/entropy.c
+++ b/vp8/common/entropy.c
@@ -183,7 +183,6 @@
 
 void vp8_default_coef_probs(VP8_COMMON *pc)
 {
-    vpx_memcpy(pc->fc.coef_probs, default_coef_probs,
-                   sizeof(default_coef_probs));
+    memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(default_coef_probs));
 }
 
--- a/vp8/common/entropymode.c
+++ b/vp8/common/entropymode.c
@@ -159,13 +159,13 @@
 
 void vp8_init_mbmode_probs(VP8_COMMON *x)
 {
-    vpx_memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob));
-    vpx_memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob));
-    vpx_memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
+    memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob));
+    memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob));
+    memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
 }
 
 void vp8_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES-1])
 {
-    vpx_memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob));
+    memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob));
 }
 
--- a/vp8/common/extend.c
+++ b/vp8/common/extend.c
@@ -40,9 +40,9 @@
 
     for (i = 0; i < h; i++)
     {
-        vpx_memset(dest_ptr1, src_ptr1[0], el);
-        vpx_memcpy(dest_ptr1 + el, src_ptr1, w);
-        vpx_memset(dest_ptr2, src_ptr2[0], er);
+        memset(dest_ptr1, src_ptr1[0], el);
+        memcpy(dest_ptr1 + el, src_ptr1, w);
+        memset(dest_ptr2, src_ptr2[0], er);
         src_ptr1  += sp;
         src_ptr2  += sp;
         dest_ptr1 += dp;
@@ -60,13 +60,13 @@
 
     for (i = 0; i < et; i++)
     {
-        vpx_memcpy(dest_ptr1, src_ptr1, linesize);
+        memcpy(dest_ptr1, src_ptr1, linesize);
         dest_ptr1 += dp;
     }
 
     for (i = 0; i < eb; i++)
     {
-        vpx_memcpy(dest_ptr2, src_ptr2, linesize);
+        memcpy(dest_ptr2, src_ptr2, linesize);
         dest_ptr2 += dp;
     }
 }
--- a/vp8/common/idct_blk.c
+++ b/vp8/common/idct_blk.c
@@ -33,7 +33,7 @@
             else
             {
                 vp8_dc_only_idct_add_c (q[0]*dq[0], dst, stride, dst, stride);
-                vpx_memset(q, 0, 2 * sizeof(q[0]));
+                memset(q, 0, 2 * sizeof(q[0]));
             }
 
             q   += 16;
@@ -59,7 +59,7 @@
             else
             {
                 vp8_dc_only_idct_add_c (q[0]*dq[0], dstu, stride, dstu, stride);
-                vpx_memset(q, 0, 2 * sizeof(q[0]));
+                memset(q, 0, 2 * sizeof(q[0]));
             }
 
             q    += 16;
@@ -78,7 +78,7 @@
             else
             {
                 vp8_dc_only_idct_add_c (q[0]*dq[0], dstv, stride, dstv, stride);
-                vpx_memset(q, 0, 2 * sizeof(q[0]));
+                memset(q, 0, 2 * sizeof(q[0]));
             }
 
             q    += 16;
--- a/vp8/common/loopfilter.c
+++ b/vp8/common/loopfilter.c
@@ -82,11 +82,10 @@
         if (block_inside_limit < 1)
             block_inside_limit = 1;
 
-        vpx_memset(lfi->lim[i], block_inside_limit, SIMD_WIDTH);
-        vpx_memset(lfi->blim[i], (2 * filt_lvl + block_inside_limit),
-                SIMD_WIDTH);
-        vpx_memset(lfi->mblim[i], (2 * (filt_lvl + 2) + block_inside_limit),
-                SIMD_WIDTH);
+        memset(lfi->lim[i], block_inside_limit, SIMD_WIDTH);
+        memset(lfi->blim[i], (2 * filt_lvl + block_inside_limit), SIMD_WIDTH);
+        memset(lfi->mblim[i], (2 * (filt_lvl + 2) + block_inside_limit),
+               SIMD_WIDTH);
     }
 }
 
@@ -105,7 +104,7 @@
     /* init hev threshold const vectors */
     for(i = 0; i < 4 ; i++)
     {
-        vpx_memset(lfi->hev_thr[i], i, SIMD_WIDTH);
+        memset(lfi->hev_thr[i], i, SIMD_WIDTH);
     }
 }
 
@@ -151,7 +150,7 @@
             /* we could get rid of this if we assume that deltas are set to
              * zero when not in use; encoder always uses deltas
              */
-            vpx_memset(lfi->lvl[seg][0], lvl_seg, 4 * 4 );
+            memset(lfi->lvl[seg][0], lvl_seg, 4 * 4 );
             continue;
         }
 
--- a/vp8/common/mfqe.c
+++ b/vp8/common/mfqe.c
@@ -231,9 +231,9 @@
         {
             vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
             for (up = u, udp = ud, i = 0; i < uvblksize; ++i, up += uv_stride, udp += uvd_stride)
-                vpx_memcpy(udp, up, uvblksize);
+                memcpy(udp, up, uvblksize);
             for (vp = v, vdp = vd, i = 0; i < uvblksize; ++i, vp += uv_stride, vdp += uvd_stride)
-                vpx_memcpy(vdp, vp, uvblksize);
+                memcpy(vdp, vp, uvblksize);
         }
     }
 }
@@ -341,8 +341,8 @@
                                 for (k = 0; k < 4; ++k, up += show->uv_stride, udp += dest->uv_stride,
                                                         vp += show->uv_stride, vdp += dest->uv_stride)
                                 {
-                                    vpx_memcpy(udp, up, 4);
-                                    vpx_memcpy(vdp, vp, 4);
+                                    memcpy(udp, up, 4);
+                                    memcpy(vdp, vp, 4);
                                 }
                             }
                         }
--- a/vp8/common/mips/dspr2/dequantize_dspr2.c
+++ b/vp8/common/mips/dspr2/dequantize_dspr2.c
@@ -26,7 +26,7 @@
 
     vp8_short_idct4x4llm_dspr2(input, dest, stride, dest, stride);
 
-    vpx_memset(input, 0, 32);
+    memset(input, 0, 32);
 
 }
 
--- a/vp8/common/postproc.c
+++ b/vp8/common/postproc.c
@@ -355,8 +355,8 @@
                 else
                     mb_ppl = (unsigned char)ppl;
 
-                vpx_memset(ylptr, mb_ppl, 16);
-                vpx_memset(uvlptr, mb_ppl, 8);
+                memset(ylptr, mb_ppl, 16);
+                memset(uvlptr, mb_ppl, 8);
 
                 ylptr += 16;
                 uvlptr += 8;
@@ -403,7 +403,7 @@
     (void) low_var_thresh;
     (void) flag;
 
-    vpx_memset(limits, (unsigned char)ppl, 16 * mb_cols);
+    memset(limits, (unsigned char)ppl, 16 * mb_cols);
 
     /* TODO: The original code don't filter the 2 outer rows and columns. */
     for (mbr = 0; mbr < mb_rows; mbr++)
@@ -763,7 +763,7 @@
             /* insure that postproc is set to all 0's so that post proc
              * doesn't pull random data in from edge
              */
-            vpx_memset((&oci->post_proc_buffer_int)->buffer_alloc,128,(&oci->post_proc_buffer)->frame_size);
+            memset((&oci->post_proc_buffer_int)->buffer_alloc,128,(&oci->post_proc_buffer)->frame_size);
 
         }
     }
--- a/vp8/common/reconintra.c
+++ b/vp8/common/reconintra.c
@@ -70,10 +70,10 @@
             expected_dc = 128;
         }
 
-        /*vpx_memset(ypred_ptr, expected_dc, 256);*/
+        /*memset(ypred_ptr, expected_dc, 256);*/
         for (r = 0; r < 16; r++)
         {
-            vpx_memset(ypred_ptr, expected_dc, 16);
+            memset(ypred_ptr, expected_dc, 16);
             ypred_ptr += y_stride;
         }
     }
@@ -98,7 +98,7 @@
         for (r = 0; r < 16; r++)
         {
 
-            vpx_memset(ypred_ptr, yleft_col[r], 16);
+            memset(ypred_ptr, yleft_col[r], 16);
             ypred_ptr += y_stride;
         }
 
@@ -202,12 +202,12 @@
         }
 
 
-        /*vpx_memset(upred_ptr,expected_udc,64);*/
-        /*vpx_memset(vpred_ptr,expected_vdc,64);*/
+        /*memset(upred_ptr,expected_udc,64);*/
+        /*memset(vpred_ptr,expected_vdc,64);*/
         for (i = 0; i < 8; i++)
         {
-            vpx_memset(upred_ptr, expected_udc, 8);
-            vpx_memset(vpred_ptr, expected_vdc, 8);
+            memset(upred_ptr, expected_udc, 8);
+            memset(vpred_ptr, expected_vdc, 8);
             upred_ptr += pred_stride;
             vpred_ptr += pred_stride;
         }
@@ -217,8 +217,8 @@
     {
         for (i = 0; i < 8; i++)
         {
-            vpx_memcpy(upred_ptr, uabove_row, 8);
-            vpx_memcpy(vpred_ptr, vabove_row, 8);
+            memcpy(upred_ptr, uabove_row, 8);
+            memcpy(vpred_ptr, vabove_row, 8);
             upred_ptr += pred_stride;
             vpred_ptr += pred_stride;
         }
@@ -229,8 +229,8 @@
     {
         for (i = 0; i < 8; i++)
         {
-            vpx_memset(upred_ptr, uleft_col[i], 8);
-            vpx_memset(vpred_ptr, vleft_col[i], 8);
+            memset(upred_ptr, uleft_col[i], 8);
+            memset(vpred_ptr, vleft_col[i], 8);
             upred_ptr += pred_stride;
             vpred_ptr += pred_stride;
         }
--- a/vp8/common/setupintrarecon.c
+++ b/vp8/common/setupintrarecon.c
@@ -17,15 +17,15 @@
     int i;
 
     /* set up frame new frame for intra coded blocks */
-    vpx_memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
+    memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
     for (i = 0; i < ybf->y_height; i++)
         ybf->y_buffer[ybf->y_stride *i - 1] = (unsigned char) 129;
 
-    vpx_memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+    memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
     for (i = 0; i < ybf->uv_height; i++)
         ybf->u_buffer[ybf->uv_stride *i - 1] = (unsigned char) 129;
 
-    vpx_memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+    memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
     for (i = 0; i < ybf->uv_height; i++)
         ybf->v_buffer[ybf->uv_stride *i - 1] = (unsigned char) 129;
 
@@ -33,7 +33,7 @@
 
 void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf)
 {
-    vpx_memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
-    vpx_memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
-    vpx_memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+    memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
+    memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+    memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
 }
--- a/vp8/common/x86/idct_blk_mmx.c
+++ b/vp8/common/x86/idct_blk_mmx.c
@@ -36,7 +36,7 @@
         else if (eobs[0] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[0]*dq[0], dst, stride, dst, stride);
-            vpx_memset(q, 0, 2 * sizeof(q[0]));
+            memset(q, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[1] > 1)
@@ -45,7 +45,7 @@
         {
             vp8_dc_only_idct_add_mmx (q[16]*dq[0], dst+4, stride,
                                       dst+4, stride);
-            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
+            memset(q + 16, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[2] > 1)
@@ -54,7 +54,7 @@
         {
             vp8_dc_only_idct_add_mmx (q[32]*dq[0], dst+8, stride,
                                       dst+8, stride);
-            vpx_memset(q + 32, 0, 2 * sizeof(q[0]));
+            memset(q + 32, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[3] > 1)
@@ -63,7 +63,7 @@
         {
             vp8_dc_only_idct_add_mmx (q[48]*dq[0], dst+12, stride,
                                       dst+12, stride);
-            vpx_memset(q + 48, 0, 2 * sizeof(q[0]));
+            memset(q + 48, 0, 2 * sizeof(q[0]));
         }
 
         q    += 64;
@@ -85,7 +85,7 @@
         else if (eobs[0] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstu, stride, dstu, stride);
-            vpx_memset(q, 0, 2 * sizeof(q[0]));
+            memset(q, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[1] > 1)
@@ -94,7 +94,7 @@
         {
             vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstu+4, stride,
                                       dstu+4, stride);
-            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
+            memset(q + 16, 0, 2 * sizeof(q[0]));
         }
 
         q    += 32;
@@ -109,7 +109,7 @@
         else if (eobs[0] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstv, stride, dstv, stride);
-            vpx_memset(q, 0, 2 * sizeof(q[0]));
+            memset(q, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[1] > 1)
@@ -118,7 +118,7 @@
         {
             vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstv+4, stride,
                                       dstv+4, stride);
-            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
+            memset(q + 16, 0, 2 * sizeof(q[0]));
         }
 
         q    += 32;
--- a/vp8/decoder/decodeframe.c
+++ b/vp8/decoder/decodeframe.c
@@ -142,7 +142,7 @@
              * Better to use the predictor as reconstruction.
              */
             pbi->frame_corrupt_residual = 1;
-            vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
+            memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
             vp8_conceal_corrupt_mb(xd);
 
 
@@ -151,7 +151,7 @@
             /* force idct to be skipped for B_PRED and use the
              * prediction only for reconstruction
              * */
-            vpx_memset(xd->eobs, 0, 25);
+            memset(xd->eobs, 0, 25);
         }
     }
 #endif
@@ -184,7 +184,7 @@
 
             /* clear out residual eob info */
             if(xd->mode_info_context->mbmi.mb_skip_coeff)
-                vpx_memset(xd->eobs, 0, 25);
+                memset(xd->eobs, 0, 25);
 
             intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
 
@@ -214,7 +214,7 @@
                             (b->qcoeff[0] * DQC[0],
                                 dst, dst_stride,
                                 dst, dst_stride);
-                        vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+                        memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                     }
                 }
             }
@@ -251,7 +251,7 @@
 
                     vp8_short_inv_walsh4x4(&b->dqcoeff[0],
                         xd->qcoeff);
-                    vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
+                    memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
                 }
                 else
                 {
@@ -258,7 +258,7 @@
                     b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
                     vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
                         xd->qcoeff);
-                    vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+                    memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                 }
 
                 /* override the dc dequant constant in order to preserve the
@@ -323,7 +323,7 @@
 
     for (i = 0; i < (int)Border; i++)
     {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+        memcpy(dest_ptr1, src_ptr1, plane_stride);
         dest_ptr1 += plane_stride;
     }
 
@@ -338,7 +338,7 @@
 
     for (i = 0; i < (int)(Border); i++)
     {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+        memcpy(dest_ptr1, src_ptr1, plane_stride);
         dest_ptr1 += plane_stride;
     }
 
@@ -351,7 +351,7 @@
 
     for (i = 0; i < (int)(Border); i++)
     {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+        memcpy(dest_ptr1, src_ptr1, plane_stride);
         dest_ptr1 += plane_stride;
     }
 }
@@ -379,7 +379,7 @@
 
     for (i = 0; i < (int)Border; i++)
     {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+        memcpy(dest_ptr2, src_ptr2, plane_stride);
         dest_ptr2 += plane_stride;
     }
 
@@ -397,7 +397,7 @@
 
     for (i = 0; i < (int)(Border); i++)
     {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+        memcpy(dest_ptr2, src_ptr2, plane_stride);
         dest_ptr2 += plane_stride;
     }
 
@@ -411,7 +411,7 @@
 
     for (i = 0; i < (int)(Border); i++)
     {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+        memcpy(dest_ptr2, src_ptr2, plane_stride);
         dest_ptr2 += plane_stride;
     }
 }
@@ -446,8 +446,8 @@
 
     for (i = 0; i < plane_height; i++)
     {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
+        memset(dest_ptr1, src_ptr1[0], Border);
+        memset(dest_ptr2, src_ptr2[0], Border);
         src_ptr1  += plane_stride;
         src_ptr2  += plane_stride;
         dest_ptr1 += plane_stride;
@@ -470,8 +470,8 @@
 
     for (i = 0; i < plane_height; i++)
     {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
+        memset(dest_ptr1, src_ptr1[0], Border);
+        memset(dest_ptr2, src_ptr2[0], Border);
         src_ptr1  += plane_stride;
         src_ptr2  += plane_stride;
         dest_ptr1 += plane_stride;
@@ -490,8 +490,8 @@
 
     for (i = 0; i < plane_height; i++)
     {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
+        memset(dest_ptr1, src_ptr1[0], Border);
+        memset(dest_ptr2, src_ptr2[0], Border);
         src_ptr1  += plane_stride;
         src_ptr2  += plane_stride;
         dest_ptr1 += plane_stride;
@@ -568,7 +568,7 @@
 
         /* reset contexts */
         xd->above_context = pc->above_context;
-        vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+        memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
 
         xd->left_available = 0;
 
@@ -918,7 +918,7 @@
     if (pc->frame_type == KEY_FRAME)
     {
         /* Various keyframe initializations */
-        vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+        memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
 
         vp8_init_mbmode_probs(pc);
 
@@ -925,12 +925,12 @@
         vp8_default_coef_probs(pc);
 
         /* reset the segment feature data to 0 with delta coding (Default state). */
-        vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
+        memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
         xd->mb_segement_abs_delta = SEGMENT_DELTADATA;
 
         /* reset the mode ref deltasa for loop filter */
-        vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
-        vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
+        memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
+        memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
 
         /* All buffers are implicitly updated on key frames. */
         pc->refresh_golden_frame = 1;
@@ -1072,8 +1072,8 @@
         }
         else
         {
-          vpx_memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
-          vpx_memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
+          memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
+          memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
         }
     }
     if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME))
@@ -1105,7 +1105,7 @@
         {
             xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc);
 
-            vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
+            memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
 
             /* For each segmentation feature (Quant and loop filter level) */
             for (i = 0; i < MB_LVL_MAX; i++)
@@ -1129,7 +1129,7 @@
         if (xd->update_mb_segmentation_map)
         {
             /* Which macro block level features are enabled */
-            vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
+            memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
 
             /* Read the probs used to decode the segment id for each macro block. */
             for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
@@ -1278,7 +1278,7 @@
 #endif
     if (pc->refresh_entropy_probs == 0)
     {
-        vpx_memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
+        memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
     }
 
     pc->refresh_last_frame = pc->frame_type == KEY_FRAME  ||  vp8_read_bit(bc);
@@ -1327,7 +1327,7 @@
     }
 
     /* clear out the coeff buffer */
-    vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
+    memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
 
     vp8_decode_mode_mvs(pbi);
 
@@ -1341,7 +1341,7 @@
     }
 #endif
 
-    vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
+    memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
     pbi->frame_corrupt_residual = 0;
 
 #if CONFIG_MULTITHREAD
@@ -1380,7 +1380,7 @@
 
     if (pc->refresh_entropy_probs == 0)
     {
-        vpx_memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc));
+        memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc));
         pbi->independent_partitions = prev_independent_partitions;
     }
 
--- a/vp8/decoder/detokenize.c
+++ b/vp8/decoder/detokenize.c
@@ -20,8 +20,8 @@
     ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context);
     ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context);
 
-    vpx_memset(a_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
-    vpx_memset(l_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
+    memset(a_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
+    memset(l_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
 
     /* Clear entropy contexts for Y2 blocks */
     if (!x->mode_info_context->mbmi.is_4x4)
--- a/vp8/decoder/error_concealment.c
+++ b/vp8/decoder/error_concealment.c
@@ -350,7 +350,7 @@
                                  unsigned int first_corrupt)
 {
     int mb_row, mb_col;
-    vpx_memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols);
+    memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols);
     /* First calculate the overlaps for all blocks */
     for (mb_row = 0; mb_row < mb_rows; ++mb_row)
     {
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -58,7 +58,7 @@
     if (!pbi)
         return NULL;
 
-    vpx_memset(pbi, 0, sizeof(VP8D_COMP));
+    memset(pbi, 0, sizeof(VP8D_COMP));
 
     if (setjmp(pbi->common.error.jmp))
     {
--- a/vp8/decoder/threading.c
+++ b/vp8/decoder/threading.c
@@ -60,12 +60,12 @@
 
         mbd->segmentation_enabled    = xd->segmentation_enabled;
         mbd->mb_segement_abs_delta     = xd->mb_segement_abs_delta;
-        vpx_memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data));
+        memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data));
 
         /*signed char ref_lf_deltas[MAX_REF_LF_DELTAS];*/
-        vpx_memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas));
+        memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas));
         /*signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];*/
-        vpx_memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas));
+        memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas));
         /*unsigned char mode_ref_lf_delta_enabled;
         unsigned char mode_ref_lf_delta_update;*/
         mbd->mode_ref_lf_delta_enabled    = xd->mode_ref_lf_delta_enabled;
@@ -73,10 +73,10 @@
 
         mbd->current_bc = &pbi->mbc[0];
 
-        vpx_memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
-        vpx_memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
-        vpx_memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
-        vpx_memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
+        memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
+        memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
+        memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
+        memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
 
         mbd->fullpixel_mask = 0xffffffff;
 
@@ -137,7 +137,7 @@
              * Better to use the predictor as reconstruction.
              */
             pbi->frame_corrupt_residual = 1;
-            vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
+            memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
             vp8_conceal_corrupt_mb(xd);
 
 
@@ -146,7 +146,7 @@
             /* force idct to be skipped for B_PRED and use the
              * prediction only for reconstruction
              * */
-            vpx_memset(xd->eobs, 0, 25);
+            memset(xd->eobs, 0, 25);
         }
     }
 #endif
@@ -179,7 +179,7 @@
 
             /* clear out residual eob info */
             if(xd->mode_info_context->mbmi.mb_skip_coeff)
-                vpx_memset(xd->eobs, 0, 25);
+                memset(xd->eobs, 0, 25);
 
             intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
 
@@ -229,7 +229,7 @@
                     {
                         vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0],
                                              dst, dst_stride, dst, dst_stride);
-                        vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+                        memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                     }
                 }
             }
@@ -266,7 +266,7 @@
 
                     vp8_short_inv_walsh4x4(&b->dqcoeff[0],
                         xd->qcoeff);
-                    vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
+                    memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
                 }
                 else
                 {
@@ -273,7 +273,7 @@
                     b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
                     vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
                         xd->qcoeff);
-                    vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+                    memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                 }
 
                 /* override the dc dequant constant in order to preserve the
@@ -360,7 +360,7 @@
 
        /* reset contexts */
        xd->above_context = pc->above_context;
-       vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+       memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
 
        xd->left_available = 0;
 
@@ -499,9 +499,9 @@
                if( mb_row != pc->mb_rows-1 )
                {
                    /* Save decoded MB last row data for next-row decoding */
-                   vpx_memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16);
-                   vpx_memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8);
-                   vpx_memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8);
+                   memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16);
+                   memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8);
+                   memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8);
                }
 
                /* save left_col for next MB decoding */
@@ -876,23 +876,23 @@
     if (filter_level)
     {
         /* Set above_row buffer to 127 for decoding first MB row */
-        vpx_memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS-1, 127, yv12_fb_new->y_width + 5);
-        vpx_memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) +5);
-        vpx_memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) +5);
+        memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS-1, 127, yv12_fb_new->y_width + 5);
+        memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) +5);
+        memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) +5);
 
         for (j=1; j<pc->mb_rows; j++)
         {
-            vpx_memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS-1, (unsigned char)129, 1);
-            vpx_memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1);
-            vpx_memset(pbi->mt_vabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1);
+            memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS-1, (unsigned char)129, 1);
+            memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1);
+            memset(pbi->mt_vabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1);
         }
 
         /* Set left_col to 129 initially */
         for (j=0; j<pc->mb_rows; j++)
         {
-            vpx_memset(pbi->mt_yleft_col[j], (unsigned char)129, 16);
-            vpx_memset(pbi->mt_uleft_col[j], (unsigned char)129, 8);
-            vpx_memset(pbi->mt_vleft_col[j], (unsigned char)129, 8);
+            memset(pbi->mt_yleft_col[j], (unsigned char)129, 16);
+            memset(pbi->mt_uleft_col[j], (unsigned char)129, 8);
+            memset(pbi->mt_vleft_col[j], (unsigned char)129, 8);
         }
 
         /* Initialize the loop filter for this frame. */
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -1543,7 +1543,7 @@
     if (pc->refresh_entropy_probs == 0)
     {
         /* save a copy for later refresh */
-        vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
+        memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
     }
 
     vp8_update_coef_probs(cpi);
@@ -1620,7 +1620,7 @@
             /* concatenate partition buffers */
             for(i = 0; i < num_part; i++)
             {
-                vpx_memmove(dp, cpi->partition_d[i+1], cpi->partition_sz[i+1]);
+                memmove(dp, cpi->partition_d[i+1], cpi->partition_sz[i+1]);
                 cpi->partition_d[i+1] = dp;
                 dp += cpi->partition_sz[i+1];
             }
--- a/vp8/encoder/denoising.c
+++ b/vp8/encoder/denoising.c
@@ -415,8 +415,8 @@
             vp8_denoiser_free(denoiser);
             return 1;
         }
-        vpx_memset(denoiser->yv12_running_avg[i].buffer_alloc, 0,
-                   denoiser->yv12_running_avg[i].frame_size);
+        memset(denoiser->yv12_running_avg[i].buffer_alloc, 0,
+               denoiser->yv12_running_avg[i].frame_size);
 
     }
     denoiser->yv12_mc_running_avg.flags = 0;
@@ -428,8 +428,8 @@
         return 1;
     }
 
-    vpx_memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0,
-               denoiser->yv12_mc_running_avg.frame_size);
+    memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0,
+           denoiser->yv12_mc_running_avg.frame_size);
 
     if (vp8_yv12_alloc_frame_buffer(&denoiser->yv12_last_source, width,
                                     height, VP8BORDERINPIXELS) < 0) {
@@ -436,11 +436,11 @@
       vp8_denoiser_free(denoiser);
       return 1;
     }
-    vpx_memset(denoiser->yv12_last_source.buffer_alloc, 0,
-               denoiser->yv12_last_source.frame_size);
+    memset(denoiser->yv12_last_source.buffer_alloc, 0,
+           denoiser->yv12_last_source.frame_size);
 
     denoiser->denoise_state = vpx_calloc((num_mb_rows * num_mb_cols), 1);
-    vpx_memset(denoiser->denoise_state, 0, (num_mb_rows * num_mb_cols));
+    memset(denoiser->denoise_state, 0, (num_mb_rows * num_mb_cols));
     vp8_denoiser_set_parameters(denoiser, mode);
     denoiser->nmse_source_diff = 0;
     denoiser->nmse_source_diff_count = 0;
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -155,8 +155,8 @@
                         cpi->common.MBs));
 
         /* Copy map to sort list */
-        vpx_memcpy( sortlist, cpi->mb_activity_map,
-                    sizeof(unsigned int) * cpi->common.MBs );
+        memcpy( sortlist, cpi->mb_activity_map,
+                sizeof(unsigned int) * cpi->common.MBs );
 
 
         /* Ripple each value down to its correct position */
@@ -665,8 +665,7 @@
 
     x->mvc = cm->fc.mvc;
 
-    vpx_memset(cm->above_context, 0,
-               sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
+    memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
 
     /* Special case treatment when GF and ARF are not sensible options
      * for reference
@@ -744,7 +743,7 @@
     const int num_part = (1 << cm->multi_token_partition);
 #endif
 
-    vpx_memset(segment_counts, 0, sizeof(segment_counts));
+    memset(segment_counts, 0, sizeof(segment_counts));
     totalrate = 0;
 
     if (cpi->compressor_speed == 2)
@@ -974,7 +973,7 @@
         int i;
 
         /* Set to defaults */
-        vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
+        memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
 
         tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
 
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -506,8 +506,8 @@
     ENTROPY_CONTEXT *ta;
     ENTROPY_CONTEXT *tl;
 
-    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
 
     ta = (ENTROPY_CONTEXT *)&t_above;
     tl = (ENTROPY_CONTEXT *)&t_left;
@@ -555,8 +555,8 @@
     if (!x->e_mbd.left_context)
         return;
 
-    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
 
     ta = (ENTROPY_CONTEXT *)&t_above;
     tl = (ENTROPY_CONTEXT *)&t_left;
@@ -595,8 +595,8 @@
     if (!x->e_mbd.left_context)
         return;
 
-    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
 
     ta = (ENTROPY_CONTEXT *)&t_above;
     tl = (ENTROPY_CONTEXT *)&t_left;
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -416,14 +416,13 @@
         zd->subpixel_predict16x16    = xd->subpixel_predict16x16;
         zd->segmentation_enabled     = xd->segmentation_enabled;
         zd->mb_segement_abs_delta      = xd->mb_segement_abs_delta;
-        vpx_memcpy(zd->segment_feature_data, xd->segment_feature_data,
-                   sizeof(xd->segment_feature_data));
+        memcpy(zd->segment_feature_data, xd->segment_feature_data,
+               sizeof(xd->segment_feature_data));
 
-        vpx_memcpy(zd->dequant_y1_dc, xd->dequant_y1_dc,
-                   sizeof(xd->dequant_y1_dc));
-        vpx_memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
-        vpx_memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
-        vpx_memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
+        memcpy(zd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
+        memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
+        memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
+        memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
 
 #if 1
         /*TODO:  Remove dequant from BLOCKD.  This is a temporary solution until
@@ -438,15 +437,14 @@
 #endif
 
 
-        vpx_memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes));
-        vpx_memcpy(z->rd_thresh_mult, x->rd_thresh_mult,
-                   sizeof(x->rd_thresh_mult));
+        memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes));
+        memcpy(z->rd_thresh_mult, x->rd_thresh_mult, sizeof(x->rd_thresh_mult));
 
         z->zbin_over_quant = x->zbin_over_quant;
         z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
         z->zbin_mode_boost = x->zbin_mode_boost;
 
-        vpx_memset(z->error_bins, 0, sizeof(z->error_bins));
+        memset(z->error_bins, 0, sizeof(z->error_bins));
     }
 }
 
@@ -472,7 +470,7 @@
         mbd->subpixel_predict16x16   = xd->subpixel_predict16x16;
         mb->gf_active_ptr            = x->gf_active_ptr;
 
-        vpx_memset(mbr_ei[i].segment_counts, 0, sizeof(mbr_ei[i].segment_counts));
+        memset(mbr_ei[i].segment_counts, 0, sizeof(mbr_ei[i].segment_counts));
         mbr_ei[i].totalrate = 0;
 
         mb->partition_info = x->pi + x->e_mbd.mode_info_stride * (i + 1);
@@ -547,7 +545,7 @@
                         vpx_malloc(sizeof(sem_t) * th_count));
         CHECK_MEM_ERROR(cpi->mb_row_ei,
                         vpx_memalign(32, sizeof(MB_ROW_COMP) * th_count));
-        vpx_memset(cpi->mb_row_ei, 0, sizeof(MB_ROW_COMP) * th_count);
+        memset(cpi->mb_row_ei, 0, sizeof(MB_ROW_COMP) * th_count);
         CHECK_MEM_ERROR(cpi->en_thread_data,
                         vpx_malloc(sizeof(ENCODETHREAD_DATA) * th_count));
 
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -573,7 +573,7 @@
     {
         int flag[2] = {1, 1};
         vp8_initialize_rd_consts(cpi, x, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
-        vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+        memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
         vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
     }
 
@@ -1779,7 +1779,7 @@
 
     start_pos = cpi->twopass.stats_in;
 
-    vpx_memset(&next_frame, 0, sizeof(next_frame)); /* assure clean */
+    memset(&next_frame, 0, sizeof(next_frame)); /* assure clean */
 
     /* Load stats for the current frame. */
     mod_frame_err = calculate_modified_err(cpi, this_frame);
@@ -1875,7 +1875,7 @@
             break;
         }
 
-        vpx_memcpy(this_frame, &next_frame, sizeof(*this_frame));
+        memcpy(this_frame, &next_frame, sizeof(*this_frame));
 
         old_boost_score = boost_score;
     }
@@ -2445,7 +2445,7 @@
     if (cpi->twopass.frames_to_key == 0)
     {
         /* Define next KF group and assign bits to it */
-        vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+        memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
         find_next_key_frame(cpi, &this_frame_copy);
 
         /* Special case: Error error_resilient_mode mode does not make much
@@ -2471,7 +2471,7 @@
     if (cpi->frames_till_gf_update_due == 0)
     {
         /* Define next gf group and assign bits to it */
-        vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+        memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
         define_gf_group(cpi, &this_frame_copy);
 
         /* If we are going to code an altref frame at the end of the group
@@ -2487,7 +2487,7 @@
              * to the GF group
              */
             int bak = cpi->per_frame_bandwidth;
-            vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+            memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
             assign_std_frame_bits(cpi, &this_frame_copy);
             cpi->per_frame_bandwidth = bak;
         }
@@ -2510,7 +2510,7 @@
             if (cpi->common.frame_type != KEY_FRAME)
             {
                 /* Assign bits from those allocated to the GF group */
-                vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+                memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
                 assign_std_frame_bits(cpi, &this_frame_copy);
             }
         }
@@ -2517,7 +2517,7 @@
         else
         {
             /* Assign bits from those allocated to the GF group */
-            vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+            memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
             assign_std_frame_bits(cpi, &this_frame_copy);
         }
     }
@@ -2658,7 +2658,7 @@
         double decay_accumulator = 1.0;
         double next_iiratio;
 
-        vpx_memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
+        memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
 
         /* Note the starting file position so we can reset to it */
         start_pos = cpi->twopass.stats_in;
@@ -2735,7 +2735,7 @@
     double kf_group_coded_err = 0.0;
     double recent_loop_decay[8] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
 
-    vpx_memset(&next_frame, 0, sizeof(next_frame));
+    memset(&next_frame, 0, sizeof(next_frame));
 
     vp8_clear_system_state();
     start_position = cpi->twopass.stats_in;
@@ -2756,7 +2756,7 @@
     cpi->twopass.frames_to_key = 1;
 
     /* Take a copy of the initial frame details */
-    vpx_memcpy(&first_frame, this_frame, sizeof(*this_frame));
+    memcpy(&first_frame, this_frame, sizeof(*this_frame));
 
     cpi->twopass.kf_group_bits = 0;
     cpi->twopass.kf_group_error_left = 0;
@@ -2779,7 +2779,7 @@
         kf_group_coded_err += this_frame->coded_error;
 
         /* Load the next frame's stats. */
-        vpx_memcpy(&last_frame, this_frame, sizeof(*this_frame));
+        memcpy(&last_frame, this_frame, sizeof(*this_frame));
         input_stats(cpi, this_frame);
 
         /* Provided that we are not at the end of the file... */
@@ -2847,7 +2847,7 @@
         cpi->twopass.frames_to_key /= 2;
 
         /* Copy first frame details */
-        vpx_memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
+        memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
 
         /* Reset to the start of the group */
         reset_fpf_position(cpi, start_position);
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -1978,8 +1978,8 @@
 #ifdef VP8_ENTROPY_STATS
 void init_mv_ref_counts()
 {
-    vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
-    vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
+    memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
+    memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
 }
 
 void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -428,10 +428,10 @@
 
     cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
     cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
-    vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
-    vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
-    vpx_memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
-    vpx_memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
+    memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
+    memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
+    memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
+    memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
 
     set_default_lf_deltas(cpi);
 
@@ -508,7 +508,7 @@
 static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map)
 {
     /* Copy in the new segmentation map */
-    vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
+    memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
 
     /* Signal that the map should be updated. */
     cpi->mb.e_mbd.update_mb_segmentation_map = 1;
@@ -530,7 +530,7 @@
 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, unsigned char abs_delta)
 {
     cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
-    vpx_memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data));
+    memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data));
 }
 
 
@@ -602,7 +602,7 @@
 
     // Set every macroblock to be eligible for update.
     // For key frame this will reset seg map to 0.
-    vpx_memset(cpi->segmentation_map, 0, mbs_in_frame);
+    memset(cpi->segmentation_map, 0, mbs_in_frame);
 
     if (cpi->common.frame_type != KEY_FRAME && block_count > 0)
     {
@@ -686,8 +686,8 @@
     cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
     cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
 
-    vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
-    vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
+    memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
+    memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
 
     /* Test of ref frame deltas */
     cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
@@ -1087,7 +1087,7 @@
         if (Speed >= 15)
             sf->half_pixel_search = 0;
 
-        vpx_memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
+        memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
 
     }; /* switch */
 
@@ -1298,7 +1298,7 @@
     CHECK_MEM_ERROR(cpi->active_map,
                     vpx_calloc(cm->mb_rows * cm->mb_cols,
                     sizeof(*cpi->active_map)));
-    vpx_memset(cpi->active_map , 1, (cm->mb_rows * cm->mb_cols));
+    memset(cpi->active_map , 1, (cm->mb_rows * cm->mb_cols));
 
 #if CONFIG_MULTITHREAD
     if (width < 640)
@@ -1891,7 +1891,7 @@
 
     cm = &cpi->common;
 
-    vpx_memset(cpi, 0, sizeof(VP8_COMP));
+    memset(cpi, 0, sizeof(VP8_COMP));
 
     if (setjmp(cm->error.jmp))
     {
@@ -2867,7 +2867,7 @@
     }
 
     /* Update data structure that monitors level of reference to last GF */
-    vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
+    memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
     cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
 
     /* this frame refreshes means next frames don't unless specified by user */
@@ -2916,7 +2916,7 @@
         }
 
         /* Update data structure that monitors level of reference to last GF */
-        vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
+        memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
         cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
 
         /* this frame refreshes means next frames don't unless specified by
@@ -3830,9 +3830,9 @@
         }
 
         // Reset the zero_last counter to 0 on key frame.
-        vpx_memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
-        vpx_memset(cpi->consec_zero_last_mvbias, 0,
-                   (cpi->common.mb_rows * cpi->common.mb_cols));
+        memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
+        memset(cpi->consec_zero_last_mvbias, 0,
+               (cpi->common.mb_rows * cpi->common.mb_cols));
     }
 
 #if 0
@@ -4362,9 +4362,9 @@
                   disable_segmentation(cpi);
               }
               // Reset the zero_last counter to 0 on key frame.
-              vpx_memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
-              vpx_memset(cpi->consec_zero_last_mvbias, 0,
-                         (cpi->common.mb_rows * cpi->common.mb_cols));
+              memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
+              memset(cpi->consec_zero_last_mvbias, 0,
+                     (cpi->common.mb_rows * cpi->common.mb_cols));
               vp8_set_quantizer(cpi, Q);
             }
 
@@ -4387,7 +4387,7 @@
             if (cm->refresh_entropy_probs == 0)
             {
                 /* save a copy for later refresh */
-                vpx_memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
+                memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
             }
 
             vp8_update_coef_context(cpi);
@@ -5613,19 +5613,19 @@
 
     if (cm->refresh_entropy_probs == 0)
     {
-        vpx_memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
+        memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
     }
 
     /* Save the contexts separately for alt ref, gold and last. */
     /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
     if(cm->refresh_alt_ref_frame)
-        vpx_memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
+        memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
 
     if(cm->refresh_golden_frame)
-        vpx_memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
+        memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
 
     if(cm->refresh_last_frame)
-        vpx_memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
+        memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
 
     /* if its a dropped frame honor the requests on subsequent frames */
     if (*size > 0)
@@ -5934,7 +5934,7 @@
     {
         if (map)
         {
-            vpx_memcpy(cpi->active_map, map, rows * cols);
+            memcpy(cpi->active_map, map, rows * cols);
             cpi->active_map_enabled = 1;
         }
         else
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -862,8 +862,8 @@
 
     mode_mv = mode_mv_sb[sign_bias];
     best_ref_mv.as_int = 0;
-    vpx_memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
-    vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
+    memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
+    memset(&best_mbmode, 0, sizeof(best_mbmode));
 
     /* Setup search priorities */
 #if CONFIG_MULTI_RES_ENCODING
@@ -1348,8 +1348,8 @@
             *returndistortion = distortion2;
             best_rd_sse = sse;
             best_rd = this_rd;
-            vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
-                       sizeof(MB_MODE_INFO));
+            memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
+                   sizeof(MB_MODE_INFO));
 
             /* Testing this mode gave rise to an improvement in best error
              * score. Lower threshold a bit for next time
@@ -1487,8 +1487,8 @@
 
             if (this_rd < best_rd)
             {
-                vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
-                           sizeof(MB_MODE_INFO));
+                memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
+                       sizeof(MB_MODE_INFO));
             }
         }
 
@@ -1512,8 +1512,8 @@
     /* set to the best mb mode, this copy can be skip if x->skip since it
      * already has the right content */
     if (!x->skip)
-        vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
-                   sizeof(MB_MODE_INFO));
+        memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
+               sizeof(MB_MODE_INFO));
 
     if (best_mbmode.mode <= B_PRED)
     {
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -49,7 +49,7 @@
     src_y = src_ybc->y_buffer + yoffset;
     dst_y = dst_ybc->y_buffer + yoffset;
 
-    vpx_memcpy(dst_y, src_y, ystride * linestocopy);
+    memcpy(dst_y, src_y, ystride * linestocopy);
 }
 
 static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
@@ -286,7 +286,7 @@
 
     YV12_BUFFER_CONFIG * saved_frame = cm->frame_to_show;
 
-    vpx_memset(ss_err, 0, sizeof(ss_err));
+    memset(ss_err, 0, sizeof(ss_err));
 
     /* Replace unfiltered frame buffer with a new one */
     cm->frame_to_show = &cpi->pick_lf_lvl_frame;
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -65,8 +65,8 @@
     short *dequant_ptr     = d->dequant;
     short zbin_oq_value    = b->zbin_extra;
 
-    vpx_memset(qcoeff_ptr, 0, 32);
-    vpx_memset(dqcoeff_ptr, 0, 32);
+    memset(qcoeff_ptr, 0, 32);
+    memset(dqcoeff_ptr, 0, 32);
 
     eob = -1;
 
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -296,7 +296,7 @@
 
     vp8_default_coef_probs(& cpi->common);
 
-    vpx_memcpy(cpi->common.fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+    memcpy(cpi->common.fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
     {
         int flag[2] = {1, 1};
         vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
@@ -305,9 +305,9 @@
     /* Make sure we initialize separate contexts for altref,gold, and normal.
      * TODO shouldn't need 3 different copies of structure to do this!
      */
-    vpx_memcpy(&cpi->lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
-    vpx_memcpy(&cpi->lfc_g, &cpi->common.fc, sizeof(cpi->common.fc));
-    vpx_memcpy(&cpi->lfc_n, &cpi->common.fc, sizeof(cpi->common.fc));
+    memcpy(&cpi->lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
+    memcpy(&cpi->lfc_g, &cpi->common.fc, sizeof(cpi->common.fc));
+    memcpy(&cpi->lfc_n, &cpi->common.fc, sizeof(cpi->common.fc));
 
     cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ;
 
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -555,8 +555,8 @@
     ENTROPY_CONTEXT *ta;
     ENTROPY_CONTEXT *tl;
 
-    vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-    vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
 
     ta = (ENTROPY_CONTEXT *)&t_above;
     tl = (ENTROPY_CONTEXT *)&t_left;
@@ -691,7 +691,7 @@
             *a = tempa;
             *l = templ;
             copy_predictor(best_predictor, b->predictor);
-            vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
+            memcpy(best_dqcoeff, b->dqcoeff, 32);
         }
     }
     b->bmi.as_mode = *best_mode;
@@ -715,8 +715,8 @@
     ENTROPY_CONTEXT *tl;
     const int *bmode_costs;
 
-    vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-    vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
 
     ta = (ENTROPY_CONTEXT *)&t_above;
     tl = (ENTROPY_CONTEXT *)&t_left;
@@ -820,8 +820,8 @@
     ENTROPY_CONTEXT *ta;
     ENTROPY_CONTEXT *tl;
 
-    vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-    vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
 
     ta = (ENTROPY_CONTEXT *)&t_above;
     tl = (ENTROPY_CONTEXT *)&t_left;
@@ -1128,8 +1128,8 @@
     ENTROPY_CONTEXT *ta_b;
     ENTROPY_CONTEXT *tl_b;
 
-    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
-    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+    memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
 
     ta = (ENTROPY_CONTEXT *)&t_above;
     tl = (ENTROPY_CONTEXT *)&t_left;
@@ -1172,8 +1172,8 @@
             ENTROPY_CONTEXT *ta_s;
             ENTROPY_CONTEXT *tl_s;
 
-            vpx_memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
-            vpx_memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
+            memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
+            memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
 
             ta_s = (ENTROPY_CONTEXT *)&t_above_s;
             tl_s = (ENTROPY_CONTEXT *)&t_left_s;
@@ -1329,14 +1329,14 @@
                 mode_selected = this_mode;
                 best_label_rd = this_rd;
 
-                vpx_memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
-                vpx_memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
+                memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
+                memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
 
             }
         } /*for each 4x4 mode*/
 
-        vpx_memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
-        vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
+        memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
+        memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
 
         labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
                     bsi->ref_mv, x->mvcost);
@@ -1392,7 +1392,7 @@
     int i;
     BEST_SEG_INFO bsi;
 
-    vpx_memset(&bsi, 0, sizeof(bsi));
+    memset(&bsi, 0, sizeof(bsi));
 
     bsi.segment_rd = best_rd;
     bsi.ref_mv = best_ref_mv;
@@ -1925,8 +1925,8 @@
                       (rd->distortion2-rd->distortion_uv));
 
     best_mode->rd = this_rd;
-    vpx_memcpy(&best_mode->mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
-    vpx_memcpy(&best_mode->partition, x->partition_info, sizeof(PARTITION_INFO));
+    memcpy(&best_mode->mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
+    memcpy(&best_mode->partition, x->partition_info, sizeof(PARTITION_INFO));
 
     if ((this_mode == B_PRED) || (this_mode == SPLITMV))
     {
@@ -1988,9 +1988,9 @@
     best_mode.rd = INT_MAX;
     best_mode.yrd = INT_MAX;
     best_mode.intra_rd = INT_MAX;
-    vpx_memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
-    vpx_memset(&best_mode.mbmode, 0, sizeof(best_mode.mbmode));
-    vpx_memset(&best_mode.bmodes, 0, sizeof(best_mode.bmodes));
+    memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
+    memset(&best_mode.mbmode, 0, sizeof(best_mode.mbmode));
+    memset(&best_mode.bmodes, 0, sizeof(best_mode.bmodes));
 
     /* Setup search priorities */
     get_reference_search_order(cpi, ref_frame_map);
@@ -2582,7 +2582,7 @@
 
 
     /* macroblock modes */
-    vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode, sizeof(MB_MODE_INFO));
+    memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode, sizeof(MB_MODE_INFO));
 
     if (best_mode.mbmode.mode == B_PRED)
     {
@@ -2595,7 +2595,7 @@
         for (i = 0; i < 16; i++)
             xd->mode_info_context->bmi[i].mv.as_int = best_mode.bmodes[i].mv.as_int;
 
-        vpx_memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO));
+        memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO));
 
         x->e_mbd.mode_info_context->mbmi.mv.as_int =
                                       x->partition_info->bmi[15].mv.as_int;
--- a/vp8/encoder/segmentation.c
+++ b/vp8/encoder/segmentation.c
@@ -23,7 +23,7 @@
     if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame))
     {
         /* Reset Gf useage monitors */
-        vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
+        memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
         cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
     }
     else
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -274,8 +274,8 @@
             int i, j, k;
             int stride;
 
-            vpx_memset(accumulator, 0, 384*sizeof(unsigned int));
-            vpx_memset(count, 0, 384*sizeof(unsigned short));
+            memset(accumulator, 0, 384*sizeof(unsigned int));
+            memset(count, 0, 384*sizeof(unsigned short));
 
 #if ALT_REF_MC_ENABLED
             cpi->mb.mv_col_min = -((mb_col * 16) + (16 - 5));
@@ -502,7 +502,7 @@
     start_frame = distance + frames_to_blur_forward;
 
     /* Setup frame pointers, NULL indicates frame not included in filter */
-    vpx_memset(cpi->frames, 0, max_frames*sizeof(YV12_BUFFER_CONFIG *));
+    memset(cpi->frames, 0, max_frames*sizeof(YV12_BUFFER_CONFIG *));
     for (frame = 0; frame < frames_to_blur; frame++)
     {
         int which_buffer =  start_frame - frame;
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -421,7 +421,7 @@
 
 void init_context_counters(void)
 {
-    vpx_memset(context_counters, 0, sizeof(context_counters));
+    memset(context_counters, 0, sizeof(context_counters));
 }
 
 void print_context_counters()
@@ -596,13 +596,13 @@
     /* Clear entropy contexts for Y2 blocks */
     if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
     {
-        vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
-        vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+        memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+        memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
     }
     else
     {
-        vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
-        vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
+        memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
+        memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
     }
 
 }
--- a/vp8/encoder/x86/quantize_sse2.c
+++ b/vp8/encoder/x86/quantize_sse2.c
@@ -55,7 +55,7 @@
     __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
     __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
 
-    vpx_memset(qcoeff_ptr, 0, 32);
+    memset(qcoeff_ptr, 0, 32);
 
     /* Duplicate to all lanes. */
     zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0);
--- a/vp8/vp8_cx_iface.c
+++ b/vp8/vp8_cx_iface.c
@@ -366,9 +366,9 @@
     if (oxcf->number_of_layers > 1)
     {
         memcpy (oxcf->target_bitrate, cfg.ts_target_bitrate,
-                          sizeof(cfg.ts_target_bitrate));
+                sizeof(cfg.ts_target_bitrate));
         memcpy (oxcf->rate_decimator, cfg.ts_rate_decimator,
-                          sizeof(cfg.ts_rate_decimator));
+                sizeof(cfg.ts_rate_decimator));
         memcpy (oxcf->layer_id, cfg.ts_layer_id, sizeof(cfg.ts_layer_id));
     }
 
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -288,8 +288,8 @@
     if (ctx->fragments.count == 0)
     {
         /* New frame, reset fragment pointers and sizes */
-        vpx_memset((void*)ctx->fragments.ptrs, 0, sizeof(ctx->fragments.ptrs));
-        vpx_memset(ctx->fragments.sizes, 0, sizeof(ctx->fragments.sizes));
+        memset((void*)ctx->fragments.ptrs, 0, sizeof(ctx->fragments.ptrs));
+        memset(ctx->fragments.sizes, 0, sizeof(ctx->fragments.sizes));
     }
     if (ctx->fragments.enabled && !(data == NULL && data_sz == 0))
     {
--- a/vp9/common/vp9_alloccommon.c
+++ b/vp9/common/vp9_alloccommon.c
@@ -156,7 +156,7 @@
 void vp9_init_context_buffers(VP9_COMMON *cm) {
   cm->setup_mi(cm);
   if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
-    vpx_memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
+    memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
 }
 
 void vp9_swap_current_and_last_seg_map(VP9_COMMON *cm) {
--- a/vp9/common/vp9_blockd.c
+++ b/vp9/common/vp9_blockd.c
@@ -103,7 +103,7 @@
     for (i = above_contexts; i < tx_size_in_blocks; ++i)
       a[i] = 0;
   } else {
-    vpx_memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
+    memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
   }
 
   // left
@@ -120,7 +120,7 @@
     for (i = left_contexts; i < tx_size_in_blocks; ++i)
       l[i] = 0;
   } else {
-    vpx_memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
+    memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
   }
 }
 
--- a/vp9/common/vp9_common.h
+++ b/vp9/common/vp9_common.h
@@ -36,17 +36,17 @@
 // Only need this for fixed-size arrays, for structs just assign.
 #define vp9_copy(dest, src) {            \
     assert(sizeof(dest) == sizeof(src)); \
-    vpx_memcpy(dest, src, sizeof(src));  \
+    memcpy(dest, src, sizeof(src));  \
   }
 
 // Use this for variably-sized arrays.
 #define vp9_copy_array(dest, src, n) {       \
     assert(sizeof(*dest) == sizeof(*src));   \
-    vpx_memcpy(dest, src, n * sizeof(*src)); \
+    memcpy(dest, src, n * sizeof(*src)); \
   }
 
-#define vp9_zero(dest) vpx_memset(&(dest), 0, sizeof(dest))
-#define vp9_zero_array(dest, n) vpx_memset(dest, 0, n * sizeof(*dest))
+#define vp9_zero(dest) memset(&(dest), 0, sizeof(dest))
+#define vp9_zero_array(dest, n) memset(dest, 0, n * sizeof(*dest))
 
 static INLINE uint8_t clip_pixel(int val) {
   return (val > 255) ? 255 : (val < 0) ? 0 : val;
--- a/vp9/common/vp9_convolve.c
+++ b/vp9/common/vp9_convolve.c
@@ -256,7 +256,7 @@
   (void)filter_y;  (void)filter_y_stride;
 
   for (r = h; r > 0; --r) {
-    vpx_memcpy(dst, src, w);
+    memcpy(dst, src, w);
     src += src_stride;
     dst += dst_stride;
   }
@@ -526,7 +526,7 @@
   (void)bd;
 
   for (r = h; r > 0; --r) {
-    vpx_memcpy(dst, src, w * sizeof(uint16_t));
+    memcpy(dst, src, w * sizeof(uint16_t));
     src += src_stride;
     dst += dst_stride;
   }
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -749,13 +749,13 @@
 };
 
 static void extend_to_full_distribution(vp9_prob *probs, vp9_prob p) {
-  vpx_memcpy(probs, vp9_pareto8_full[p = 0 ? 0 : p - 1],
-             MODEL_NODES * sizeof(vp9_prob));
+  memcpy(probs, vp9_pareto8_full[p = 0 ? 0 : p - 1],
+         MODEL_NODES * sizeof(vp9_prob));
 }
 
 void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full) {
   if (full != model)
-    vpx_memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+    memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
   extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]);
 }
 
--- a/vp9/common/vp9_entropy.h
+++ b/vp9/common/vp9_entropy.h
@@ -142,10 +142,10 @@
   for (i = 0; i < MAX_MB_PLANE; i++) {
     struct macroblockd_plane *const pd = &xd->plane[i];
     const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
-    vpx_memset(pd->above_context, 0, sizeof(ENTROPY_CONTEXT) *
-                   num_4x4_blocks_wide_lookup[plane_bsize]);
-    vpx_memset(pd->left_context, 0, sizeof(ENTROPY_CONTEXT) *
-                   num_4x4_blocks_high_lookup[plane_bsize]);
+    memset(pd->above_context, 0,
+           sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide_lookup[plane_bsize]);
+    memset(pd->left_context, 0,
+           sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high_lookup[plane_bsize]);
   }
 }
 
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -430,10 +430,10 @@
   cm->seg.abs_delta = SEGMENT_DELTADATA;
 
   if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
-    vpx_memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
+    memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
 
   if (cm->current_frame_seg_map)
-    vpx_memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
+    memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
 
   // Reset the mode ref deltas for loop filter
   vp9_zero(lf->last_ref_deltas);
@@ -460,8 +460,8 @@
 
   // prev_mip will only be allocated in encoder.
   if (frame_is_intra_only(cm) && cm->prev_mip && !cm->frame_parallel_decode)
-    vpx_memset(cm->prev_mip, 0, cm->mi_stride * (cm->mi_rows + 1) *
-                                    sizeof(*cm->prev_mip));
+    memset(cm->prev_mip, 0,
+           cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->prev_mip));
 
   vp9_zero(cm->ref_frame_sign_bias);
 
--- a/vp9/common/vp9_frame_buffers.c
+++ b/vp9/common/vp9_frame_buffers.c
@@ -64,7 +64,7 @@
     // This memset is needed for fixing valgrind error from C loop filter
     // due to access uninitialized memory in frame border. It could be
     // removed if border is totally removed.
-    vpx_memset(int_fb_list->int_fb[i].data, 0, min_size);
+    memset(int_fb_list->int_fb[i].data, 0, min_size);
     int_fb_list->int_fb[i].size = min_size;
   }
 
--- a/vp9/common/vp9_idct.c
+++ b/vp9/common/vp9_idct.c
@@ -1276,7 +1276,7 @@
     if (zero_coeff[0] | zero_coeff[1])
       idct32(input, outptr);
     else
-      vpx_memset(outptr, 0, sizeof(tran_low_t) * 32);
+      memset(outptr, 0, sizeof(tran_low_t) * 32);
     input += 32;
     outptr += 32;
   }
@@ -1676,7 +1676,7 @@
   (void) bd;
 
   if (!(x0 | x1 | x2 | x3)) {
-    vpx_memset(output, 0, 4 * sizeof(*output));
+    memset(output, 0, 4 * sizeof(*output));
     return;
   }
 
@@ -1752,7 +1752,7 @@
   (void) bd;
 
   if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
-    vpx_memset(output, 0, 8 * sizeof(*output));
+    memset(output, 0, 8 * sizeof(*output));
     return;
   }
 
@@ -2095,7 +2095,7 @@
 
   if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
            | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
-    vpx_memset(output, 0, 16 * sizeof(*output));
+    memset(output, 0, 16 * sizeof(*output));
     return;
   }
 
@@ -2712,7 +2712,7 @@
     if (zero_coeff[0] | zero_coeff[1])
       highbd_idct32(input, outptr, bd);
     else
-      vpx_memset(outptr, 0, sizeof(tran_low_t) * 32);
+      memset(outptr, 0, sizeof(tran_low_t) * 32);
     input += 32;
     outptr += 32;
   }
--- a/vp9/common/vp9_loopfilter.c
+++ b/vp9/common/vp9_loopfilter.c
@@ -222,9 +222,9 @@
     if (block_inside_limit < 1)
       block_inside_limit = 1;
 
-    vpx_memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH);
-    vpx_memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit),
-               SIMD_WIDTH);
+    memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH);
+    memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit),
+           SIMD_WIDTH);
   }
 }
 
@@ -245,7 +245,7 @@
 
   // init hev threshold const vectors
   for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++)
-    vpx_memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
+    memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
 }
 
 void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) {
@@ -276,7 +276,7 @@
     if (!lf->mode_ref_delta_enabled) {
       // we could get rid of this if we assume that deltas are set to
       // zero when not in use; encoder always uses deltas
-      vpx_memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id]));
+      memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id]));
     } else {
       int ref, mode;
       const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
@@ -727,7 +727,7 @@
     const int h = num_8x8_blocks_high_lookup[block_size];
     int index = shift_y;
     for (i = 0; i < h; i++) {
-      vpx_memset(&lfm->lfl_y[index], filter_level, w);
+      memset(&lfm->lfl_y[index], filter_level, w);
       index += 8;
     }
   }
@@ -801,7 +801,7 @@
     const int h = num_8x8_blocks_high_lookup[block_size];
     int index = shift_y;
     for (i = 0; i < h; i++) {
-      vpx_memset(&lfm->lfl_y[index], filter_level, w);
+      memset(&lfm->lfl_y[index], filter_level, w);
       index += 8;
     }
   }
@@ -1603,7 +1603,7 @@
   lf_data->start = 0;
   lf_data->stop = 0;
   lf_data->y_only = 0;
-  vpx_memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
+  memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
 }
 
 int vp9_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -28,7 +28,7 @@
       cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL;
 
   // Blank the reference vector list
-  vpx_memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
+  memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
 
   // The nearest 2 blocks are treated differently
   // if the size < 8x8 we get the mv from the bmi substructure,
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -408,8 +408,8 @@
   // update the partition context at the end notes. set partition bits
   // of block sizes larger than the current one to be one, and partition
   // bits of smaller block sizes to be zero.
-  vpx_memset(above_ctx, partition_context_lookup[subsize].above, bs);
-  vpx_memset(left_ctx, partition_context_lookup[subsize].left, bs);
+  memset(above_ctx, partition_context_lookup[subsize].above, bs);
+  memset(left_ctx, partition_context_lookup[subsize].left, bs);
 }
 
 static INLINE int partition_plane_context(const MACROBLOCKD *xd,
--- a/vp9/common/vp9_postproc.c
+++ b/vp9/common/vp9_postproc.c
@@ -651,8 +651,8 @@
       return 1;
     }
     ppstate->prev_mi = ppstate->prev_mip + cm->mi_stride + 1;
-    vpx_memset(ppstate->prev_mip, 0,
-               cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
+    memset(ppstate->prev_mip, 0,
+           cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
   }
 
   // Allocate post_proc_buffer_int if needed.
@@ -674,8 +674,8 @@
 
       // Ensure that postproc is set to all 0s so that post proc
       // doesn't pull random data in from edge.
-      vpx_memset(cm->post_proc_buffer_int.buffer_alloc, 128,
-                 cm->post_proc_buffer.frame_size);
+      memset(cm->post_proc_buffer_int.buffer_alloc, 128,
+             cm->post_proc_buffer.frame_size);
     }
   }
 
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -245,7 +245,7 @@
   (void) left;
   (void) bd;
   for (r = 0; r < bs; r++) {
-    vpx_memcpy(dst, above, bs * sizeof(uint16_t));
+    memcpy(dst, above, bs * sizeof(uint16_t));
     dst += stride;
   }
 }
@@ -488,7 +488,7 @@
   (void) left;
 
   for (r = 0; r < bs; r++) {
-    vpx_memcpy(dst, above, bs);
+    memcpy(dst, above, bs);
     dst += stride;
   }
 }
@@ -500,7 +500,7 @@
   (void) above;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset(dst, left[r], bs);
+    memset(dst, left[r], bs);
     dst += stride;
   }
 }
@@ -526,7 +526,7 @@
   (void) left;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset(dst, 128, bs);
+    memset(dst, 128, bs);
     dst += stride;
   }
 }
@@ -543,7 +543,7 @@
   expected_dc = (sum + (bs >> 1)) / bs;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset(dst, expected_dc, bs);
+    memset(dst, expected_dc, bs);
     dst += stride;
   }
 }
@@ -559,7 +559,7 @@
   expected_dc = (sum + (bs >> 1)) / bs;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset(dst, expected_dc, bs);
+    memset(dst, expected_dc, bs);
     dst += stride;
   }
 }
@@ -578,7 +578,7 @@
   expected_dc = (sum + (count >> 1)) / count;
 
   for (r = 0; r < bs; r++) {
-    vpx_memset(dst, expected_dc, bs);
+    memset(dst, expected_dc, bs);
     dst += stride;
   }
 }
@@ -718,24 +718,24 @@
       /* slower path if the block needs border extension */
       if (x0 + 2 * bs <= frame_width) {
         if (right_available && bs == 4) {
-          vpx_memcpy(above_row, above_ref, 2 * bs * sizeof(uint16_t));
+          memcpy(above_row, above_ref, 2 * bs * sizeof(uint16_t));
         } else {
-          vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+          memcpy(above_row, above_ref, bs * sizeof(uint16_t));
           vpx_memset16(above_row + bs, above_row[bs - 1], bs);
         }
       } else if (x0 + bs <= frame_width) {
         const int r = frame_width - x0;
         if (right_available && bs == 4) {
-          vpx_memcpy(above_row, above_ref, r * sizeof(uint16_t));
+          memcpy(above_row, above_ref, r * sizeof(uint16_t));
           vpx_memset16(above_row + r, above_row[r - 1],
                        x0 + 2 * bs - frame_width);
         } else {
-          vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+          memcpy(above_row, above_ref, bs * sizeof(uint16_t));
           vpx_memset16(above_row + bs, above_row[bs - 1], bs);
         }
       } else if (x0 <= frame_width) {
         const int r = frame_width - x0;
-        vpx_memcpy(above_row, above_ref, r * sizeof(uint16_t));
+        memcpy(above_row, above_ref, r * sizeof(uint16_t));
         vpx_memset16(above_row + r, above_row[r - 1],
                        x0 + 2 * bs - frame_width);
       }
@@ -746,9 +746,9 @@
       if (bs == 4 && right_available && left_available) {
         const_above_row = above_ref;
       } else {
-        vpx_memcpy(above_row, above_ref, bs * sizeof(uint16_t));
+        memcpy(above_row, above_ref, bs * sizeof(uint16_t));
         if (bs == 4 && right_available)
-          vpx_memcpy(above_row + bs, above_ref + bs, bs * sizeof(uint16_t));
+          memcpy(above_row + bs, above_ref + bs, bs * sizeof(uint16_t));
         else
           vpx_memset16(above_row + bs, above_row[bs - 1], bs);
         // TODO(Peter): this value should probably change for high bitdepth
@@ -830,7 +830,7 @@
           left_col[i] = ref[i * ref_stride - 1];
       }
     } else {
-      vpx_memset(left_col, 129, bs);
+      memset(left_col, 129, bs);
     }
   }
 
@@ -841,12 +841,11 @@
       if (xd->mb_to_right_edge < 0) {
         /* slower path if the block needs border extension */
         if (x0 + bs <= frame_width) {
-          vpx_memcpy(above_row, above_ref, bs);
+          memcpy(above_row, above_ref, bs);
         } else if (x0 <= frame_width) {
           const int r = frame_width - x0;
-          vpx_memcpy(above_row, above_ref, r);
-          vpx_memset(above_row + r, above_row[r - 1],
-                     x0 + bs - frame_width);
+          memcpy(above_row, above_ref, r);
+          memset(above_row + r, above_row[r - 1], x0 + bs - frame_width);
         }
       } else {
         /* faster path if the block does not need extension */
@@ -853,12 +852,12 @@
         if (bs == 4 && right_available && left_available) {
           const_above_row = above_ref;
         } else {
-          vpx_memcpy(above_row, above_ref, bs);
+          memcpy(above_row, above_ref, bs);
         }
       }
       above_row[-1] = left_available ? above_ref[-1] : 129;
     } else {
-      vpx_memset(above_row, 127, bs);
+      memset(above_row, 127, bs);
       above_row[-1] = 127;
     }
   }
@@ -871,26 +870,24 @@
         /* slower path if the block needs border extension */
         if (x0 + 2 * bs <= frame_width) {
           if (right_available && bs == 4) {
-            vpx_memcpy(above_row, above_ref, 2 * bs);
+            memcpy(above_row, above_ref, 2 * bs);
           } else {
-            vpx_memcpy(above_row, above_ref, bs);
-            vpx_memset(above_row + bs, above_row[bs - 1], bs);
+            memcpy(above_row, above_ref, bs);
+            memset(above_row + bs, above_row[bs - 1], bs);
           }
         } else if (x0 + bs <= frame_width) {
           const int r = frame_width - x0;
           if (right_available && bs == 4) {
-            vpx_memcpy(above_row, above_ref, r);
-            vpx_memset(above_row + r, above_row[r - 1],
-                       x0 + 2 * bs - frame_width);
+            memcpy(above_row, above_ref, r);
+            memset(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width);
           } else {
-            vpx_memcpy(above_row, above_ref, bs);
-            vpx_memset(above_row + bs, above_row[bs - 1], bs);
+            memcpy(above_row, above_ref, bs);
+            memset(above_row + bs, above_row[bs - 1], bs);
           }
         } else if (x0 <= frame_width) {
           const int r = frame_width - x0;
-          vpx_memcpy(above_row, above_ref, r);
-          vpx_memset(above_row + r, above_row[r - 1],
-                     x0 + 2 * bs - frame_width);
+          memcpy(above_row, above_ref, r);
+          memset(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width);
         }
       } else {
         /* faster path if the block does not need extension */
@@ -897,16 +894,16 @@
         if (bs == 4 && right_available && left_available) {
           const_above_row = above_ref;
         } else {
-          vpx_memcpy(above_row, above_ref, bs);
+          memcpy(above_row, above_ref, bs);
           if (bs == 4 && right_available)
-            vpx_memcpy(above_row + bs, above_ref + bs, bs);
+            memcpy(above_row + bs, above_ref + bs, bs);
           else
-            vpx_memset(above_row + bs, above_row[bs - 1], bs);
+            memset(above_row + bs, above_row[bs - 1], bs);
         }
       }
       above_row[-1] = left_available ? above_ref[-1] : 129;
     } else {
-      vpx_memset(above_row, 127, bs * 2);
+      memset(above_row, 127, bs * 2);
       above_row[-1] = 127;
     }
   }
--- a/vp9/common/vp9_thread_common.c
+++ b/vp9/common/vp9_thread_common.c
@@ -175,7 +175,7 @@
   }
 
   // Initialize cur_sb_col to -1 for all SB rows.
-  vpx_memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);
+  memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);
 
   // Set up loopfilter thread data.
   // The decoder is capping num_workers because it has been observed that using
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -276,14 +276,14 @@
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 
     if (eob == 1) {
-      vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
+      memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
     } else {
       if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
-        vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
+        memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
       else if (tx_size == TX_32X32 && eob <= 34)
-        vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
+        memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
       else
-        vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
+        memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
     }
   }
 }
@@ -952,11 +952,11 @@
 
   // Note: this memset assumes above_context[0], [1] and [2]
   // are allocated as part of the same buffer.
-  vpx_memset(cm->above_context, 0,
-             sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);
+  memset(cm->above_context, 0,
+         sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);
 
-  vpx_memset(cm->above_seg_context, 0,
-             sizeof(*cm->above_seg_context) * aligned_cols);
+  memset(cm->above_seg_context, 0,
+         sizeof(*cm->above_seg_context) * aligned_cols);
 
   get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
 
@@ -1153,10 +1153,10 @@
 
   // Note: this memset assumes above_context[0], [1] and [2]
   // are allocated as part of the same buffer.
-  vpx_memset(cm->above_context, 0,
-             sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
-  vpx_memset(cm->above_seg_context, 0,
-             sizeof(*cm->above_seg_context) * aligned_mi_cols);
+  memset(cm->above_context, 0,
+         sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
+  memset(cm->above_seg_context, 0,
+         sizeof(*cm->above_seg_context) * aligned_mi_cols);
 
   // Load tile data into tile_buffers
   get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
@@ -1378,7 +1378,7 @@
 
     setup_frame_size(cm, rb);
     if (pbi->need_resync) {
-      vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+      memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
       pbi->need_resync = 0;
     }
   } else {
@@ -1409,7 +1409,7 @@
       pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
       setup_frame_size(cm, rb);
       if (pbi->need_resync) {
-        vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+        memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
         pbi->need_resync = 0;
       }
     } else if (pbi->need_resync != 1) {  /* Skip if need resync */
--- a/vp9/decoder/vp9_decoder.c
+++ b/vp9/decoder/vp9_decoder.c
@@ -48,10 +48,10 @@
 
 static void vp9_dec_setup_mi(VP9_COMMON *cm) {
   cm->mi = cm->mip + cm->mi_stride + 1;
-  vpx_memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
+  memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
   cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
-  vpx_memset(cm->mi_grid_base, 0,
-             cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
+  memset(cm->mi_grid_base, 0,
+         cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
 }
 
 static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
@@ -99,8 +99,8 @@
   once(initialize_dec);
 
   // Initialize the references to not point to any frame buffers.
-  vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
-  vpx_memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));
+  memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+  memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));
 
   cm->current_video_frame = 0;
   pbi->ready_for_new_data = 1;
--- a/vp9/encoder/arm/neon/vp9_quantize_neon.c
+++ b/vp9/encoder/arm/neon/vp9_quantize_neon.c
@@ -111,8 +111,8 @@
       *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0);
     }
   } else {
-    vpx_memset(qcoeff_ptr, 0, count * sizeof(int16_t));
-    vpx_memset(dqcoeff_ptr, 0, count * sizeof(int16_t));
+    memset(qcoeff_ptr, 0, count * sizeof(int16_t));
+    memset(dqcoeff_ptr, 0, count * sizeof(int16_t));
     *eob_ptr = 0;
   }
 }
--- a/vp9/encoder/vp9_aq_complexity.c
+++ b/vp9/encoder/vp9_aq_complexity.c
@@ -55,8 +55,7 @@
     const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
 
     // Clear down the segment map.
-    vpx_memset(cpi->segmentation_map, DEFAULT_AQ2_SEG,
-               cm->mi_rows * cm->mi_cols);
+    memset(cpi->segmentation_map, DEFAULT_AQ2_SEG, cm->mi_rows * cm->mi_cols);
 
     vp9_clearall_segfeatures(seg);
 
--- a/vp9/encoder/vp9_aq_cyclicrefresh.c
+++ b/vp9/encoder/vp9_aq_cyclicrefresh.c
@@ -324,7 +324,7 @@
   unsigned char *const seg_map = cpi->segmentation_map;
   int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
   int xmis, ymis, x, y;
-  vpx_memset(seg_map, CR_SEGMENT_ID_BASE, cm->mi_rows * cm->mi_cols);
+  memset(seg_map, CR_SEGMENT_ID_BASE, cm->mi_rows * cm->mi_cols);
   sb_cols = (cm->mi_cols + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE;
   sb_rows = (cm->mi_rows + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE;
   sbs_in_frame = sb_cols * sb_rows;
@@ -411,7 +411,7 @@
       (cpi->svc.spatial_layer_id > 0)) {
     // Set segmentation map to 0 and disable.
     unsigned char *const seg_map = cpi->segmentation_map;
-    vpx_memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
+    memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
     vp9_disable_segmentation(&cm->seg);
     if (cm->frame_type == KEY_FRAME)
       cr->sb_index = 0;
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -928,8 +928,8 @@
   const int tile_cols = 1 << cm->log2_tile_cols;
   const int tile_rows = 1 << cm->log2_tile_rows;
 
-  vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) *
-             mi_cols_aligned_to_sb(cm->mi_cols));
+  memset(cm->above_seg_context, 0,
+         sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));
 
   for (tile_row = 0; tile_row < tile_rows; tile_row++) {
     for (tile_col = 0; tile_col < tile_cols; tile_col++) {
--- a/vp9/encoder/vp9_dct.c
+++ b/vp9/encoder/vp9_dct.c
@@ -417,8 +417,8 @@
   (void)quant_shift_ptr;
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Quantization pass: All coefficients with index >= zero_flag are
--- a/vp9/encoder/vp9_denoiser.c
+++ b/vp9/encoder/vp9_denoiser.c
@@ -357,7 +357,7 @@
   assert(dest.y_height == src.y_height);
 
   for (r = 0; r < dest.y_height; ++r) {
-    vpx_memcpy(destbuf, srcbuf, dest.y_width);
+    memcpy(destbuf, srcbuf, dest.y_width);
     destbuf += dest.y_stride;
     srcbuf += src.y_stride;
   }
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -1036,8 +1036,8 @@
   }
 
   x->skip = ctx->skip;
-  vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
-             sizeof(uint8_t) * ctx->num_4x4_blk);
+  memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
+         sizeof(uint8_t) * ctx->num_4x4_blk);
 
   if (!output_enabled)
     return;
@@ -1341,12 +1341,12 @@
   int mi_width = num_8x8_blocks_wide_lookup[bsize];
   int mi_height = num_8x8_blocks_high_lookup[bsize];
   for (p = 0; p < MAX_MB_PLANE; p++) {
-    vpx_memcpy(
+    memcpy(
         xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
         a + num_4x4_blocks_wide * p,
         (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
         xd->plane[p].subsampling_x);
-    vpx_memcpy(
+    memcpy(
         xd->left_context[p]
             + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
         l + num_4x4_blocks_high * p,
@@ -1353,10 +1353,10 @@
         (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
         xd->plane[p].subsampling_y);
   }
-  vpx_memcpy(xd->above_seg_context + mi_col, sa,
-             sizeof(*xd->above_seg_context) * mi_width);
-  vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
-             sizeof(xd->left_seg_context[0]) * mi_height);
+  memcpy(xd->above_seg_context + mi_col, sa,
+         sizeof(*xd->above_seg_context) * mi_width);
+  memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
+         sizeof(xd->left_seg_context[0]) * mi_height);
 }
 
 static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
@@ -1373,12 +1373,12 @@
 
   // buffer the above/left context information of the block in search.
   for (p = 0; p < MAX_MB_PLANE; ++p) {
-    vpx_memcpy(
+    memcpy(
         a + num_4x4_blocks_wide * p,
         xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
         (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
         xd->plane[p].subsampling_x);
-    vpx_memcpy(
+    memcpy(
         l + num_4x4_blocks_high * p,
         xd->left_context[p]
             + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
@@ -1385,10 +1385,10 @@
         (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
         xd->plane[p].subsampling_y);
   }
-  vpx_memcpy(sa, xd->above_seg_context + mi_col,
-             sizeof(*xd->above_seg_context) * mi_width);
-  vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
-             sizeof(xd->left_seg_context[0]) * mi_height);
+  memcpy(sa, xd->above_seg_context + mi_col,
+         sizeof(*xd->above_seg_context) * mi_width);
+  memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
+         sizeof(xd->left_seg_context[0]) * mi_height);
 }
 
 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
@@ -1595,7 +1595,7 @@
     int use32x32 = 0;
     unsigned int thr = cpi->source_var_thresh;
 
-    vpx_memset(d32, 0, 4 * sizeof(diff));
+    memset(d32, 0, 4 * sizeof(diff));
 
     for (i = 0; i < 4; i++) {
       diff *d16[4];
@@ -2353,11 +2353,11 @@
 }
 
 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
-  vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
+  memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
 }
 
 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
-  vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
+  memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
 }
 
 #if CONFIG_FP_MB_STATS
@@ -2832,8 +2832,8 @@
   int mi_col;
 
   // Initialize the left context for the new SB row
-  vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
-  vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
+  memset(&xd->left_context, 0, sizeof(xd->left_context));
+  memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
 
   // Code each SB in the row
   for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
@@ -2917,11 +2917,11 @@
 
   // Note: this memset assumes above_context[0], [1] and [2]
   // are allocated as part of the same buffer.
-  vpx_memset(xd->above_context[0], 0,
-             sizeof(*xd->above_context[0]) *
-             2 * aligned_mi_cols * MAX_MB_PLANE);
-  vpx_memset(xd->above_seg_context, 0,
-             sizeof(*xd->above_seg_context) * aligned_mi_cols);
+  memset(xd->above_context[0], 0,
+         sizeof(*xd->above_context[0]) *
+         2 * aligned_mi_cols * MAX_MB_PLANE);
+  memset(xd->above_seg_context, 0,
+         sizeof(*xd->above_seg_context) * aligned_mi_cols);
 }
 
 static int check_dual_ref_flags(VP9_COMP *cpi) {
@@ -3594,8 +3594,8 @@
   int mi_col;
 
   // Initialize the left context for the new SB row
-  vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
-  vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
+  memset(&xd->left_context, 0, sizeof(xd->left_context));
+  memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
 
   // Code each SB in the row
   for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
@@ -3689,7 +3689,7 @@
   int sum = 0;
   int i, j;
 
-  vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
+  memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
 
   for (i = 0; i < cm->mb_rows; i++) {
     for (j = 0; j < cm->mb_cols; j++) {
@@ -4187,7 +4187,7 @@
                    cpi->sf.allow_skip_recode;
 
   if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
-    vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+    memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
 
   x->skip_optimize = ctx->is_coded;
   ctx->is_coded = 1;
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -319,8 +319,8 @@
   UPDATE_RD_COST();
   best = rd_cost1 < rd_cost0;
   final_eob = -1;
-  vpx_memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2)));
-  vpx_memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2)));
+  memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2)));
+  memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2)));
   for (i = next; i < eob; i = next) {
     const int x = tokens[i][best].qc;
     const int rc = scan[i];
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -193,7 +193,7 @@
     unsigned char* const seg_map_8x8 = cpi->segmentation_map;
     const int mi_rows = cpi->common.mi_rows;
     const int mi_cols = cpi->common.mi_cols;
-    vpx_memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
+    memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
     if (cpi->active_map.enabled) {
       int r, c;
       for (r = 0; r < mi_rows; ++r) {
@@ -251,19 +251,19 @@
 static void vp9_enc_setup_mi(VP9_COMMON *cm) {
   int i;
   cm->mi = cm->mip + cm->mi_stride + 1;
-  vpx_memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
+  memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
   cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
   // Clear top border row
-  vpx_memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride);
+  memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride);
   // Clear left border column
   for (i = 1; i < cm->mi_rows + 1; ++i)
-    vpx_memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
+    memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
 
   cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
   cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
 
-  vpx_memset(cm->mi_grid_base, 0,
-             cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
+  memset(cm->mi_grid_base, 0,
+         cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
 }
 
 static int vp9_enc_alloc_mi(VP9_COMMON *cm, int mi_size) {
@@ -399,11 +399,11 @@
   for (i = 0; i < MAX_LAG_BUFFERS; ++i) {
     vp9_free_frame_buffer(&cpi->svc.scaled_frames[i]);
   }
-  vpx_memset(&cpi->svc.scaled_frames[0], 0,
-             MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0]));
+  memset(&cpi->svc.scaled_frames[0], 0,
+         MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0]));
 
   vp9_free_frame_buffer(&cpi->svc.empty_frame.img);
-  vpx_memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame));
+  memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame));
 }
 
 static void save_coding_context(VP9_COMP *cpi) {
@@ -416,19 +416,19 @@
   // quantizer value is adjusted between loop iterations.
   vp9_copy(cc->nmvjointcost,  cpi->td.mb.nmvjointcost);
 
-  vpx_memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
-             MV_VALS * sizeof(*cpi->nmvcosts[0]));
-  vpx_memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
-             MV_VALS * sizeof(*cpi->nmvcosts[1]));
-  vpx_memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0],
-             MV_VALS * sizeof(*cpi->nmvcosts_hp[0]));
-  vpx_memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
-             MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
+  memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
+         MV_VALS * sizeof(*cpi->nmvcosts[0]));
+  memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
+         MV_VALS * sizeof(*cpi->nmvcosts[1]));
+  memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0],
+         MV_VALS * sizeof(*cpi->nmvcosts_hp[0]));
+  memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
+         MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
 
   vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs);
 
-  vpx_memcpy(cpi->coding_context.last_frame_seg_map_copy,
-             cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
+  memcpy(cpi->coding_context.last_frame_seg_map_copy,
+         cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
 
   vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
   vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
@@ -444,20 +444,18 @@
   // previous call to vp9_save_coding_context.
   vp9_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
 
-  vpx_memcpy(cpi->nmvcosts[0], cc->nmvcosts[0],
-             MV_VALS * sizeof(*cc->nmvcosts[0]));
-  vpx_memcpy(cpi->nmvcosts[1], cc->nmvcosts[1],
-             MV_VALS * sizeof(*cc->nmvcosts[1]));
-  vpx_memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0],
-             MV_VALS * sizeof(*cc->nmvcosts_hp[0]));
-  vpx_memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
-             MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
+  memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
+  memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
+  memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0],
+         MV_VALS * sizeof(*cc->nmvcosts_hp[0]));
+  memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
+         MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
 
   vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs);
 
-  vpx_memcpy(cm->last_frame_seg_map,
-             cpi->coding_context.last_frame_seg_map_copy,
-             (cm->mi_rows * cm->mi_cols));
+  memcpy(cm->last_frame_seg_map,
+         cpi->coding_context.last_frame_seg_map_copy,
+         (cm->mi_rows * cm->mi_cols));
 
   vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
   vp9_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
@@ -476,7 +474,7 @@
   // Disable and clear down for KF
   if (cm->frame_type == KEY_FRAME) {
     // Clear down the global segmentation map
-    vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+    memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
     seg->update_map = 0;
     seg->update_data = 0;
     cpi->static_mb_pct = 0;
@@ -489,7 +487,7 @@
   } else if (cpi->refresh_alt_ref_frame) {
     // If this is an alt ref frame
     // Clear down the global segmentation map
-    vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+    memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
     seg->update_map = 0;
     seg->update_data = 0;
     cpi->static_mb_pct = 0;
@@ -550,7 +548,7 @@
 
         vp9_disable_segmentation(seg);
 
-        vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+        memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
 
         seg->update_map = 0;
         seg->update_data = 0;
@@ -2606,9 +2604,9 @@
 
       ref_cnt_fb(pool->frame_bufs,
                  &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
-      vpx_memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
-                 cpi->interp_filter_selected[0],
-                 sizeof(cpi->interp_filter_selected[0]));
+      memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
+             cpi->interp_filter_selected[0],
+             sizeof(cpi->interp_filter_selected[0]));
     }
 
     if (cpi->refresh_golden_frame) {
@@ -2615,13 +2613,13 @@
       ref_cnt_fb(pool->frame_bufs,
                  &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
       if (!cpi->rc.is_src_frame_alt_ref)
-        vpx_memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
-                   cpi->interp_filter_selected[0],
-                   sizeof(cpi->interp_filter_selected[0]));
+        memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
+               cpi->interp_filter_selected[0],
+               sizeof(cpi->interp_filter_selected[0]));
       else
-        vpx_memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
-                   cpi->interp_filter_selected[ALTREF_FRAME],
-                   sizeof(cpi->interp_filter_selected[ALTREF_FRAME]));
+        memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
+               cpi->interp_filter_selected[ALTREF_FRAME],
+               sizeof(cpi->interp_filter_selected[ALTREF_FRAME]));
     }
   }
 
@@ -2629,9 +2627,9 @@
     ref_cnt_fb(pool->frame_bufs,
                &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
     if (!cpi->rc.is_src_frame_alt_ref)
-      vpx_memcpy(cpi->interp_filter_selected[LAST_FRAME],
-                 cpi->interp_filter_selected[0],
-                 sizeof(cpi->interp_filter_selected[0]));
+      memcpy(cpi->interp_filter_selected[LAST_FRAME],
+             cpi->interp_filter_selected[0],
+             sizeof(cpi->interp_filter_selected[0]));
   }
 #if CONFIG_VP9_TEMPORAL_DENOISING
   if (cpi->oxcf.noise_sensitivity > 0) {
@@ -3517,8 +3515,8 @@
   vp9_clear_system_state();
 
 #if CONFIG_INTERNAL_STATS
-  vpx_memset(cpi->mode_chosen_counts, 0,
-             MAX_MODES * sizeof(*cpi->mode_chosen_counts));
+  memset(cpi->mode_chosen_counts, 0,
+         MAX_MODES * sizeof(*cpi->mode_chosen_counts));
 #endif
 
   if (cpi->sf.recode_loop == DISALLOW_RECODE) {
--- a/vp9/encoder/vp9_ethread.c
+++ b/vp9/encoder/vp9_ethread.c
@@ -124,8 +124,8 @@
       thread_data->td->rd_counts = cpi->td.rd_counts;
     }
     if (thread_data->td->counts != &cpi->common.counts) {
-      vpx_memcpy(thread_data->td->counts, &cpi->common.counts,
-                 sizeof(cpi->common.counts));
+      memcpy(thread_data->td->counts, &cpi->common.counts,
+             sizeof(cpi->common.counts));
     }
 
     // Handle use_nonrd_pick_mode case.
--- a/vp9/encoder/vp9_extend.c
+++ b/vp9/encoder/vp9_extend.c
@@ -27,9 +27,9 @@
   uint8_t *dst_ptr2 = dst + w;
 
   for (i = 0; i < h; i++) {
-    vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
-    vpx_memcpy(dst_ptr1 + extend_left, src_ptr1, w);
-    vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
+    memset(dst_ptr1, src_ptr1[0], extend_left);
+    memcpy(dst_ptr1 + extend_left, src_ptr1, w);
+    memset(dst_ptr2, src_ptr2[0], extend_right);
     src_ptr1 += src_pitch;
     src_ptr2 += src_pitch;
     dst_ptr1 += dst_pitch;
@@ -45,12 +45,12 @@
   linesize = extend_left + extend_right + w;
 
   for (i = 0; i < extend_top; i++) {
-    vpx_memcpy(dst_ptr1, src_ptr1, linesize);
+    memcpy(dst_ptr1, src_ptr1, linesize);
     dst_ptr1 += dst_pitch;
   }
 
   for (i = 0; i < extend_bottom; i++) {
-    vpx_memcpy(dst_ptr2, src_ptr2, linesize);
+    memcpy(dst_ptr2, src_ptr2, linesize);
     dst_ptr2 += dst_pitch;
   }
 }
@@ -73,7 +73,7 @@
 
   for (i = 0; i < h; i++) {
     vpx_memset16(dst_ptr1, src_ptr1[0], extend_left);
-    vpx_memcpy(dst_ptr1 + extend_left, src_ptr1, w * sizeof(uint16_t));
+    memcpy(dst_ptr1 + extend_left, src_ptr1, w * sizeof(uint16_t));
     vpx_memset16(dst_ptr2, src_ptr2[0], extend_right);
     src_ptr1 += src_pitch;
     src_ptr2 += src_pitch;
@@ -90,12 +90,12 @@
   linesize = extend_left + extend_right + w;
 
   for (i = 0; i < extend_top; i++) {
-    vpx_memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
+    memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
     dst_ptr1 += dst_pitch;
   }
 
   for (i = 0; i < extend_bottom; i++) {
-    vpx_memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
+    memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
     dst_ptr2 += dst_pitch;
   }
 }
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -389,9 +389,8 @@
   cpi->mbgraph_n_frames = n_frames;
   for (i = 0; i < n_frames; i++) {
     MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
-    vpx_memset(frame_stats->mb_stats, 0,
-               cm->mb_rows * cm->mb_cols *
-               sizeof(*cpi->mbgraph_stats[i].mb_stats));
+    memset(frame_stats->mb_stats, 0,
+           cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
   }
 
   // do motion search to find contribution of each reference to data
--- a/vp9/encoder/vp9_picklpf.c
+++ b/vp9/encoder/vp9_picklpf.c
@@ -81,7 +81,7 @@
   int64_t ss_err[MAX_LOOP_FILTER + 1];
 
   // Set each entry to -1
-  vpx_memset(ss_err, 0xFF, sizeof(ss_err));
+  memset(ss_err, 0xFF, sizeof(ss_err));
 
   //  Make a copy of the unfiltered / processed recon buffer
   vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
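
A word on the memset(ss_err, 0xFF, ...) line above: memset can only replicate a single byte, so the fill is safe here only because an all-0xFF byte pattern reads back as -1 from a two's-complement int64_t, which is exactly what the "Set each entry to -1" comment relies on. A minimal sketch of the idiom (a standalone example, not part of the patch):

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>

  int main(void) {
    int64_t ss_err[4];
    memset(ss_err, 0xFF, sizeof(ss_err));  /* every byte set to 0xFF */
    assert(ss_err[0] == -1);               /* all-ones pattern reads as -1 */
    memset(ss_err, 0, sizeof(ss_err));     /* 0 is the other byte-safe fill */
    assert(ss_err[3] == 0);
    return 0;
  }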
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -51,7 +51,7 @@
   int const_motion = 0;
 
   // Blank the reference vector list
-  vpx_memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
+  memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
 
   // The nearest 2 blocks are treated differently
   // if the size < 8x8 we get the mv from the bmi substructure,
@@ -957,7 +957,7 @@
   mbmi->ref_frame[0] = INTRA_FRAME;
   mbmi->mv[0].as_int = INVALID_MV;
   mbmi->uv_mode = DC_PRED;
-  vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+  memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
 
   // Change the limit of this loop to add other intra prediction
   // mode tests.
@@ -1669,7 +1669,7 @@
         const struct buf_2d orig_src = p->src;
         const struct buf_2d orig_dst = pd->dst;
         struct buf_2d orig_pre[2];
-        vpx_memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
+        memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
 
         // set buffer pointers for sub8x8 motion search.
         p->src.buf =
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -30,8 +30,8 @@
   const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
   int tmp, eob = -1;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX);
@@ -52,8 +52,8 @@
                             const int16_t dequant_ptr, uint16_t *eob_ptr) {
   int eob = -1;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     const int rc = 0;
@@ -84,8 +84,8 @@
   const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
   int tmp, eob = -1;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
 
@@ -112,8 +112,8 @@
   const int n_coeffs = 1024;
   int eob = -1;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     const int rc = 0;
@@ -148,8 +148,8 @@
   (void)quant_shift_ptr;
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Quantization pass: All coefficients with index >= zero_flag are
@@ -195,8 +195,8 @@
   (void)quant_shift_ptr;
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Quantization pass: All coefficients with index >= zero_flag are
@@ -238,8 +238,8 @@
   (void)quant_shift_ptr;
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     for (i = 0; i < n_coeffs; i++) {
@@ -281,8 +281,8 @@
   (void)quant_shift_ptr;
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     for (i = 0; i < n_coeffs; i++) {
@@ -321,8 +321,8 @@
   const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Pre-scan pass
@@ -373,8 +373,8 @@
   const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Pre-scan pass
@@ -431,8 +431,8 @@
   int i, eob = -1;
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Pre-scan pass
@@ -490,8 +490,8 @@
   int i, eob = -1;
   (void)iscan;
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Pre-scan pass
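
The quantizer hunks above all clear qcoeff_ptr/dqcoeff_ptr with memset(ptr, 0, n * sizeof(*ptr)). Scaling the length by sizeof(*ptr) keeps the byte count correct whatever width the coefficient type has (tran_low_t is wider in high-bit-depth builds); passing only the element count would clear just n bytes. A minimal sketch of the pattern, with a hypothetical helper name:

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  /* Clears n_coeffs elements regardless of the element width. */
  static void clear_coeffs(int32_t *qcoeff_ptr, size_t n_coeffs) {
    memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
  }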
--- a/vp9/encoder/vp9_rd.c
+++ b/vp9/encoder/vp9_rd.c
@@ -419,8 +419,8 @@
   int i;
   switch (tx_size) {
     case TX_4X4:
-      vpx_memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
-      vpx_memcpy(t_left, left, sizeof(ENTROPY_CONTEXT) * num_4x4_h);
+      memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
+      memcpy(t_left, left, sizeof(ENTROPY_CONTEXT) * num_4x4_h);
       break;
     case TX_8X8:
       for (i = 0; i < num_4x4_w; i += 2)
@@ -616,8 +616,7 @@
        {2000, 2000, 2000, 4000, 4000, 2000}};
   RD_OPT *const rd = &cpi->rd;
   const int idx = cpi->oxcf.mode == BEST;
-  vpx_memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx],
-             sizeof(thresh_mult[idx]));
+  memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
 }
 
 void vp9_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -728,7 +728,7 @@
   assert(bs == xd->mi[0]->mbmi.sb_type);
 
   if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
-    vpx_memset(txfm_cache, 0, TX_MODES * sizeof(int64_t));
+    memset(txfm_cache, 0, TX_MODES * sizeof(int64_t));
     choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
                            bs);
   } else {
@@ -790,8 +790,8 @@
 
   assert(ib < 4);
 
-  vpx_memcpy(ta, a, sizeof(ta));
-  vpx_memcpy(tl, l, sizeof(tl));
+  memcpy(ta, a, sizeof(ta));
+  memcpy(tl, l, sizeof(tl));
   xd->mi[0]->mbmi.tx_size = TX_4X4;
 
 #if CONFIG_VP9_HIGHBITDEPTH
@@ -812,8 +812,8 @@
             continue;
       }
 
-      vpx_memcpy(tempa, ta, sizeof(ta));
-      vpx_memcpy(templ, tl, sizeof(tl));
+      memcpy(tempa, ta, sizeof(ta));
+      memcpy(templ, tl, sizeof(tl));
 
       for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
         for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
@@ -873,12 +873,12 @@
         *bestdistortion = distortion;
         best_rd = this_rd;
         *best_mode = mode;
-        vpx_memcpy(a, tempa, sizeof(tempa));
-        vpx_memcpy(l, templ, sizeof(templ));
+        memcpy(a, tempa, sizeof(tempa));
+        memcpy(l, templ, sizeof(templ));
         for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
-          vpx_memcpy(best_dst16 + idy * 8,
-                     CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
-                     num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+          memcpy(best_dst16 + idy * 8,
+                 CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
+                 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
         }
       }
     next_highbd:
@@ -888,9 +888,9 @@
       return best_rd;
 
     for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
-      vpx_memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
-                 best_dst16 + idy * 8,
-                 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+      memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
+             best_dst16 + idy * 8,
+             num_4x4_blocks_wide * 4 * sizeof(uint16_t));
     }
 
     return best_rd;
@@ -913,8 +913,8 @@
           continue;
     }
 
-    vpx_memcpy(tempa, ta, sizeof(ta));
-    vpx_memcpy(templ, tl, sizeof(tl));
+    memcpy(tempa, ta, sizeof(ta));
+    memcpy(templ, tl, sizeof(tl));
 
     for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
       for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
@@ -971,11 +971,11 @@
       *bestdistortion = distortion;
       best_rd = this_rd;
       *best_mode = mode;
-      vpx_memcpy(a, tempa, sizeof(tempa));
-      vpx_memcpy(l, templ, sizeof(templ));
+      memcpy(a, tempa, sizeof(tempa));
+      memcpy(l, templ, sizeof(templ));
       for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
-        vpx_memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
-                   num_4x4_blocks_wide * 4);
+        memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
+               num_4x4_blocks_wide * 4);
     }
   next:
     {}
@@ -985,8 +985,8 @@
     return best_rd;
 
   for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
-    vpx_memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
-               num_4x4_blocks_wide * 4);
+    memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
+           num_4x4_blocks_wide * 4);
 
   return best_rd;
 }
@@ -1011,8 +1011,8 @@
   ENTROPY_CONTEXT t_above[4], t_left[4];
   const int *bmode_costs = cpi->mbmode_cost;
 
-  vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
-  vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
+  memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
+  memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
 
   // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
   for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
@@ -1084,7 +1084,7 @@
     for (i = 0; i < TX_MODES; i++)
       tx_cache[i] = INT64_MAX;
 
-  vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+  memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
   /* Y Search for intra prediction mode */
   for (mode = DC_PRED; mode <= TM_PRED; mode++) {
     int64_t local_tx_cache[TX_MODES];
@@ -1201,7 +1201,7 @@
   int this_rate_tokenonly, this_rate, s;
   int64_t this_distortion, this_sse;
 
-  vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+  memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
   for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
     if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
       continue;
@@ -1239,7 +1239,7 @@
   int64_t unused;
 
   x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
-  vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+  memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
   super_block_uvrd(cpi, x, rate_tokenonly, distortion,
                    skippable, &unused, bsize, INT64_MAX);
   *rate = *rate_tokenonly + cpi->intra_uv_mode_cost[cm->frame_type][DC_PRED];
@@ -1321,8 +1321,7 @@
 
   for (idy = 0; idy < num_4x4_blocks_high; ++idy)
     for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
-      vpx_memmove(&mic->bmi[i + idy * 2 + idx],
-                  &mic->bmi[i], sizeof(mic->bmi[i]));
+      memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));
 
   return cost_mv_ref(cpi, mode, mbmi->mode_context[mbmi->ref_frame[0]]) +
             thismvcost;
@@ -1777,8 +1776,8 @@
   for (i = 0; i < 4; i++)
     bsi->modes[i] = ZEROMV;
 
-  vpx_memcpy(t_above, pd->above_context, sizeof(t_above));
-  vpx_memcpy(t_left, pd->left_context, sizeof(t_left));
+  memcpy(t_above, pd->above_context, sizeof(t_above));
+  memcpy(t_left, pd->left_context, sizeof(t_left));
 
   // 64 makes this threshold really big effectively
   // making it so that we very rarely check mvs on
@@ -1820,11 +1819,11 @@
                                 this_mode, mbmi->ref_frame))
           continue;
 
-        vpx_memcpy(orig_pre, pd->pre, sizeof(orig_pre));
-        vpx_memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
-                   sizeof(bsi->rdstat[i][mode_idx].ta));
-        vpx_memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
-                   sizeof(bsi->rdstat[i][mode_idx].tl));
+        memcpy(orig_pre, pd->pre, sizeof(orig_pre));
+        memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
+               sizeof(bsi->rdstat[i][mode_idx].ta));
+        memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
+               sizeof(bsi->rdstat[i][mode_idx].tl));
 
         // motion search for newmv (single predictor case only)
         if (!has_second_rf && this_mode == NEWMV &&
@@ -2000,8 +1999,8 @@
 
           if (!subpelmv && have_ref &&
               ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
-            vpx_memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
-                       sizeof(SEG_RDSTAT));
+            memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
+                   sizeof(SEG_RDSTAT));
             if (num_4x4_blocks_wide > 1)
               bsi->rdstat[i + 1][mode_idx].eobs =
                   ref_bsi->rdstat[i + 1][mode_idx].eobs;
@@ -2053,8 +2052,8 @@
       }
 
       mode_idx = INTER_OFFSET(mode_selected);
-      vpx_memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
-      vpx_memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
+      memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
+      memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
 
       set_and_cost_bmi_mvs(cpi, xd, i, mode_selected, mode_mv[mode_selected],
                            frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
@@ -2121,8 +2120,8 @@
   int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
                                              SEG_LVL_REF_FRAME);
   if (seg_ref_active) {
-    vpx_memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
-    vpx_memset(ref_costs_comp,   0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
+    memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
+    memset(ref_costs_comp,   0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
     *comp_mode_p = 128;
   } else {
     vp9_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
@@ -2191,9 +2190,9 @@
   ctx->comp_pred_diff   = (int)comp_pred_diff[COMPOUND_REFERENCE];
   ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
 
-  vpx_memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff));
-  vpx_memcpy(ctx->best_filter_diff, best_filter_diff,
-             sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
+  memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff));
+  memcpy(ctx->best_filter_diff, best_filter_diff,
+         sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
 }
 
 static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
@@ -2659,8 +2658,8 @@
 
           skip_txfm_sb = tmp_skip_sb;
           skip_sse_sb = tmp_skip_sse;
-          vpx_memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
-          vpx_memcpy(bsse, x->bsse, sizeof(bsse));
+          memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
+          memcpy(bsse, x->bsse, sizeof(bsse));
         }
       }
       restore_dst_buf(xd, orig_dst, orig_dst_stride);
@@ -2690,8 +2689,8 @@
     model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
                     &skip_txfm_sb, &skip_sse_sb);
     rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
-    vpx_memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
-    vpx_memcpy(bsse, x->bsse, sizeof(bsse));
+    memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
+    memcpy(bsse, x->bsse, sizeof(bsse));
   }
 
   if (!is_comp_pred)
@@ -2701,7 +2700,7 @@
     if (is_comp_pred)
       if (single_skippable[this_mode][refs[0]] &&
           single_skippable[this_mode][refs[1]])
-        vpx_memset(skip_txfm, 1, sizeof(skip_txfm));
+        memset(skip_txfm, 1, sizeof(skip_txfm));
 
   if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
     // if current pred_error modeled rd is substantially more than the best
@@ -2715,8 +2714,8 @@
   if (cm->interp_filter == SWITCHABLE)
     *rate2 += rs;
 
-  vpx_memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
-  vpx_memcpy(x->bsse, bsse, sizeof(bsse));
+  memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
+  memcpy(x->bsse, bsse, sizeof(bsse));
 
   if (!skip_txfm_sb) {
     int skippable_y, skippable_uv;
@@ -3266,7 +3265,7 @@
     if (ref_frame == INTRA_FRAME) {
       TX_SIZE uv_tx;
       struct macroblockd_plane *const pd = &xd->plane[1];
-      vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+      memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
       super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
                       NULL, bsize, tx_cache, best_rd);
       if (rate_y == INT_MAX)
@@ -3390,8 +3389,8 @@
 
         if (!x->select_tx_size)
           swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
-        vpx_memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
-                   sizeof(uint8_t) * ctx->num_4x4_blk);
+        memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
+               sizeof(uint8_t) * ctx->num_4x4_blk);
 
         // TODO(debargha): enhance this test with a better distortion prediction
         // based on qp, activity mask and history
@@ -3751,7 +3750,7 @@
   int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
 
   x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
-  vpx_memset(x->zcoeff_blk[TX_4X4], 0, 4);
+  memset(x->zcoeff_blk[TX_4X4], 0, 4);
   vp9_zero(best_mbmode);
 
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
@@ -4075,7 +4074,7 @@
         // then dont bother looking at UV
         vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
                                         BLOCK_8X8);
-        vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+        memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
         if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
                               &uv_sse, BLOCK_8X8, tmp_best_rdu))
           continue;
@@ -4156,8 +4155,8 @@
         best_skip2 = this_skip2;
         if (!x->select_tx_size)
           swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
-        vpx_memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
-                   sizeof(uint8_t) * ctx->num_4x4_blk);
+        memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
+               sizeof(uint8_t) * ctx->num_4x4_blk);
 
         for (i = 0; i < 4; i++)
           best_bmodes[i] = xd->mi[0]->bmi[i];
@@ -4281,8 +4280,7 @@
       xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
   } else {
     for (i = 0; i < 4; ++i)
-      vpx_memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i],
-                 sizeof(b_mode_info));
+      memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
 
     mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
     mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -36,7 +36,7 @@
                           unsigned char abs_delta) {
   seg->abs_delta = abs_delta;
 
-  vpx_memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
+  memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
 }
 void vp9_disable_segfeature(struct segmentation *seg, int segment_id,
                             SEG_LVL_FEATURES feature_id) {
@@ -213,8 +213,8 @@
 
   // Set default state for the segment tree probabilities and the
   // temporal coding probabilities
-  vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
-  vpx_memset(seg->pred_probs, 255, sizeof(seg->pred_probs));
+  memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
+  memset(seg->pred_probs, 255, sizeof(seg->pred_probs));
 
   // First of all generate stats regarding how well the last segment map
   // predicts this one
@@ -263,11 +263,11 @@
   // Now choose which coding method to use.
   if (t_pred_cost < no_pred_cost) {
     seg->temporal_update = 1;
-    vpx_memcpy(seg->tree_probs, t_pred_tree, sizeof(t_pred_tree));
-    vpx_memcpy(seg->pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
+    memcpy(seg->tree_probs, t_pred_tree, sizeof(t_pred_tree));
+    memcpy(seg->pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
   } else {
     seg->temporal_update = 0;
-    vpx_memcpy(seg->tree_probs, no_pred_tree, sizeof(no_pred_tree));
+    memcpy(seg->tree_probs, no_pred_tree, sizeof(no_pred_tree));
   }
 }
 
@@ -276,6 +276,6 @@
   seg->enabled = 0;
   seg->update_map = 0;
   seg->update_data = 0;
-  vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
+  memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
   vp9_clearall_segfeatures(seg);
 }
--- a/vp9/encoder/vp9_skin_detection.c
+++ b/vp9/encoder/vp9_skin_detection.c
@@ -60,7 +60,7 @@
   const int src_ystride = cpi->Source->y_stride;
   const int src_uvstride = cpi->Source->uv_stride;
   YV12_BUFFER_CONFIG skinmap;
-  vpx_memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG));
+  memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG));
   if (vp9_alloc_frame_buffer(&skinmap, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
                                VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment)) {
@@ -67,7 +67,7 @@
       vp9_free_frame_buffer(&skinmap);
       return;
   }
-  vpx_memset(skinmap.buffer_alloc, 128, skinmap.frame_size);
+  memset(skinmap.buffer_alloc, 128, skinmap.frame_size);
   y = skinmap.y_buffer;
   // Loop through 8x8 blocks and set skin map based on center pixel of block.
   // Set y to white for skin block, otherwise set to source with gray scale.
--- a/vp9/encoder/vp9_subexp.c
+++ b/vp9/encoder/vp9_subexp.c
@@ -146,7 +146,7 @@
   int newp;
   vp9_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
   vp9_model_to_full_probs(oldp, oldplist);
-  vpx_memcpy(newplist, oldp, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+  memcpy(newplist, oldp, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
   for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
     old_b += cost_branch256(ct + 2 * i, oldplist[i]);
   old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);
--- a/vp9/encoder/vp9_svc_layercontext.c
+++ b/vp9/encoder/vp9_svc_layercontext.c
@@ -46,8 +46,8 @@
                            "Failed to allocate empty frame for multiple frame "
                            "contexts");
 
-      vpx_memset(cpi->svc.empty_frame.img.buffer_alloc, 0x80,
-                 cpi->svc.empty_frame.img.buffer_alloc_sz);
+      memset(cpi->svc.empty_frame.img.buffer_alloc, 0x80,
+             cpi->svc.empty_frame.img.buffer_alloc_sz);
       cpi->svc.empty_frame_width = cpi->common.width;
       cpi->svc.empty_frame_height = cpi->common.height;
     }
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -329,8 +329,8 @@
       int i, j, k;
       int stride;
 
-      vpx_memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
-      vpx_memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
+      memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
+      memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
 
       cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
       cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16)
--- a/vp9/encoder/x86/vp9_denoiser_sse2.c
+++ b/vp9/encoder/x86/vp9_denoiser_sse2.c
@@ -150,25 +150,24 @@
   const uint8_t shift = (width == 4) ? 2 : 1;
 
   for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) {
-    vpx_memcpy(sig_buffer[r], sig, width);
-    vpx_memcpy(sig_buffer[r] + width, sig + sig_stride, width);
-    vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, width);
-    vpx_memcpy(mc_running_buffer[r] + width,
-               mc_running_avg_y + mc_avg_y_stride, width);
-    vpx_memcpy(running_buffer[r], running_avg_y, width);
-    vpx_memcpy(running_buffer[r] + width,
-               running_avg_y + avg_y_stride, width);
+    memcpy(sig_buffer[r], sig, width);
+    memcpy(sig_buffer[r] + width, sig + sig_stride, width);
+    memcpy(mc_running_buffer[r], mc_running_avg_y, width);
+    memcpy(mc_running_buffer[r] + width,
+           mc_running_avg_y + mc_avg_y_stride, width);
+    memcpy(running_buffer[r], running_avg_y, width);
+    memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width);
     if (width == 4) {
-      vpx_memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width);
-      vpx_memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width);
-      vpx_memcpy(mc_running_buffer[r] + width * 2,
-                 mc_running_avg_y + mc_avg_y_stride * 2, width);
-      vpx_memcpy(mc_running_buffer[r] + width * 3,
-                 mc_running_avg_y + mc_avg_y_stride * 3, width);
-      vpx_memcpy(running_buffer[r] + width * 2,
-                 running_avg_y + avg_y_stride * 2, width);
-      vpx_memcpy(running_buffer[r] + width * 3,
-                 running_avg_y + avg_y_stride * 3, width);
+      memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width);
+      memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width);
+      memcpy(mc_running_buffer[r] + width * 2,
+             mc_running_avg_y + mc_avg_y_stride * 2, width);
+      memcpy(mc_running_buffer[r] + width * 3,
+             mc_running_avg_y + mc_avg_y_stride * 3, width);
+      memcpy(running_buffer[r] + width * 2,
+             running_avg_y + avg_y_stride * 2, width);
+      memcpy(running_buffer[r] + width * 3,
+             running_avg_y + avg_y_stride * 3, width);
     }
     acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r],
                                       mc_running_buffer[r],
@@ -175,13 +174,13 @@
                                       running_buffer[r],
                                       &k_0, &k_4, &k_8, &k_16,
                                       &l3, &l32, &l21, acc_diff);
-    vpx_memcpy(running_avg_y, running_buffer[r], width);
-    vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
+    memcpy(running_avg_y, running_buffer[r], width);
+    memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
     if (width == 4) {
-      vpx_memcpy(running_avg_y + avg_y_stride * 2,
-                 running_buffer[r] + width * 2, width);
-      vpx_memcpy(running_avg_y + avg_y_stride * 3,
-                 running_buffer[r] + width * 3, width);
+      memcpy(running_avg_y + avg_y_stride * 2,
+             running_buffer[r] + width * 2, width);
+      memcpy(running_avg_y + avg_y_stride * 3,
+             running_buffer[r] + width * 3, width);
     }
     // Update pointers for next iteration.
     sig += (sig_stride << shift);
@@ -213,14 +212,14 @@
           acc_diff = vp9_denoiser_adj_16x1_sse2(
               sig_buffer[r], mc_running_buffer[r], running_buffer[r],
               k_0, k_delta, acc_diff);
-          vpx_memcpy(running_avg_y, running_buffer[r], width);
-          vpx_memcpy(running_avg_y + avg_y_stride,
-                     running_buffer[r] + width, width);
+          memcpy(running_avg_y, running_buffer[r], width);
+          memcpy(running_avg_y + avg_y_stride,
+                 running_buffer[r] + width, width);
           if (width == 4) {
-            vpx_memcpy(running_avg_y + avg_y_stride * 2,
-                       running_buffer[r] + width * 2, width);
-            vpx_memcpy(running_avg_y + avg_y_stride * 3,
-                       running_buffer[r] + width * 3, width);
+            memcpy(running_avg_y + avg_y_stride * 2,
+                   running_buffer[r] + width * 2, width);
+            memcpy(running_avg_y + avg_y_stride * 3,
+                   running_buffer[r] + width * 3, width);
           }
           // Update pointers for next iteration.
           running_avg_y += (avg_y_stride << shift);
--- a/vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c
+++ b/vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c
@@ -44,8 +44,8 @@
 
   (void)scan;
 
-  vpx_memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Pre-scan pass
@@ -132,8 +132,8 @@
   nzbins[0] = _mm_sub_epi32(nzbins[0], zbins[0]);
   nzbins[1] = _mm_sub_epi32(nzbins[1], zbins[1]);
 
-  vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
-  vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
 
   if (!skip_block) {
     // Pre-scan pass
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -527,7 +527,7 @@
       frame_worker_data->scratch_buffer_size = data_sz;
     }
     frame_worker_data->data_size = data_sz;
-    vpx_memcpy(frame_worker_data->scratch_buffer, *data, data_sz);
+    memcpy(frame_worker_data->scratch_buffer, *data, data_sz);
 
     frame_worker_data->frame_decoded = 0;
     frame_worker_data->frame_context_ready = 0;
--- a/vpx_mem/vpx_mem.c
+++ b/vpx_mem/vpx_mem.c
@@ -91,14 +91,6 @@
   }
 }
 
-void *vpx_memcpy(void *dest, const void *source, size_t length) {
-  return memcpy(dest, source, length);
-}
-
-void *vpx_memset(void *dest, int val, size_t length) {
-  return memset(dest, val, length);
-}
-
 #if CONFIG_VP9 && CONFIG_VP9_HIGHBITDEPTH
 void *vpx_memset16(void *dest, int val, size_t length) {
   int i;
@@ -109,7 +101,3 @@
   return orig;
 }
 #endif  // CONFIG_VP9 && CONFIG_VP9_HIGHBITDEPTH
-
-void *vpx_memmove(void *dest, const void *src, size_t count) {
-  return memmove(dest, src, count);
-}
--- a/vpx_mem/vpx_mem.h
+++ b/vpx_mem/vpx_mem.h
@@ -30,19 +30,11 @@
   void *vpx_realloc(void *memblk, size_t size);
   void vpx_free(void *memblk);
 
-  void *vpx_memcpy(void *dest, const void *src, size_t length);
-  void *vpx_memset(void *dest, int val, size_t length);
 #if CONFIG_VP9 && CONFIG_VP9_HIGHBITDEPTH
   void *vpx_memset16(void *dest, int val, size_t length);
 #endif
-  void *vpx_memmove(void *dest, const void *src, size_t count);
 
-#ifndef __VPX_MEM_C__
-# include <string.h>
-# define vpx_memcpy  memcpy
-# define vpx_memset  memset
-# define vpx_memmove memmove
-#endif
+#include <string.h>
 
 #ifdef VPX_MEM_PLTFRM
 # include VPX_MEM_PLTFRM
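
Only the byte-oriented wrappers are removed here; vpx_memset16 stays because memset writes a single byte pattern and cannot fill a uint16_t buffer with an arbitrary 16-bit value, which the high-bit-depth paths in vp9_extend.c and yv12extend.c still need. Roughly what the retained helper does, as a sketch rather than the exact libvpx source:

  #include <stddef.h>
  #include <stdint.h>

  static void *memset16_sketch(void *dest, int val, size_t length) {
    uint16_t *p = (uint16_t *)dest;
    size_t i;
    for (i = 0; i < length; ++i)
      p[i] = (uint16_t)val;  /* one 16-bit store per element */
    return dest;
  }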
--- a/vpx_scale/generic/gen_scalers.c
+++ b/vpx_scale/generic/gen_scalers.c
@@ -215,7 +215,7 @@
                                    unsigned int dest_width) {
   (void) dest_pitch;
   (void) src_pitch;
-  vpx_memcpy(dest, source, dest_width);
+  memcpy(dest, source, dest_width);
 }
 
 void vp8_vertical_band_2_1_scale_i_c(unsigned char *source,
--- a/vpx_scale/generic/vpx_scale.c
+++ b/vpx_scale/generic/vpx_scale.c
@@ -379,7 +379,7 @@
       vert_band_scale(temp_area + dest_pitch, dest_pitch, dest, dest_pitch, dest_width);
 
       if (interpolation)
-        vpx_memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_width);
+        memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_width);
 
       /* Next band... */
       source += (unsigned long) source_band_height  * source_pitch;
@@ -432,7 +432,7 @@
                  temp_area + i * dest_pitch, 1, hratio, dest_width);
       } else { /*  Duplicate the last row */
         /* copy temp_area row 0 over from last row in the past */
-        vpx_memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
+        memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
       }
     }
 
@@ -443,7 +443,7 @@
     }
 
     /* copy temp_area row 0 over from last row in the past */
-    vpx_memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_pitch);
+    memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_pitch);
 
     /* move to the next band */
     source += source_band_height * source_pitch;
@@ -498,11 +498,11 @@
 
   if (dw < (int)dst->y_width)
     for (i = 0; i < dh; i++)
-      vpx_memset(dst->y_buffer + i * dst->y_stride + dw - 1, dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1);
+      memset(dst->y_buffer + i * dst->y_stride + dw - 1, dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1);
 
   if (dh < (int)dst->y_height)
     for (i = dh - 1; i < (int)dst->y_height; i++)
-      vpx_memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
+      memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
 
   Scale2D((unsigned char *) src->u_buffer, src->uv_stride, src->uv_width, src->uv_height,
           (unsigned char *) dst->u_buffer, dst->uv_stride, dw / 2, dh / 2,
@@ -510,11 +510,11 @@
 
   if (dw / 2 < (int)dst->uv_width)
     for (i = 0; i < dst->uv_height; i++)
-      vpx_memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
+      memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
 
   if (dh / 2 < (int)dst->uv_height)
     for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
-      vpx_memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
+      memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
 
   Scale2D((unsigned char *) src->v_buffer, src->uv_stride, src->uv_width, src->uv_height,
           (unsigned char *) dst->v_buffer, dst->uv_stride, dw / 2, dh / 2,
@@ -522,9 +522,9 @@
 
   if (dw / 2 < (int)dst->uv_width)
     for (i = 0; i < dst->uv_height; i++)
-      vpx_memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
+      memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
 
   if (dh / 2 < (int) dst->uv_height)
     for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
-      vpx_memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
+      memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
 }
--- a/vpx_scale/generic/yv12config.c
+++ b/vpx_scale/generic/yv12config.c
@@ -38,7 +38,7 @@
     /* buffer_alloc isn't accessed by most functions.  Rather y_buffer,
       u_buffer and v_buffer point to buffer_alloc and are used.  Clear out
       all of this so that a freed pointer isn't inadvertently used */
-    vpx_memset(ybf, 0, sizeof(YV12_BUFFER_CONFIG));
+    memset(ybf, 0, sizeof(YV12_BUFFER_CONFIG));
   } else {
     return -1;
   }
@@ -128,7 +128,7 @@
     /* buffer_alloc isn't accessed by most functions.  Rather y_buffer,
       u_buffer and v_buffer point to buffer_alloc and are used.  Clear out
       all of this so that a freed pointer isn't inadvertently used */
-    vpx_memset(ybf, 0, sizeof(YV12_BUFFER_CONFIG));
+    memset(ybf, 0, sizeof(YV12_BUFFER_CONFIG));
   } else {
     return -1;
   }
@@ -222,7 +222,7 @@
       // This memset is needed for fixing valgrind error from C loop filter
       // due to access uninitialized memory in frame border. It could be
       // removed if border is totally removed.
-      vpx_memset(ybf->buffer_alloc, 0, ybf->buffer_alloc_sz);
+      memset(ybf->buffer_alloc, 0, ybf->buffer_alloc_sz);
     }
 
     /* Only support allocating buffers that have a border that's a multiple
--- a/vpx_scale/generic/yv12extend.c
+++ b/vpx_scale/generic/yv12extend.c
@@ -31,8 +31,8 @@
   uint8_t *dst_ptr2 = src + width;
 
   for (i = 0; i < height; ++i) {
-    vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
-    vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
+    memset(dst_ptr1, src_ptr1[0], extend_left);
+    memset(dst_ptr2, src_ptr2[0], extend_right);
     src_ptr1 += src_stride;
     src_ptr2 += src_stride;
     dst_ptr1 += src_stride;
@@ -48,12 +48,12 @@
   dst_ptr2 = src + src_stride * height - extend_left;
 
   for (i = 0; i < extend_top; ++i) {
-    vpx_memcpy(dst_ptr1, src_ptr1, linesize);
+    memcpy(dst_ptr1, src_ptr1, linesize);
     dst_ptr1 += src_stride;
   }
 
   for (i = 0; i < extend_bottom; ++i) {
-    vpx_memcpy(dst_ptr2, src_ptr2, linesize);
+    memcpy(dst_ptr2, src_ptr2, linesize);
     dst_ptr2 += src_stride;
   }
 }
@@ -91,12 +91,12 @@
   dst_ptr2 = src + src_stride * height - extend_left;
 
   for (i = 0; i < extend_top; ++i) {
-    vpx_memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
+    memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
     dst_ptr1 += src_stride;
   }
 
   for (i = 0; i < extend_bottom; ++i) {
-    vpx_memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
+    memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
     dst_ptr2 += src_stride;
   }
 }
@@ -212,7 +212,7 @@
 void memcpy_short_addr(uint8_t *dst8, const uint8_t *src8, int num) {
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
-  vpx_memcpy(dst, src, num * sizeof(uint16_t));
+  memcpy(dst, src, num * sizeof(uint16_t));
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #endif  // CONFIG_VP9
@@ -269,7 +269,7 @@
 #endif
 
   for (row = 0; row < src_ybc->y_height; ++row) {
-    vpx_memcpy(dst, src, src_ybc->y_width);
+    memcpy(dst, src, src_ybc->y_width);
     src += src_ybc->y_stride;
     dst += dst_ybc->y_stride;
   }
@@ -278,7 +278,7 @@
   dst = dst_ybc->u_buffer;
 
   for (row = 0; row < src_ybc->uv_height; ++row) {
-    vpx_memcpy(dst, src, src_ybc->uv_width);
+    memcpy(dst, src, src_ybc->uv_width);
     src += src_ybc->uv_stride;
     dst += dst_ybc->uv_stride;
   }
@@ -287,7 +287,7 @@
   dst = dst_ybc->v_buffer;
 
   for (row = 0; row < src_ybc->uv_height; ++row) {
-    vpx_memcpy(dst, src, src_ybc->uv_width);
+    memcpy(dst, src, src_ybc->uv_width);
     src += src_ybc->uv_stride;
     dst += dst_ybc->uv_stride;
   }
@@ -306,7 +306,7 @@
     const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
     uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
     for (row = 0; row < src_ybc->y_height; ++row) {
-      vpx_memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));
+      memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));
       src16 += src_ybc->y_stride;
       dst16 += dst_ybc->y_stride;
     }
@@ -315,7 +315,7 @@
 #endif
 
   for (row = 0; row < src_ybc->y_height; ++row) {
-    vpx_memcpy(dst, src, src_ybc->y_width);
+    memcpy(dst, src, src_ybc->y_width);
     src += src_ybc->y_stride;
     dst += dst_ybc->y_stride;
   }
--- a/vpx_scale/mips/dspr2/yv12extend_dspr2.c
+++ b/vpx_scale/mips/dspr2/yv12extend_dspr2.c
@@ -94,12 +94,12 @@
   linesize = extend_left + extend_right + width;
 
   for (i = 0; i < extend_top; i++) {
-    vpx_memcpy(top_dst, top_src, linesize);
+    memcpy(top_dst, top_src, linesize);
     top_dst += src_stride;
   }
 
   for (i = 0; i < extend_bottom; i++) {
-    vpx_memcpy(bot_dst, bot_src, linesize);
+    memcpy(bot_dst, bot_src, linesize);
     bot_dst += src_stride;
   }
 }