Index: source/libvpx/vp8/common/x86/variance_sse2.c
===================================================================
--- source/libvpx/vp8/common/x86/variance_sse2.c (revision 191025)
+++ source/libvpx/vp8/common/x86/variance_sse2.c (working copy)
@@ -148,7 +148,7 @@

vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 4));
+ return (var - (((unsigned int)avg * avg) >> 4));

}

@@ -165,7 +165,7 @@

vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 6));
+ return (var - (((unsigned int)avg * avg) >> 6));

}

@@ -184,7 +184,7 @@

vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
*sse = sse0;
- return (sse0 - ((unsigned int)(sum0 * sum0) >> 8));
+ return (sse0 - (((unsigned int)sum0 * sum0) >> 8));
}
unsigned int vp8_mse16x16_wmt(
const unsigned char *src_ptr,
@@ -220,7 +220,7 @@
var = sse0 + sse1;
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));

}

@@ -241,7 +241,7 @@
var = sse0 + sse1;
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));

}

@@ -265,7 +265,7 @@
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 4));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 4));
}


@@ -314,7 +314,7 @@
}

*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 6));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 6));
}

unsigned int vp8_sub_pixel_variance16x16_wmt
@@ -376,7 +376,7 @@
}

*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}

unsigned int vp8_sub_pixel_mse16x16_wmt(
@@ -447,7 +447,7 @@
}

*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 7));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
}

unsigned int vp8_sub_pixel_variance8x16_wmt
@@ -495,7 +495,7 @@
}

*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 7));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 7));
}


@@ -515,7 +515,7 @@
&xsum0, &xxsum0);

*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}


@@ -534,7 +534,7 @@
&xsum0, &xxsum0);

*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}


@@ -554,5 +554,5 @@
&xsum0, &xxsum0);

*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
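The change in every hunk is the same: the (unsigned int) cast is moved from the result of the multiplication onto its first operand, so the squaring of the signed sum is carried out in unsigned rather than signed arithmetic. The apparent motivation is that for the larger block sizes the signed product can exceed INT_MAX, and signed overflow is undefined behavior in C, whereas unsigned arithmetic is well defined. A minimal sketch of the difference, assuming a hypothetical worst-case sum for a 16x16 block (256 differences of magnitude up to 255, i.e. 65280); this demo program is illustrative only and is not part of the patch:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical worst-case sum of differences for a 16x16 block:
           256 pixels, each difference up to 255 in magnitude. */
        int sum = 255 * 256;    /* 65280 */

        /* Old form: sum * sum is evaluated in signed int arithmetic.
           65280 * 65280 = 4261478400 exceeds INT_MAX, so the product
           overflows before the cast applies -- undefined behavior.

           unsigned int before = (unsigned int)(sum * sum) >> 8;   */

        /* New form: the cast converts the left operand first, so the usual
           arithmetic conversions make the multiplication unsigned, and the
           result is well defined (4261478400 fits in 32 unsigned bits). */
        unsigned int after = ((unsigned int)sum * sum) >> 8;

        printf("%u\n", after);  /* prints 16646400 */
        return 0;
    }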