| OLD | NEW |
| (Empty) |
| 1 /* | |
| 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. | |
| 3 * | |
| 4 * Use of this source code is governed by a BSD-style license | |
| 5 * that can be found in the LICENSE file in the root of the source | |
| 6 * tree. An additional intellectual property rights grant can be found | |
| 7 * in the file PATENTS. All contributing project authors may | |
| 8 * be found in the AUTHORS file in the root of the source tree. | |
| 9 */ | |
| 10 | |
#include <emmintrin.h>  // SSE2
#include <string.h>     // memcpy (safe unaligned loads)
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_ports/emmintrin_compat.h"
| 16 | |
| 17 #if HAVE_SSE2 | |
/* Sum of absolute differences over a 16-wide, 3-row block.
 * Rows are fetched with unaligned 16-byte loads; neither pointer needs
 * any particular alignment.
 */
unsigned int vp9_sad16x3_sse2(
    const unsigned char *src_ptr,
    int src_stride,
    const unsigned char *ref_ptr,
    int ref_stride) {
  int row;
  __m128i total = _mm_setzero_si128();

  for (row = 0; row < 3; ++row) {
    const __m128i s = _mm_loadu_si128((const __m128i *)src_ptr);
    const __m128i r = _mm_loadu_si128((const __m128i *)ref_ptr);
    /* _mm_sad_epu8 leaves two 16-bit partial sums, one per 64-bit lane. */
    total = _mm_add_epi16(total, _mm_sad_epu8(s, r));
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }

  /* Fold the upper lane's partial sum into the lower lane. */
  total = _mm_add_epi16(total, _mm_srli_si128(total, 8));
  return _mm_cvtsi128_si32(total);
}
| 42 | |
/* Load 4 bytes from an arbitrarily aligned address into the low 32 bits of
 * an XMM register.  The previous code did *(const int *)p, which is
 * undefined behavior twice over (misaligned access, and reading unsigned
 * char storage through an int lvalue — strict aliasing).  memcpy is
 * well-defined and compiles to the same single movd on x86.
 */
static inline __m128i load_4_bytes(const unsigned char *p) {
  int t;
  memcpy(&t, p, sizeof(t));
  return _mm_cvtsi32_si128(t);
}

/* Sum of absolute differences over a 3-wide, 16-tall block.
 *
 * Each row is fetched as a 4-byte load, i.e. one byte beyond the 3 valid
 * columns; the caller must guarantee that extra byte is readable.  The
 * extra byte is masked/shifted away before the SAD so it never affects
 * the result.
 */
unsigned int vp9_sad3x16_sse2(
    const unsigned char *src_ptr,
    int src_stride,
    const unsigned char *ref_ptr,
    int ref_stride) {
  int r;
  __m128i s0, s1, s2, s3;
  __m128i r0, r1, r2, r3;
  __m128i sad = _mm_setzero_si128();
  __m128i mask;
  const int offset = (uintptr_t)src_ptr & 3;

  /* In current use case, the offset is 1 if CONFIG_SUBPELREFMV is off.
   * Here, for offset=1, we adjust src_ptr to be 4-byte aligned. Then the
   * per-row load takes much less time.  After the adjustment the *first*
   * loaded byte (not the last) is the extra one, which is why the masking
   * below differs between the two cases.
   */
  if (offset == 1)
    src_ptr -= 1;

  /* mask = 0xffffffffffff0000ffffffffffff0000: clears the low 16 bits of
   * each 64-bit lane, i.e. the pair of interleaved "extra" bytes that the
   * offset==1 adjustment put at the front of each row pair. */
  mask = _mm_cmpeq_epi32(sad, sad);
  mask = _mm_slli_epi64(mask, 16);

  for (r = 0; r < 16; r += 4) {
    /* Four rows of src and ref, 4 bytes each (3 valid + 1 extra). */
    s0 = load_4_bytes(src_ptr + 0 * src_stride);
    s1 = load_4_bytes(src_ptr + 1 * src_stride);
    s2 = load_4_bytes(src_ptr + 2 * src_stride);
    s3 = load_4_bytes(src_ptr + 3 * src_stride);
    r0 = load_4_bytes(ref_ptr + 0 * ref_stride);
    r1 = load_4_bytes(ref_ptr + 1 * ref_stride);
    r2 = load_4_bytes(ref_ptr + 2 * ref_stride);
    r3 = load_4_bytes(ref_ptr + 3 * ref_stride);

    /* Interleave rows bytewise, then pack the two row pairs into one
     * register: each 64-bit lane holds two rows' worth of bytes. */
    s0 = _mm_unpacklo_epi8(s0, s1);
    r0 = _mm_unpacklo_epi8(r0, r1);
    s2 = _mm_unpacklo_epi8(s2, s3);
    r2 = _mm_unpacklo_epi8(r2, r3);
    s0 = _mm_unpacklo_epi64(s0, s2);
    r0 = _mm_unpacklo_epi64(r0, r2);

    /* Throw out the extra byte of each row.  For offset==1 the extra src
     * bytes sit in the low 16 bits of each lane (mask them off); otherwise
     * they sit in the high 16 bits (shift them out).  The ref rows always
     * carry the extra byte last, so they are always shifted.  Both paths
     * leave the valid bytes column-aligned between s0 and r0. */
    if (offset == 1)
      s0 = _mm_and_si128(s0, mask);
    else
      s0 = _mm_slli_epi64(s0, 16);
    r0 = _mm_slli_epi64(r0, 16);

    sad = _mm_add_epi16(sad, _mm_sad_epu8(s0, r0));

    src_ptr += src_stride * 4;
    ref_ptr += ref_stride * 4;
  }

  /* Fold the upper lane's partial sum into the lower lane. */
  sad = _mm_add_epi16(sad, _mm_srli_si128(sad, 8));
  return _mm_cvtsi128_si32(sad);
}
| 99 | |
| 100 #endif | |
| OLD | NEW |