Diffstat (limited to 'lib/clang/12.0.0/include/ppc_wrappers')
-rw-r--r--  lib/clang/12.0.0/include/ppc_wrappers/emmintrin.h   2324
-rw-r--r--  lib/clang/12.0.0/include/ppc_wrappers/mm_malloc.h     50
-rw-r--r--  lib/clang/12.0.0/include/ppc_wrappers/mmintrin.h    1450
-rw-r--r--  lib/clang/12.0.0/include/ppc_wrappers/pmmintrin.h    150
-rw-r--r--  lib/clang/12.0.0/include/ppc_wrappers/smmintrin.h    109
-rw-r--r--  lib/clang/12.0.0/include/ppc_wrappers/tmmintrin.h    495
-rw-r--r--  lib/clang/12.0.0/include/ppc_wrappers/xmmintrin.h   1844
7 files changed, 6422 insertions, 0 deletions
diff --git a/lib/clang/12.0.0/include/ppc_wrappers/emmintrin.h b/lib/clang/12.0.0/include/ppc_wrappers/emmintrin.h
new file mode 100644
index 0000000..4dcb848
--- /dev/null
+++ b/lib/clang/12.0.0/include/ppc_wrappers/emmintrin.h
@@ -0,0 +1,2324 @@
+/*===---- emmintrin.h - Implementation of SSE2 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+ explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since the X86 SSE2 intrinsics mainly handle the __m128i and __m128d
+   types, the PowerPC VMX/VSX ISA is a good match for vector
+   floating-point SIMD operations.  However, scalar floating-point
+   operations in vector (XMM) registers require the POWER8 VSX ISA
+   (2.07) level.  There are differences in the data format and
+   placement of floating-point scalars in the vector register, which
+   require extra steps to match SSE2 scalar floating-point semantics
+   on POWER.
+
+   It should be noted that there are significant differences between
+   X86_64's MXCSR and PowerISA's FPSCR/VSCR registers.  It is
+   recommended to use the portable <fenv.h> interface instead of
+   accessing the MXCSR directly.
+
+   Most SSE2 scalar floating-point intrinsic operations can be
+   performed more efficiently as C language scalar operations or
+   optimized to use vector SIMD operations.  We recommend this for
+   new applications.
+*/
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
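+
+/* Illustrative usage note (not part of the original header): a typical
+   port keeps the x86-style includes and acknowledges the warning above
+   on the compile line, e.g.
+
+     clang -DNO_WARN_X86_INTRINSICS -mcpu=power8 -O2 -c port.c
+
+   (the exact -mcpu level is only an example; anything providing the
+   POWER8 / VSX 2.07 features mentioned above will do), and replaces any
+   direct MXCSR manipulation with portable <fenv.h> calls such as
+   fesetround (FE_TOWARDZERO) or feclearexcept (FE_ALL_EXCEPT).  */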
+
+#ifndef EMMINTRIN_H_
+#define EMMINTRIN_H_
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files. */
+#include <xmmintrin.h>
+
+/* SSE2 */
+typedef __vector double __v2df;
+typedef __vector long long __v2di;
+typedef __vector unsigned long long __v2du;
+typedef __vector int __v4si;
+typedef __vector unsigned int __v4su;
+typedef __vector short __v8hi;
+typedef __vector unsigned short __v8hu;
+typedef __vector signed char __v16qi;
+typedef __vector unsigned char __v16qu;
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
+typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
+
+/* Unaligned version of the same types. */
+typedef long long __m128i_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
+typedef double __m128d_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
+
+/* Define two value permute mask. */
+#define _MM_SHUFFLE2(x,y) (((x) << 1) | (y))
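+
+/* Example (follows directly from the macro): _MM_SHUFFLE2(0,0) == 0,
+   _MM_SHUFFLE2(0,1) == 1, _MM_SHUFFLE2(1,0) == 2 and _MM_SHUFFLE2(1,1) == 3.
+   Bit 0 selects which element of the first _mm_shuffle_pd operand goes
+   to result element [0], bit 1 which element of the second operand goes
+   to result element [1]; see _mm_shuffle_pd further below.  */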
+
+/* Create a vector with element 0 as F and the rest zero. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_sd (double __F)
+{
+ return __extension__ (__m128d){ __F, 0.0 };
+}
+
+/* Create a vector with both elements equal to F. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_pd (double __F)
+{
+ return __extension__ (__m128d){ __F, __F };
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_pd1 (double __F)
+{
+ return _mm_set1_pd (__F);
+}
+
+/* Create a vector with the lower value X and upper value W. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_pd (double __W, double __X)
+{
+ return __extension__ (__m128d){ __X, __W };
+}
+
+/* Create a vector with the lower value W and upper value X. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_pd (double __W, double __X)
+{
+ return __extension__ (__m128d){ __W, __X };
+}
+
+/* Create an undefined vector. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_undefined_pd (void)
+{
+ __m128d __Y = __Y;
+ return __Y;
+}
+
+/* Create a vector of zeros. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_pd (void)
+{
+ return (__m128d) vec_splats (0);
+}
+
+/* Sets the low DPFP value of A from the low value of B. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_move_sd (__m128d __A, __m128d __B)
+{
+ __v2df result = (__v2df) __A;
+ result [0] = ((__v2df) __B)[0];
+ return (__m128d) result;
+}
+
+/* Load two DPFP values from P. The address must be 16-byte aligned. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_pd (double const *__P)
+{
+ return ((__m128d)vec_ld(0, (__v16qu*)__P));
+}
+
+/* Load two DPFP values from P. The address need not be 16-byte aligned. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_pd (double const *__P)
+{
+ return (vec_vsx_ld(0, __P));
+}
+
+/* Create a vector with both elements equal to *P. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load1_pd (double const *__P)
+{
+ return (vec_splats (*__P));
+}
+
+/* Create a vector with element 0 as *P and the rest zero. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_sd (double const *__P)
+{
+ return _mm_set_sd (*__P);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_pd1 (double const *__P)
+{
+ return _mm_load1_pd (__P);
+}
+
+/* Load two DPFP values in reverse order. The address must be aligned. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadr_pd (double const *__P)
+{
+ __v2df __tmp = _mm_load_pd (__P);
+ return (__m128d)vec_xxpermdi (__tmp, __tmp, 2);
+}
+
+/* Store two DPFP values. The address must be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_pd (double *__P, __m128d __A)
+{
+ vec_st((__v16qu)__A, 0, (__v16qu*)__P);
+}
+
+/* Store two DPFP values. The address need not be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_pd (double *__P, __m128d __A)
+{
+ *(__m128d_u *)__P = __A;
+}
+
+/* Stores the lower DPFP value. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_sd (double *__P, __m128d __A)
+{
+ *__P = ((__v2df)__A)[0];
+}
+
+extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_f64 (__m128d __A)
+{
+ return ((__v2df)__A)[0];
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storel_pd (double *__P, __m128d __A)
+{
+ _mm_store_sd (__P, __A);
+}
+
+/* Stores the upper DPFP value. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeh_pd (double *__P, __m128d __A)
+{
+ *__P = ((__v2df)__A)[1];
+}
+
+/* Store the lower DPFP value in both elements of the vector.
+   The address must be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store1_pd (double *__P, __m128d __A)
+{
+ _mm_store_pd (__P, vec_splat (__A, 0));
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_pd1 (double *__P, __m128d __A)
+{
+ _mm_store1_pd (__P, __A);
+}
+
+/* Store two DPFP values in reverse order. The address must be aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storer_pd (double *__P, __m128d __A)
+{
+ _mm_store_pd (__P, vec_xxpermdi (__A, __A, 2));
+}
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi128_si64 (__m128i __A)
+{
+ return ((__v2di)__A)[0];
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi128_si64x (__m128i __A)
+{
+ return ((__v2di)__A)[0];
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) ((__v2df)__A + (__v2df)__B);
+}
+
+/* Add the lower double-precision (64-bit) floating-point element in
+ a and b, store the result in the lower element of dst, and copy
+ the upper element from a to the upper element of dst. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_sd (__m128d __A, __m128d __B)
+{
+ __A[0] = __A[0] + __B[0];
+ return (__A);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) ((__v2df)__A - (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_sd (__m128d __A, __m128d __B)
+{
+ __A[0] = __A[0] - __B[0];
+ return (__A);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) ((__v2df)__A * (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_sd (__m128d __A, __m128d __B)
+{
+ __A[0] = __A[0] * __B[0];
+ return (__A);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) ((__v2df)__A / (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_sd (__m128d __A, __m128d __B)
+{
+ __A[0] = __A[0] / __B[0];
+ return (__A);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_pd (__m128d __A)
+{
+ return (vec_sqrt (__A));
+}
+
+/* Return pair {sqrt (B[0]), A[1]}. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_sd (__m128d __A, __m128d __B)
+{
+ __v2df c;
+ c = vec_sqrt ((__v2df) _mm_set1_pd (__B[0]));
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_pd (__m128d __A, __m128d __B)
+{
+ return (vec_min (__A, __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = vec_min (a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_pd (__m128d __A, __m128d __B)
+{
+ return (vec_max (__A, __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = vec_max (a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpeq ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpge ((__v2df) __A,(__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_pd (__m128d __A, __m128d __B)
+{
+ __v2df temp = (__v2df) vec_cmpeq ((__v2df) __A, (__v2df)__B);
+ return ((__m128d)vec_nor (temp, temp));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnlt_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpge ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnle_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpngt_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnge_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpord_pd (__m128d __A, __m128d __B)
+{
+#if _ARCH_PWR8
+ __v2du c, d;
+ /* Compare against self will return false (0's) if NAN. */
+ c = (__v2du)vec_cmpeq (__A, __A);
+ d = (__v2du)vec_cmpeq (__B, __B);
+#else
+ __v2du a, b;
+ __v2du c, d;
+ const __v2du double_exp_mask = {0x7ff0000000000000, 0x7ff0000000000000};
+ a = (__v2du)vec_abs ((__v2df)__A);
+ b = (__v2du)vec_abs ((__v2df)__B);
+  /* Ordered iff |x|, viewed as an unsigned integer, does not exceed the
+     infinity bit pattern (i.e. x is not a NAN).  */
+  c = (__v2du)vec_cmpge (double_exp_mask, a);
+  d = (__v2du)vec_cmpge (double_exp_mask, b);
+#endif
+ /* A != NAN and B != NAN. */
+ return ((__m128d)vec_and(c, d));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpunord_pd (__m128d __A, __m128d __B)
+{
+#if _ARCH_PWR8
+ __v2du c, d;
+ /* Compare against self will return false (0's) if NAN. */
+ c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
+ d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
+  /* A == NAN OR B == NAN converts to:
+     NOT(A != NAN) OR NOT(B != NAN).  */
+ c = vec_nor (c, c);
+ return ((__m128d)vec_orc(c, d));
+#else
+ __v2du c, d;
+ /* Compare against self will return false (0's) if NAN. */
+ c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
+ d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
+  /* Invert the results so that '1's indicate a NAN.  */
+ c = vec_nor (c, c);
+ d = vec_nor (d, d);
+ return ((__m128d)vec_or(c, d));
+#endif
+}
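+
+/* Worked example of the identities used above (illustrative only):
+   with __A = { 1.0, NAN } and __B = { 2.0, 3.0 },
+     _mm_cmpord_pd (__A, __B)   yields { all-ones, all-zeros } and
+     _mm_cmpunord_pd (__A, __B) yields { all-zeros, all-ones },
+   since ordered(a,b)   = (a == a) AND (b == b) and
+         unordered(a,b) = NOT(a == a) OR NOT(b == b).  */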
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_sd(__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ /* PowerISA VSX does not allow partial (for just lower double)
+ results. So to insure we don't generate spurious exceptions
+ (from the upper double values) we splat the lower double
+ before we do the operation. */
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmpeq(a, b);
+ /* Then we merge the lower double result with the original upper
+ double from __A. */
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmplt(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmple(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmpgt(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmpge(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmpeq(a, b);
+ c = vec_nor (c, c);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnlt_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ /* Not less than is just greater than or equal. */
+ c = (__v2df) vec_cmpge(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnle_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ /* Not less than or equal is just greater than. */
+  c = (__v2df) vec_cmpgt(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpngt_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ /* Not greater than is just less than or equal. */
+ c = (__v2df) vec_cmple(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnge_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ /* Not greater than or equal is just less than. */
+ c = (__v2df) vec_cmplt(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpord_sd (__m128d __A, __m128d __B)
+{
+ __v2df r;
+ r = (__v2df)_mm_cmpord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
+ return (__m128d) _mm_setr_pd (r[0], ((__v2df)__A)[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpunord_sd (__m128d __A, __m128d __B)
+{
+ __v2df r;
+ r = _mm_cmpunord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
+ return (__m128d) _mm_setr_pd (r[0], __A[1]);
+}
+
+/* FIXME
+   The _mm_comi??_sd and _mm_ucomi??_sd implementations below are
+   exactly the same because GCC for PowerPC only generates unordered
+   compares (scalar and vector).
+   Technically _mm_comieq_sd et al. should be using the ordered
+   compare and signal for QNaNs.  The _mm_ucomieq_sd et al. should
+   be OK. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comieq_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] == __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comilt_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] < __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comile_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] <= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comigt_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] > __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comige_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] >= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comineq_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] != __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomieq_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] == __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomilt_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] < __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomile_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] <= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomigt_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] > __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomige_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] >= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomineq_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] != __B[0]);
+}
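+
+/* Behaviour note for the comi/ucomi group above (follows from the C
+   scalar comparisons used): when either low element is NAN the ==, <,
+   <=, > and >= forms return 0 and the != forms return 1.  See the
+   FIXME above for the caveat about exception signalling.  */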
+
+/* Create a vector of Qi, where i is the element number. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi64x (long long __q1, long long __q0)
+{
+ return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi64 (__m64 __q1, __m64 __q0)
+{
+ return _mm_set_epi64x ((long long)__q1, (long long)__q0);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
+{
+ return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
+ short __q3, short __q2, short __q1, short __q0)
+{
+ return __extension__ (__m128i)(__v8hi){
+ __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
+ char __q11, char __q10, char __q09, char __q08,
+ char __q07, char __q06, char __q05, char __q04,
+ char __q03, char __q02, char __q01, char __q00)
+{
+ return __extension__ (__m128i)(__v16qi){
+ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
+ __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
+ };
+}
+
+/* Set all of the elements of the vector to A. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi64x (long long __A)
+{
+ return _mm_set_epi64x (__A, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi64 (__m64 __A)
+{
+ return _mm_set_epi64 (__A, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi32 (int __A)
+{
+ return _mm_set_epi32 (__A, __A, __A, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi16 (short __A)
+{
+ return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi8 (char __A)
+{
+ return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+/* Create a vector of Qi, where i is the element number.
+ The parameter order is reversed from the _mm_set_epi* functions. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_epi64 (__m64 __q0, __m64 __q1)
+{
+ return _mm_set_epi64 (__q1, __q0);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
+{
+ return _mm_set_epi32 (__q3, __q2, __q1, __q0);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
+ short __q4, short __q5, short __q6, short __q7)
+{
+ return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
+ char __q04, char __q05, char __q06, char __q07,
+ char __q08, char __q09, char __q10, char __q11,
+ char __q12, char __q13, char __q14, char __q15)
+{
+ return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
+ __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
+}
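+
+/* Example of the set vs. setr element ordering (illustrative only):
+   _mm_set_epi32 (3, 2, 1, 0) and _mm_setr_epi32 (0, 1, 2, 3) build the
+   same vector, with element [0] == 0 and element [3] == 3.  */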
+
+/* Load 128 bits of integer data.  The address must be 16-byte aligned. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_si128 (__m128i const *__P)
+{
+ return *__P;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_si128 (__m128i_u const *__P)
+{
+ return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadl_epi64 (__m128i_u const *__P)
+{
+ return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_si128 (__m128i *__P, __m128i __B)
+{
+ vec_st ((__v16qu) __B, 0, (__v16qu*)__P);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_si128 (__m128i_u *__P, __m128i __B)
+{
+ *__P = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storel_epi64 (__m128i_u *__P, __m128i __B)
+{
+ *(long long *)__P = ((__v2di)__B)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movepi64_pi64 (__m128i_u __B)
+{
+ return (__m64) ((__v2di)__B)[0];
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movpi64_epi64 (__m64 __A)
+{
+ return _mm_set_epi64 ((__m64)0LL, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_move_epi64 (__m128i __A)
+{
+ return _mm_set_epi64 ((__m64)0LL, (__m64)__A[0]);
+}
+
+/* Create an undefined vector. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_undefined_si128 (void)
+{
+ __m128i __Y = __Y;
+ return __Y;
+}
+
+/* Create a vector of zeros. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_si128 (void)
+{
+ return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi32_pd (__m128i __A)
+{
+ __v2di val;
+  /* For LE we need the Vector Unpack Low Signed Word instruction,
+     which vec_unpackh generates.  */
+ val = (__v2di)vec_unpackh ((__v4si)__A);
+
+ return (__m128d)vec_ctf (val, 0);
+}
+#endif
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi32_ps (__m128i __A)
+{
+ return ((__m128)vec_ctf((__v4si)__A, 0));
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_epi32 (__m128d __A)
+{
+ __v2df rounded = vec_rint (__A);
+ __v4si result, temp;
+ const __v4si vzero =
+ { 0, 0, 0, 0 };
+
+ /* VSX Vector truncate Double-Precision to integer and Convert to
+ Signed Integer Word format with Saturate. */
+ __asm__(
+ "xvcvdpsxws %x0,%x1"
+ : "=wa" (temp)
+ : "wa" (rounded)
+ : );
+
+#ifdef _ARCH_PWR8
+ temp = vec_mergeo (temp, temp);
+ result = (__v4si) vec_vpkudum ((__vector long long) temp,
+ (__vector long long) vzero);
+#else
+ {
+ const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
+ result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ }
+#endif
+ return (__m128i) result;
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_pi32 (__m128d __A)
+{
+ __m128i result = _mm_cvtpd_epi32(__A);
+
+ return (__m64) result[0];
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_ps (__m128d __A)
+{
+ __v4sf result;
+ __v4si temp;
+ const __v4si vzero = { 0, 0, 0, 0 };
+
+ __asm__(
+ "xvcvdpsp %x0,%x1"
+ : "=wa" (temp)
+ : "wa" (__A)
+ : );
+
+#ifdef _ARCH_PWR8
+ temp = vec_mergeo (temp, temp);
+ result = (__v4sf) vec_vpkudum ((__vector long long) temp,
+ (__vector long long) vzero);
+#else
+ {
+ const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
+ result = (__v4sf) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ }
+#endif
+ return ((__m128)result);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttpd_epi32 (__m128d __A)
+{
+ __v4si result;
+ __v4si temp;
+ const __v4si vzero = { 0, 0, 0, 0 };
+
+ /* VSX Vector truncate Double-Precision to integer and Convert to
+ Signed Integer Word format with Saturate. */
+ __asm__(
+ "xvcvdpsxws %x0,%x1"
+ : "=wa" (temp)
+ : "wa" (__A)
+ : );
+
+#ifdef _ARCH_PWR8
+ temp = vec_mergeo (temp, temp);
+ result = (__v4si) vec_vpkudum ((__vector long long) temp,
+ (__vector long long) vzero);
+#else
+ {
+ const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
+ result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ }
+#endif
+
+ return ((__m128i) result);
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttpd_pi32 (__m128d __A)
+{
+ __m128i result = _mm_cvttpd_epi32 (__A);
+
+ return (__m64) result[0];
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi128_si32 (__m128i __A)
+{
+ return ((__v4si)__A)[0];
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi32_pd (__m64 __A)
+{
+ __v4si temp;
+ __v2di tmp2;
+ __v2df result;
+
+ temp = (__v4si)vec_splats (__A);
+ tmp2 = (__v2di)vec_unpackl (temp);
+ result = vec_ctf ((__vector signed long long) tmp2, 0);
+ return (__m128d)result;
+}
+#endif
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_epi32 (__m128 __A)
+{
+ __v4sf rounded;
+ __v4si result;
+
+ rounded = vec_rint((__v4sf) __A);
+ result = vec_cts (rounded, 0);
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttps_epi32 (__m128 __A)
+{
+ __v4si result;
+
+ result = vec_cts ((__v4sf) __A, 0);
+ return (__m128i) result;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pd (__m128 __A)
+{
+ /* Check if vec_doubleh is defined by <altivec.h>. If so use that. */
+#ifdef vec_doubleh
+ return (__m128d) vec_doubleh ((__v4sf)__A);
+#else
+  /* Otherwise the compiler is not current, so we need to generate the
+     equivalent code.  */
+ __v4sf a = (__v4sf)__A;
+ __v4sf temp;
+ __v2df result;
+#ifdef __LITTLE_ENDIAN__
+ /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[1], [3]}, so we use two
+ shift left double vector word immediates to get the elements
+ lined up. */
+ temp = __builtin_vsx_xxsldwi (a, a, 3);
+ temp = __builtin_vsx_xxsldwi (a, temp, 2);
+#else
+  /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[0], [2]}, so we merge the
+     vector words to get the elements lined up.  */
+ temp = vec_vmrghw (a, a);
+#endif
+ __asm__(
+ " xvcvspdp %x0,%x1"
+ : "=wa" (result)
+ : "wa" (temp)
+ : );
+ return (__m128d) result;
+#endif
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_si32 (__m128d __A)
+{
+ __v2df rounded = vec_rint((__v2df) __A);
+ int result = ((__v2df)rounded)[0];
+
+ return result;
+}
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_si64 (__m128d __A)
+{
+ __v2df rounded = vec_rint ((__v2df) __A );
+ long long result = ((__v2df) rounded)[0];
+
+ return result;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_si64x (__m128d __A)
+{
+ return _mm_cvtsd_si64 ((__v2df)__A);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_si32 (__m128d __A)
+{
+ int result = ((__v2df)__A)[0];
+
+ return result;
+}
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_si64 (__m128d __A)
+{
+ long long result = ((__v2df)__A)[0];
+
+ return result;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_si64x (__m128d __A)
+{
+ return _mm_cvttsd_si64 (__A);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_ss (__m128 __A, __m128d __B)
+{
+ __v4sf result = (__v4sf)__A;
+
+#ifdef __LITTLE_ENDIAN__
+ __v4sf temp_s;
+ /* Copy double element[0] to element [1] for conversion. */
+ __v2df temp_b = vec_splat((__v2df)__B, 0);
+
+ /* Pre-rotate __A left 3 (logically right 1) elements. */
+ result = __builtin_vsx_xxsldwi (result, result, 3);
+ /* Convert double to single float scalar in a vector. */
+ __asm__(
+ "xscvdpsp %x0,%x1"
+ : "=wa" (temp_s)
+ : "wa" (temp_b)
+ : );
+ /* Shift the resulting scalar into vector element [0]. */
+ result = __builtin_vsx_xxsldwi (result, temp_s, 1);
+#else
+ result [0] = ((__v2df)__B)[0];
+#endif
+ return (__m128) result;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi32_sd (__m128d __A, int __B)
+{
+ __v2df result = (__v2df)__A;
+ double db = __B;
+ result [0] = db;
+ return (__m128d)result;
+}
+
+/* Intel intrinsic. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64_sd (__m128d __A, long long __B)
+{
+ __v2df result = (__v2df)__A;
+ double db = __B;
+ result [0] = db;
+ return (__m128d)result;
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64x_sd (__m128d __A, long long __B)
+{
+ return _mm_cvtsi64_sd (__A, __B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_sd (__m128d __A, __m128 __B)
+{
+#ifdef __LITTLE_ENDIAN__
+ /* Use splat to move element [0] into position for the convert. */
+ __v4sf temp = vec_splat ((__v4sf)__B, 0);
+ __v2df res;
+ /* Convert single float scalar to double in a vector. */
+ __asm__(
+ "xscvspdp %x0,%x1"
+ : "=wa" (res)
+ : "wa" (temp)
+ : );
+ return (__m128d) vec_mergel (res, (__v2df)__A);
+#else
+ __v2df res = (__v2df)__A;
+ res [0] = ((__v4sf)__B) [0];
+ return (__m128d) res;
+#endif
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
+{
+ __vector double result;
+ const int litmsk = __mask & 0x3;
+
+ if (litmsk == 0)
+ result = vec_mergeh (__A, __B);
+#if __GNUC__ < 6
+ else if (litmsk == 1)
+ result = vec_xxpermdi (__B, __A, 2);
+ else if (litmsk == 2)
+ result = vec_xxpermdi (__B, __A, 1);
+#else
+ else if (litmsk == 1)
+ result = vec_xxpermdi (__A, __B, 2);
+ else if (litmsk == 2)
+ result = vec_xxpermdi (__A, __B, 1);
+#endif
+ else
+ result = vec_mergel (__A, __B);
+
+ return result;
+}
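+
+/* Intended SSE2 mapping of the two mask bits handled above (shown for
+   reference):
+     __mask & 3 == 0  ->  { __A[0], __B[0] }
+     __mask & 3 == 1  ->  { __A[1], __B[0] }
+     __mask & 3 == 2  ->  { __A[0], __B[1] }
+     __mask & 3 == 3  ->  { __A[1], __B[1] }
+   e.g. _mm_shuffle_pd (a, b, _MM_SHUFFLE2 (1, 0)) returns { a[0], b[1] }.  */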
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) vec_mergel ((__v2df)__A, (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) vec_mergeh ((__v2df)__A, (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadh_pd (__m128d __A, double const *__B)
+{
+ __v2df result = (__v2df)__A;
+ result [1] = *__B;
+ return (__m128d)result;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadl_pd (__m128d __A, double const *__B)
+{
+ __v2df result = (__v2df)__A;
+ result [0] = *__B;
+ return (__m128d)result;
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum. */
+
+/* Creates a 2-bit mask from the most significant bits of the DPFP values. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_pd (__m128d __A)
+{
+ __vector unsigned long long result;
+ static const __vector unsigned int perm_mask =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x80800040, 0x80808080, 0x80808080, 0x80808080
+#else
+ 0x80808080, 0x80808080, 0x80808080, 0x80804000
+#endif
+ };
+
+ result = ((__vector unsigned long long)
+ vec_vbpermq ((__vector unsigned char) __A,
+ (__vector unsigned char) perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+ return result[1];
+#else
+ return result[0];
+#endif
+}
+#endif /* _ARCH_PWR8 */
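+
+/* Note on the permute constant above (explanatory, not in the original):
+   vec_vbpermq selects one bit of __A per byte of perm_mask; the byte
+   values 0x00 and 0x40 pick bits 0 and 64 of the 128-bit register,
+   which are the sign bits of the two doubles, while 0x80 bytes
+   contribute 0.  The gathered bits land in one doubleword of the
+   result, and the final element select returns them as the 2-bit
+   movemask.  */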
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_packs_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_packs ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_packs_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_packs ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_packus_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_packsu ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergel ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergel ((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergel ((__v4su)__A, (__v4su)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergel ((__vector long long) __A,
+ (__vector long long) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergeh ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergeh ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergeh ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergeh ((__vector long long) __A,
+ (__vector long long) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v16qu)__A + (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v8hu)__A + (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v4su)__A + (__v4su)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v2du)__A + (__v2du)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_adds_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_adds ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_adds_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_adds ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_adds_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_adds ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_adds_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_adds ((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v16qu)__A - (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v8hu)__A - (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v4su)__A - (__v4su)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v2du)__A - (__v2du)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_subs_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_subs ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_subs_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_subs ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_subs_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_subs ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_subs_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_subs ((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_madd_epi16 (__m128i __A, __m128i __B)
+{
+ __vector signed int zero = {0, 0, 0, 0};
+
+ return (__m128i) vec_vmsumshm ((__v8hi)__A, (__v8hi)__B, zero);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhi_epi16 (__m128i __A, __m128i __B)
+{
+ __vector signed int w0, w1;
+
+ __vector unsigned char xform1 = {
+#ifdef __LITTLE_ENDIAN__
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
+ 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
+ 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+ };
+
+ w0 = vec_vmulesh ((__v8hi)__A, (__v8hi)__B);
+ w1 = vec_vmulosh ((__v8hi)__A, (__v8hi)__B);
+ return (__m128i) vec_perm (w0, w1, xform1);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mullo_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v8hi)__A * (__v8hi)__B);
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_su32 (__m64 __A, __m64 __B)
+{
+ unsigned int a = __A;
+ unsigned int b = __B;
+
+ return ((__m64)a * (__m64)b);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_epu32 (__m128i __A, __m128i __B)
+{
+#if __GNUC__ < 8
+ __v2du result;
+
+#ifdef __LITTLE_ENDIAN__
+ /* VMX Vector Multiply Odd Unsigned Word. */
+ __asm__(
+ "vmulouw %0,%1,%2"
+ : "=v" (result)
+ : "v" (__A), "v" (__B)
+ : );
+#else
+ /* VMX Vector Multiply Even Unsigned Word. */
+ __asm__(
+ "vmuleuw %0,%1,%2"
+ : "=v" (result)
+ : "v" (__A), "v" (__B)
+ : );
+#endif
+ return (__m128i) result;
+#else
+ return (__m128i) vec_mule ((__v4su)__A, (__v4su)__B);
+#endif
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_epi16 (__m128i __A, int __B)
+{
+ __v8hu lshift;
+ __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (__B >= 0 && __B < 16)
+ {
+ if (__builtin_constant_p(__B))
+ lshift = (__v8hu) vec_splat_s16(__B);
+ else
+ lshift = vec_splats ((unsigned short) __B);
+
+ result = vec_sl ((__v8hi) __A, lshift);
+ }
+
+ return (__m128i) result;
+}
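+
+/* Note on the __builtin_constant_p test above (explanatory, not in the
+   original): vec_splat_s16 takes a 5-bit signed literal (-16..15), so a
+   compile-time-constant shift count in that range can be materialized
+   with a single splat-immediate instead of loading the count into a
+   vector register.  The other shift-by-immediate forms below use the
+   same pattern.  */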
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_epi32 (__m128i __A, int __B)
+{
+ __v4su lshift;
+ __v4si result = { 0, 0, 0, 0 };
+
+ if (__B >= 0 && __B < 32)
+ {
+ if (__builtin_constant_p(__B) && __B < 16)
+ lshift = (__v4su) vec_splat_s32(__B);
+ else
+ lshift = vec_splats ((unsigned int) __B);
+
+ result = vec_sl ((__v4si) __A, lshift);
+ }
+
+ return (__m128i) result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_epi64 (__m128i __A, int __B)
+{
+ __v2du lshift;
+ __v2di result = { 0, 0 };
+
+ if (__B >= 0 && __B < 64)
+ {
+ if (__builtin_constant_p(__B) && __B < 16)
+ lshift = (__v2du) vec_splat_s32(__B);
+ else
+ lshift = (__v2du) vec_splats ((unsigned int) __B);
+
+ result = vec_sl ((__v2di) __A, lshift);
+ }
+
+ return (__m128i) result;
+}
+#endif
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srai_epi16 (__m128i __A, int __B)
+{
+ __v8hu rshift = { 15, 15, 15, 15, 15, 15, 15, 15 };
+ __v8hi result;
+
+ if (__B < 16)
+ {
+ if (__builtin_constant_p(__B))
+ rshift = (__v8hu) vec_splat_s16(__B);
+ else
+ rshift = vec_splats ((unsigned short) __B);
+ }
+ result = vec_sra ((__v8hi) __A, rshift);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srai_epi32 (__m128i __A, int __B)
+{
+ __v4su rshift = { 31, 31, 31, 31 };
+ __v4si result;
+
+ if (__B < 32)
+ {
+ if (__builtin_constant_p(__B))
+ {
+ if (__B < 16)
+ rshift = (__v4su) vec_splat_s32(__B);
+ else
+ rshift = (__v4su) vec_splats((unsigned int)__B);
+ }
+ else
+ rshift = vec_splats ((unsigned int) __B);
+ }
+ result = vec_sra ((__v4si) __A, rshift);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_bslli_si128 (__m128i __A, const int __N)
+{
+ __v16qu result;
+ const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (__N < 16)
+ result = vec_sld ((__v16qu) __A, zeros, __N);
+ else
+ result = zeros;
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_bsrli_si128 (__m128i __A, const int __N)
+{
+ __v16qu result;
+ const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (__N < 16)
+#ifdef __LITTLE_ENDIAN__
+ if (__builtin_constant_p(__N))
+ /* Would like to use Vector Shift Left Double by Octet
+ Immediate here to use the immediate form and avoid
+ load of __N * 8 value into a separate VR. */
+ result = vec_sld (zeros, (__v16qu) __A, (16 - __N));
+ else
+#endif
+ {
+ __v16qu shift = vec_splats((unsigned char)(__N*8));
+#ifdef __LITTLE_ENDIAN__
+ result = vec_sro ((__v16qu)__A, shift);
+#else
+ result = vec_slo ((__v16qu)__A, shift);
+#endif
+ }
+ else
+ result = zeros;
+
+ return (__m128i) result;
+}
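+
+/* Example (illustrative only): _mm_bsrli_si128 (__A, 4) shifts the whole
+   128-bit value right by 4 bytes, so bytes [4..15] of __A move to bytes
+   [0..11] of the result and the upper 4 bytes of the result are zero.  */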
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srli_si128 (__m128i __A, const int __N)
+{
+ return _mm_bsrli_si128 (__A, __N);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_si128 (__m128i __A, const int _imm5)
+{
+ __v16qu result;
+ const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (_imm5 < 16)
+#ifdef __LITTLE_ENDIAN__
+ result = vec_sld ((__v16qu) __A, zeros, _imm5);
+#else
+ result = vec_sld (zeros, (__v16qu) __A, (16 - _imm5));
+#endif
+ else
+ result = zeros;
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srli_epi16 (__m128i __A, int __B)
+{
+ __v8hu rshift;
+ __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (__B < 16)
+ {
+ if (__builtin_constant_p(__B))
+ rshift = (__v8hu) vec_splat_s16(__B);
+ else
+ rshift = vec_splats ((unsigned short) __B);
+
+ result = vec_sr ((__v8hi) __A, rshift);
+ }
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srli_epi32 (__m128i __A, int __B)
+{
+ __v4su rshift;
+ __v4si result = { 0, 0, 0, 0 };
+
+ if (__B < 32)
+ {
+ if (__builtin_constant_p(__B))
+ {
+ if (__B < 16)
+ rshift = (__v4su) vec_splat_s32(__B);
+ else
+ rshift = (__v4su) vec_splats((unsigned int)__B);
+ }
+ else
+ rshift = vec_splats ((unsigned int) __B);
+
+ result = vec_sr ((__v4si) __A, rshift);
+ }
+
+ return (__m128i) result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srli_epi64 (__m128i __A, int __B)
+{
+ __v2du rshift;
+ __v2di result = { 0, 0 };
+
+ if (__B < 64)
+ {
+ if (__builtin_constant_p(__B))
+ {
+ if (__B < 16)
+ rshift = (__v2du) vec_splat_s32(__B);
+ else
+ rshift = (__v2du) vec_splats((unsigned long long)__B);
+ }
+ else
+ rshift = (__v2du) vec_splats ((unsigned int) __B);
+
+ result = vec_sr ((__v2di) __A, rshift);
+ }
+
+ return (__m128i) result;
+}
+#endif
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sll_epi16 (__m128i __A, __m128i __B)
+{
+ __v8hu lshift;
+ __vector __bool short shmask;
+ const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
+ __v8hu result;
+
+#ifdef __LITTLE_ENDIAN__
+ lshift = vec_splat ((__v8hu) __B, 0);
+#else
+ lshift = vec_splat ((__v8hu) __B, 3);
+#endif
+ shmask = vec_cmple (lshift, shmax);
+ result = vec_sl ((__v8hu) __A, lshift);
+ result = vec_sel ((__v8hu) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sll_epi32 (__m128i __A, __m128i __B)
+{
+ __v4su lshift;
+ __vector __bool int shmask;
+ const __v4su shmax = { 32, 32, 32, 32 };
+ __v4su result;
+#ifdef __LITTLE_ENDIAN__
+ lshift = vec_splat ((__v4su) __B, 0);
+#else
+ lshift = vec_splat ((__v4su) __B, 1);
+#endif
+ shmask = vec_cmplt (lshift, shmax);
+ result = vec_sl ((__v4su) __A, lshift);
+ result = vec_sel ((__v4su) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sll_epi64 (__m128i __A, __m128i __B)
+{
+ __v2du lshift;
+ __vector __bool long long shmask;
+ const __v2du shmax = { 64, 64 };
+ __v2du result;
+
+ lshift = vec_splat ((__v2du) __B, 0);
+ shmask = vec_cmplt (lshift, shmax);
+ result = vec_sl ((__v2du) __A, lshift);
+ result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
+
+ return (__m128i) result;
+}
+#endif
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sra_epi16 (__m128i __A, __m128i __B)
+{
+ const __v8hu rshmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
+ __v8hu rshift;
+ __v8hi result;
+
+#ifdef __LITTLE_ENDIAN__
+ rshift = vec_splat ((__v8hu)__B, 0);
+#else
+ rshift = vec_splat ((__v8hu)__B, 3);
+#endif
+ rshift = vec_min (rshift, rshmax);
+ result = vec_sra ((__v8hi) __A, rshift);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sra_epi32 (__m128i __A, __m128i __B)
+{
+ const __v4su rshmax = { 31, 31, 31, 31 };
+ __v4su rshift;
+ __v4si result;
+
+#ifdef __LITTLE_ENDIAN__
+ rshift = vec_splat ((__v4su)__B, 0);
+#else
+ rshift = vec_splat ((__v4su)__B, 1);
+#endif
+ rshift = vec_min (rshift, rshmax);
+ result = vec_sra ((__v4si) __A, rshift);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srl_epi16 (__m128i __A, __m128i __B)
+{
+ __v8hu rshift;
+ __vector __bool short shmask;
+ const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
+ __v8hu result;
+
+#ifdef __LITTLE_ENDIAN__
+ rshift = vec_splat ((__v8hu) __B, 0);
+#else
+ rshift = vec_splat ((__v8hu) __B, 3);
+#endif
+ shmask = vec_cmple (rshift, shmax);
+ result = vec_sr ((__v8hu) __A, rshift);
+ result = vec_sel ((__v8hu) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srl_epi32 (__m128i __A, __m128i __B)
+{
+ __v4su rshift;
+ __vector __bool int shmask;
+ const __v4su shmax = { 32, 32, 32, 32 };
+ __v4su result;
+
+#ifdef __LITTLE_ENDIAN__
+ rshift = vec_splat ((__v4su) __B, 0);
+#else
+ rshift = vec_splat ((__v4su) __B, 1);
+#endif
+ shmask = vec_cmplt (rshift, shmax);
+ result = vec_sr ((__v4su) __A, rshift);
+ result = vec_sel ((__v4su) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srl_epi64 (__m128i __A, __m128i __B)
+{
+ __v2du rshift;
+ __vector __bool long long shmask;
+ const __v2du shmax = { 64, 64 };
+ __v2du result;
+
+ rshift = vec_splat ((__v2du) __B, 0);
+ shmask = vec_cmplt (rshift, shmax);
+ result = vec_sr ((__v2du) __A, rshift);
+ result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
+
+ return (__m128i) result;
+}
+#endif
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_and_pd (__m128d __A, __m128d __B)
+{
+ return (vec_and ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_andnot_pd (__m128d __A, __m128d __B)
+{
+ return (vec_andc ((__v2df) __B, (__v2df) __A));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_or_pd (__m128d __A, __m128d __B)
+{
+ return (vec_or ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_xor_pd (__m128d __A, __m128d __B)
+{
+ return (vec_xor ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_and_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)vec_and ((__v2di) __A, (__v2di) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_andnot_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)vec_andc ((__v2di) __B, (__v2di) __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_or_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)vec_or ((__v2di) __A, (__v2di) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_xor_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)vec_xor ((__v2di) __A, (__v2di) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpeq ((__v16qi) __A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpeq ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpeq ((__v4si) __A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmplt ((__v16qi) __A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmplt ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmplt ((__v4si) __A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpgt ((__v16qi) __A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpgt ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpgt ((__v4si) __A, (__v4si)__B);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_extract_epi16 (__m128i const __A, int const __N)
+{
+ return (unsigned short) ((__v8hi)__A)[__N & 7];
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
+{
+ __v8hi result = (__v8hi)__A;
+
+ result [(__N & 7)] = __D;
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_max ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_max ((__v16qu) __A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_min ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_min ((__v16qu) __A, (__v16qu)__B);
+}
+
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum. */
+
+/* Creates a 16-bit mask from the most significant bits of the 16
+ 8-bit elements in A. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_epi8 (__m128i __A)
+{
+ __vector unsigned long long result;
+ static const __vector unsigned char perm_mask =
+ {
+ 0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
+ 0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00
+ };
+
+ result = ((__vector unsigned long long)
+ vec_vbpermq ((__vector unsigned char) __A,
+ (__vector unsigned char) perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+ return result[1];
+#else
+ return result[0];
+#endif
+}
+#endif /* _ARCH_PWR8 */
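+
+/* Example (illustrative): bit i of the result mirrors the most significant
+ bit of byte i of __A, so _mm_movemask_epi8 (_mm_set1_epi8 (-1)) returns
+ 0xFFFF and _mm_movemask_epi8 (_mm_setzero_si128 ()) returns 0. */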
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhi_epu16 (__m128i __A, __m128i __B)
+{
+ __v4su w0, w1;
+ __v16qu xform1 = {
+#ifdef __LITTLE_ENDIAN__
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
+ 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
+ 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+ };
+
+ w0 = vec_vmuleuh ((__v8hu)__A, (__v8hu)__B);
+ w1 = vec_vmulouh ((__v8hu)__A, (__v8hu)__B);
+ return (__m128i) vec_perm (w0, w1, xform1);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shufflehi_epi16 (__m128i __A, const int __mask)
+{
+ unsigned long element_selector_98 = __mask & 0x03;
+ unsigned long element_selector_BA = (__mask >> 2) & 0x03;
+ unsigned long element_selector_DC = (__mask >> 4) & 0x03;
+ unsigned long element_selector_FE = (__mask >> 6) & 0x03;
+ static const unsigned short permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+ 0x0809, 0x0A0B, 0x0C0D, 0x0E0F
+#endif
+ };
+ __v2du pmask =
+#ifdef __LITTLE_ENDIAN__
+ { 0x1716151413121110UL, 0UL};
+#else
+ { 0x1011121314151617UL, 0UL};
+#endif
+ __m64_union t;
+ __v2du a, r;
+
+ t.as_short[0] = permute_selectors[element_selector_98];
+ t.as_short[1] = permute_selectors[element_selector_BA];
+ t.as_short[2] = permute_selectors[element_selector_DC];
+ t.as_short[3] = permute_selectors[element_selector_FE];
+ pmask[1] = t.as_m64;
+ a = (__v2du)__A;
+ r = vec_perm (a, a, (__vector unsigned char)pmask);
+ return (__m128i) r;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shufflelo_epi16 (__m128i __A, const int __mask)
+{
+ unsigned long element_selector_10 = __mask & 0x03;
+ unsigned long element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned short permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x0100, 0x0302, 0x0504, 0x0706
+#else
+ 0x0001, 0x0203, 0x0405, 0x0607
+#endif
+ };
+ __v2du pmask =
+#ifdef __LITTLE_ENDIAN__
+ { 0UL, 0x1f1e1d1c1b1a1918UL};
+#else
+ { 0UL, 0x18191a1b1c1d1e1fUL};
+#endif
+ __m64_union t;
+ __v2du a, r;
+ t.as_short[0] = permute_selectors[element_selector_10];
+ t.as_short[1] = permute_selectors[element_selector_32];
+ t.as_short[2] = permute_selectors[element_selector_54];
+ t.as_short[3] = permute_selectors[element_selector_76];
+ pmask[0] = t.as_m64;
+ a = (__v2du)__A;
+ r = vec_perm (a, a, (__vector unsigned char)pmask);
+ return (__m128i) r;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_epi32 (__m128i __A, const int __mask)
+{
+ unsigned long element_selector_10 = __mask & 0x03;
+ unsigned long element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned int permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+ 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+ };
+ __v4su t;
+
+ t[0] = permute_selectors[element_selector_10];
+ t[1] = permute_selectors[element_selector_32];
+ t[2] = permute_selectors[element_selector_54] + 0x10101010;
+ t[3] = permute_selectors[element_selector_76] + 0x10101010;
+ return (__m128i)vec_perm ((__v4si) __A, (__v4si)__A, (__vector unsigned char)t);
+}
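+
+/* Example (illustrative): _mm_shuffle_epi32 (v, 0x1B) reverses the four
+ 32-bit elements, since 0x1B encodes the selectors 3,2,1,0 for result
+ elements 0..3, while _MM_SHUFFLE (3, 2, 1, 0) == 0xE4 leaves the order
+ unchanged. */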
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
+{
+ __v2du hibit = { 0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
+ __v16qu mask, tmp;
+ __m128i_u *p = (__m128i_u*)__C;
+
+ tmp = (__v16qu)_mm_loadu_si128(p);
+ mask = (__v16qu)vec_cmpgt ((__v16qu)__B, (__v16qu)hibit);
+ tmp = vec_sel (tmp, (__v16qu)__A, mask);
+ _mm_storeu_si128 (p, (__m128i)tmp);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_avg ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_avg ((__v8hu)__A, (__v8hu)__B);
+}
+
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sad_epu8 (__m128i __A, __m128i __B)
+{
+ __v16qu a, b;
+ __v16qu vmin, vmax, vabsdiff;
+ __v4si vsum;
+ const __v4su zero = { 0, 0, 0, 0 };
+ __v4si result;
+
+ a = (__v16qu) __A;
+ b = (__v16qu) __B;
+ vmin = vec_min (a, b);
+ vmax = vec_max (a, b);
+ vabsdiff = vec_sub (vmax, vmin);
+ /* Sum four groups of bytes into integers. */
+ vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
+ /* Sum across four integers with two integer results. */
+ result = vec_sum2s (vsum, (__vector signed int) zero);
+ /* Rotate the sums into the correct position. */
+#ifdef __LITTLE_ENDIAN__
+ result = vec_sld (result, result, 4);
+#else
+ result = vec_sld (result, result, 6);
+#endif
+ return (__m128i) result;
+}
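+
+/* Worked example (illustrative): if every byte of __A is 3 and every byte
+ of __B is 1, each absolute difference is 2, so each group of eight bytes
+ sums to 16 and _mm_sad_epu8 returns 16 in the low 16 bits of each 64-bit
+ half of the result. */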
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_si32 (int *__A, int __B)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ "dcbtstt 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+ *__A = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_si64 (long long int *__A, long long int __B)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ "dcbtstt 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+ *__A = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_si128 (__m128i *__A, __m128i __B)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ "dcbtstt 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+ *__A = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_pd (double *__A, __m128d __B)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ "dcbtstt 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+ *(__m128d*)__A = __B;
+}
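+
+/* Note (a sketch, not a performance guarantee): PowerISA has no direct
+ analogue of x86 non-temporal stores, so the wrappers above issue a
+ "data cache block touch for store, transient" hint (dcbtstt) followed
+ by an ordinary store. Typical use is unchanged from x86, e.g.
+
+ for (i = 0; i < n; i++)
+ _mm_stream_si32 (&dst[i], src[i]);
+
+ where dst, src, n and i are placeholders for the caller's own data. */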
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_clflush (void const *__A)
+{
+ /* Use the data cache block flush. */
+ __asm__ (
+ "dcbf 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_lfence (void)
+{
+ /* Use light weight sync for load to load ordering. */
+ __atomic_thread_fence (__ATOMIC_RELEASE);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mfence (void)
+{
+ /* Use heavy weight sync for any to any ordering. */
+ __atomic_thread_fence (__ATOMIC_SEQ_CST);
+}
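+
+/* Mapping sketch (illustrative): these fences are expressed with the
+ GCC/Clang atomic builtins, which on POWER typically lower to lwsync for
+ _mm_lfence and to a full hwsync for _mm_mfence. A publish pattern such
+ as
+
+ data = value;
+ _mm_mfence ();
+ ready = 1;
+
+ therefore keeps its ordering, with data, value and ready standing in for
+ the caller's own variables. */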
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi32_si128 (int __A)
+{
+ return _mm_set_epi32 (0, 0, 0, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64_si128 (long long __A)
+{
+ return __extension__ (__m128i)(__v2di){ __A, 0LL };
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64x_si128 (long long __A)
+{
+ return __extension__ (__m128i)(__v2di){ __A, 0LL };
+}
+
+/* Casts between various SP, DP, INT vector types. Note that these do no
+ conversion of values, they just change the type. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_ps(__m128d __A)
+{
+ return (__m128) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_si128(__m128d __A)
+{
+ return (__m128i) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_pd(__m128 __A)
+{
+ return (__m128d) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_si128(__m128 __A)
+{
+ return (__m128i) __A;
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_ps(__m128i __A)
+{
+ return (__m128) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_pd(__m128i __A)
+{
+ return (__m128d) __A;
+}
+
+#else
+#include_next <emmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* EMMINTRIN_H_ */
diff --git a/lib/clang/12.0.0/include/ppc_wrappers/mm_malloc.h b/lib/clang/12.0.0/include/ppc_wrappers/mm_malloc.h
new file mode 100644
index 0000000..24b14c8
--- /dev/null
+++ b/lib/clang/12.0.0/include/ppc_wrappers/mm_malloc.h
@@ -0,0 +1,50 @@
+/*===---- mm_malloc.h - Implementation of _mm_malloc and _mm_free ----------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM_MALLOC_H_INCLUDED
+#define _MM_MALLOC_H_INCLUDED
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <stdlib.h>
+
+/* We can't depend on <stdlib.h> to declare posix_memalign, since its
+ prototype may not be visible; declare it here. */
+#ifndef __cplusplus
+extern int posix_memalign (void **, size_t, size_t);
+#else
+extern "C" int posix_memalign (void **, size_t, size_t) throw ();
+#endif
+
+static __inline void *
+_mm_malloc (size_t size, size_t alignment)
+{
+ /* PowerPC64 ELF V2 ABI requires quadword alignment. */
+ size_t vec_align = sizeof (__vector float);
+ void *ptr;
+
+ if (alignment < vec_align)
+ alignment = vec_align;
+ if (posix_memalign (&ptr, alignment, size) == 0)
+ return ptr;
+ else
+ return NULL;
+}
+
+static __inline void
+_mm_free (void * ptr)
+{
+ free (ptr);
+}
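+
+/* Usage sketch (illustrative): the requested alignment is raised to at
+ least the 16-byte vector alignment before calling posix_memalign, e.g.
+
+ float *buf = (float *) _mm_malloc (1024 * sizeof (float), 64);
+ if (buf)
+ {
+ ... use buf ...
+ _mm_free (buf);
+ }
+*/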
+
+#else
+#include_next <mm_malloc.h>
+#endif
+
+#endif /* _MM_MALLOC_H_INCLUDED */
diff --git a/lib/clang/12.0.0/include/ppc_wrappers/mmintrin.h b/lib/clang/12.0.0/include/ppc_wrappers/mmintrin.h
new file mode 100644
index 0000000..c55c447
--- /dev/null
+++ b/lib/clang/12.0.0/include/ppc_wrappers/mmintrin.h
@@ -0,0 +1,1450 @@
+/*===---- mmintrin.h - Implementation of MMX intrinsics on PowerPC ---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+ explicitly from x86_64 to powerpc64/powerpc64le.
+
+ Since the PowerPC target doesn't support a native 64-bit vector type,
+ we typedef __m64 to 64-bit unsigned long long in the MMX intrinsics,
+ which works well for _si64 and some _pi32 operations.
+
+ For _pi16 and _pi8 operations, it's better to transfer __m64 into a
+ 128-bit PowerPC vector first. POWER8 introduced direct register move
+ instructions, which help make the implementation more efficient.
+
+ It's the user's responsibility to determine whether the results of such
+ a port are acceptable or whether further changes are needed. Please
+ note that much code using Intel intrinsics CAN BE REWRITTEN in more
+ portable and efficient standard C or GNU C extensions using 64-bit
+ scalar operations or 128-bit SSE/Altivec operations, which is the
+ recommended approach. */
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <altivec.h>
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef __attribute__((__aligned__(8))) unsigned long long __m64;
+
+typedef __attribute__((__aligned__(8))) union {
+ __m64 as_m64;
+ char as_char[8];
+ signed char as_signed_char[8];
+ short as_short[4];
+ int as_int[2];
+ long long as_long_long;
+ float as_float[2];
+ double as_double;
+} __m64_union;
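+
+/* Layout sketch (illustrative, assuming little-endian powerpc64le): the
+ union aliases the 64-bit __m64 value with its component lanes, so
+ as_char[0], as_short[0] and as_int[0] all name the least significant
+ lane. For example, after
+
+ __m64_union u;
+ u.as_m64 = _mm_set_pi16 (4, 3, 2, 1);
+
+ u.as_short[0] is 1 and u.as_short[3] is 4. */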
+
+/* Empty the multimedia state. */
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_empty(void) {
+ /* nothing to do on PowerPC. */
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_empty(void) {
+ /* nothing to do on PowerPC. */
+}
+
+/* Convert I to a __m64 object. The integer is zero-extended to 64 bits. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_si64(int __i) {
+ return (__m64)(unsigned int)__i;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_from_int(int __i) {
+ return _mm_cvtsi32_si64(__i);
+}
+
+/* Convert the lower 32 bits of the __m64 object into an integer. */
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si32(__m64 __i) {
+ return ((int)__i);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_to_int(__m64 __i) {
+ return _mm_cvtsi64_si32(__i);
+}
+
+/* Convert I to a __m64 object. */
+
+/* Intel intrinsic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_from_int64(long long __i) {
+ return (__m64)__i;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_m64(long long __i) {
+ return (__m64)__i;
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_si64(long long __i) {
+ return (__m64)__i;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi64x(long long __i) {
+ return (__m64)__i;
+}
+
+/* Convert the __m64 object to a 64-bit integer. */
+
+/* Intel intrinsic. */
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_to_int64(__m64 __i) {
+ return (long long)__i;
+}
+
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtm64_si64(__m64 __i) {
+ return (long long)__i;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si64x(__m64 __i) {
+ return (long long)__i;
+}
+
+#ifdef _ARCH_PWR8
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short vm1;
+ __vector signed char vresult;
+
+ vm1 = (__vector signed short)(__vector unsigned long long)
+#ifdef __LITTLE_ENDIAN__
+ {__m1, __m2};
+#else
+ {__m2, __m1};
+#endif
+ vresult = vec_packs(vm1, vm1);
+ return (__m64)((__vector long long)vresult)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packsswb(__m64 __m1, __m64 __m2) {
+ return _mm_packs_pi16(__m1, __m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pi32(__m64 __m1, __m64 __m2) {
+ __vector signed int vm1;
+ __vector signed short vresult;
+
+ vm1 = (__vector signed int)(__vector unsigned long long)
+#ifdef __LITTLE_ENDIAN__
+ {__m1, __m2};
+#else
+ {__m2, __m1};
+#endif
+ vresult = vec_packs(vm1, vm1);
+ return (__m64)((__vector long long)vresult)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packssdw(__m64 __m1, __m64 __m2) {
+ return _mm_packs_pi32(__m1, __m2);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pu16(__m64 __m1, __m64 __m2) {
+ __vector unsigned char r;
+ __vector signed short vm1 = (__vector signed short)(__vector long long)
+#ifdef __LITTLE_ENDIAN__
+ {__m1, __m2};
+#else
+ {__m2, __m1};
+#endif
+ const __vector signed short __zero = {0};
+ __vector __bool short __select = vec_cmplt(vm1, __zero);
+ r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1);
+ __vector __bool char packsel = vec_pack(__select, __select);
+ r = vec_sel(r, (const __vector unsigned char)__zero, packsel);
+ return (__m64)((__vector long long)r)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packuswb(__m64 __m1, __m64 __m2) {
+ return _mm_packs_pu16(__m1, __m2);
+}
+#endif /* end ARCH_PWR8 */
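+
+/* Saturation sketch (illustrative): each 16-bit source lane is clamped to
+ the signed (or, for _mm_packs_pu16, unsigned) 8-bit range, so packing
+ _mm_set_pi16 (300, -300, 5, -5) with a zero second operand yields the
+ low four bytes -5, 5, -128, 127 and four zero bytes above them. */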
+
+/* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats(__m1);
+ b = (__vector unsigned char)vec_splats(__m2);
+ c = vec_mergel(a, b);
+ return (__m64)((__vector long long)c)[1];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = m1.as_char[4];
+ res.as_char[1] = m2.as_char[4];
+ res.as_char[2] = m1.as_char[5];
+ res.as_char[3] = m2.as_char[5];
+ res.as_char[4] = m1.as_char[6];
+ res.as_char[5] = m2.as_char[6];
+ res.as_char[6] = m1.as_char[7];
+ res.as_char[7] = m2.as_char[7];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhbw(__m64 __m1, __m64 __m2) {
+ return _mm_unpackhi_pi8(__m1, __m2);
+}
+
+/* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = m1.as_short[2];
+ res.as_short[1] = m2.as_short[2];
+ res.as_short[2] = m1.as_short[3];
+ res.as_short[3] = m2.as_short[3];
+
+ return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhwd(__m64 __m1, __m64 __m2) {
+ return _mm_unpackhi_pi16(__m1, __m2);
+}
+/* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = m1.as_int[1];
+ res.as_int[1] = m2.as_int[1];
+
+ return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhdq(__m64 __m1, __m64 __m2) {
+ return _mm_unpackhi_pi32(__m1, __m2);
+}
+/* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats(__m1);
+ b = (__vector unsigned char)vec_splats(__m2);
+ c = vec_mergel(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = m1.as_char[0];
+ res.as_char[1] = m2.as_char[0];
+ res.as_char[2] = m1.as_char[1];
+ res.as_char[3] = m2.as_char[1];
+ res.as_char[4] = m1.as_char[2];
+ res.as_char[5] = m2.as_char[2];
+ res.as_char[6] = m1.as_char[3];
+ res.as_char[7] = m2.as_char[3];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpcklbw(__m64 __m1, __m64 __m2) {
+ return _mm_unpacklo_pi8(__m1, __m2);
+}
+/* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = m1.as_short[0];
+ res.as_short[1] = m2.as_short[0];
+ res.as_short[2] = m1.as_short[1];
+ res.as_short[3] = m2.as_short[1];
+
+ return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpcklwd(__m64 __m1, __m64 __m2) {
+ return _mm_unpacklo_pi16(__m1, __m2);
+}
+
+/* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = m1.as_int[0];
+ res.as_int[1] = m2.as_int[0];
+
+ return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckldq(__m64 __m1, __m64 __m2) {
+ return _mm_unpacklo_pi32(__m1, __m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = vec_add(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = m1.as_char[0] + m2.as_char[0];
+ res.as_char[1] = m1.as_char[1] + m2.as_char[1];
+ res.as_char[2] = m1.as_char[2] + m2.as_char[2];
+ res.as_char[3] = m1.as_char[3] + m2.as_char[3];
+ res.as_char[4] = m1.as_char[4] + m2.as_char[4];
+ res.as_char[5] = m1.as_char[5] + m2.as_char[5];
+ res.as_char[6] = m1.as_char[6] + m2.as_char[6];
+ res.as_char[7] = m1.as_char[7] + m2.as_char[7];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddb(__m64 __m1, __m64 __m2) {
+ return _mm_add_pi8(__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_add(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = m1.as_short[0] + m2.as_short[0];
+ res.as_short[1] = m1.as_short[1] + m2.as_short[1];
+ res.as_short[2] = m1.as_short[2] + m2.as_short[2];
+ res.as_short[3] = m1.as_short[3] + m2.as_short[3];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddw(__m64 __m1, __m64 __m2) {
+ return _mm_add_pi16(__m1, __m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+ __vector signed int a, b, c;
+
+ a = (__vector signed int)vec_splats(__m1);
+ b = (__vector signed int)vec_splats(__m2);
+ c = vec_add(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = m1.as_int[0] + m2.as_int[0];
+ res.as_int[1] = m1.as_int[1] + m2.as_int[1];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddd(__m64 __m1, __m64 __m2) {
+ return _mm_add_pi32(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = vec_sub(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = m1.as_char[0] - m2.as_char[0];
+ res.as_char[1] = m1.as_char[1] - m2.as_char[1];
+ res.as_char[2] = m1.as_char[2] - m2.as_char[2];
+ res.as_char[3] = m1.as_char[3] - m2.as_char[3];
+ res.as_char[4] = m1.as_char[4] - m2.as_char[4];
+ res.as_char[5] = m1.as_char[5] - m2.as_char[5];
+ res.as_char[6] = m1.as_char[6] - m2.as_char[6];
+ res.as_char[7] = m1.as_char[7] - m2.as_char[7];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubb(__m64 __m1, __m64 __m2) {
+ return _mm_sub_pi8(__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_sub(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = m1.as_short[0] - m2.as_short[0];
+ res.as_short[1] = m1.as_short[1] - m2.as_short[1];
+ res.as_short[2] = m1.as_short[2] - m2.as_short[2];
+ res.as_short[3] = m1.as_short[3] - m2.as_short[3];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubw(__m64 __m1, __m64 __m2) {
+ return _mm_sub_pi16(__m1, __m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+ __vector signed int a, b, c;
+
+ a = (__vector signed int)vec_splats(__m1);
+ b = (__vector signed int)vec_splats(__m2);
+ c = vec_sub(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = m1.as_int[0] - m2.as_int[0];
+ res.as_int[1] = m1.as_int[1] - m2.as_int[1];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubd(__m64 __m1, __m64 __m2) {
+ return _mm_sub_pi32(__m1, __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 + __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 - __m2);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_si64(__m64 __m, __m64 __count) {
+ return (__m << __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllq(__m64 __m, __m64 __count) {
+ return _mm_sll_si64(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_si64(__m64 __m, const int __count) {
+ return (__m << __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllqi(__m64 __m, const int __count) {
+ return _mm_slli_si64(__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_si64(__m64 __m, __m64 __count) {
+ return (__m >> __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlq(__m64 __m, __m64 __count) {
+ return _mm_srl_si64(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_si64(__m64 __m, const int __count) {
+ return (__m >> __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlqi(__m64 __m, const int __count) {
+ return _mm_srli_si64(__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 & __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pand(__m64 __m1, __m64 __m2) {
+ return _mm_and_si64(__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_si64(__m64 __m1, __m64 __m2) {
+ return (~__m1 & __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pandn(__m64 __m1, __m64 __m2) {
+ return _mm_andnot_si64(__m1, __m2);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 | __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_por(__m64 __m1, __m64 __m2) {
+ return _mm_or_si64(__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 ^ __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pxor(__m64 __m1, __m64 __m2) {
+ return _mm_xor_si64(__m1, __m2);
+}
+
+/* Creates a 64-bit zero. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_si64(void) {
+ return (__m64)0;
+}
+
+/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
+#if defined(_ARCH_PWR6) && defined(__powerpc64__)
+ __m64 res;
+ __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :);
+ return (res);
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0;
+ res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0;
+ res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0;
+ res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0;
+ res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0;
+ res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0;
+ res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0;
+ res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqb(__m64 __m1, __m64 __m2) {
+ return _mm_cmpeq_pi8(__m1, __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = (__vector signed char)vec_cmpgt(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0;
+ res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0;
+ res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0;
+ res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0;
+ res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0;
+ res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0;
+ res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0;
+ res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtb(__m64 __m1, __m64 __m2) {
+ return _mm_cmpgt_pi8(__m1, __m2);
+}
+
+/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = (__vector signed short)vec_cmpeq(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0;
+ res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0;
+ res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0;
+ res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqw(__m64 __m1, __m64 __m2) {
+ return _mm_cmpeq_pi16(__m1, __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = (__vector signed short)vec_cmpgt(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0;
+ res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0;
+ res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0;
+ res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtw(__m64 __m1, __m64 __m2) {
+ return _mm_cmpgt_pi16(__m1, __m2);
+}
+
+/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+ __vector signed int a, b, c;
+
+ a = (__vector signed int)vec_splats(__m1);
+ b = (__vector signed int)vec_splats(__m2);
+ c = (__vector signed int)vec_cmpeq(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0;
+ res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqd(__m64 __m1, __m64 __m2) {
+ return _mm_cmpeq_pi32(__m1, __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+ __vector signed int a, b, c;
+
+ a = (__vector signed int)vec_splats(__m1);
+ b = (__vector signed int)vec_splats(__m2);
+ c = (__vector signed int)vec_cmpgt(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0;
+ res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtd(__m64 __m1, __m64 __m2) {
+ return _mm_cmpgt_pi32(__m1, __m2);
+}
+
+#if _ARCH_PWR8
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pi8(__m64 __m1, __m64 __m2) {
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = vec_adds(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddsb(__m64 __m1, __m64 __m2) {
+ return _mm_adds_pi8(__m1, __m2);
+}
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_adds(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddsw(__m64 __m1, __m64 __m2) {
+ return _mm_adds_pi16(__m1, __m2);
+}
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pu8(__m64 __m1, __m64 __m2) {
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats(__m1);
+ b = (__vector unsigned char)vec_splats(__m2);
+ c = vec_adds(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddusb(__m64 __m1, __m64 __m2) {
+ return _mm_adds_pu8(__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pu16(__m64 __m1, __m64 __m2) {
+ __vector unsigned short a, b, c;
+
+ a = (__vector unsigned short)vec_splats(__m1);
+ b = (__vector unsigned short)vec_splats(__m2);
+ c = vec_adds(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddusw(__m64 __m1, __m64 __m2) {
+ return _mm_adds_pu16(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pi8(__m64 __m1, __m64 __m2) {
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = vec_subs(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubsb(__m64 __m1, __m64 __m2) {
+ return _mm_subs_pi8(__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_subs(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubsw(__m64 __m1, __m64 __m2) {
+ return _mm_subs_pi16(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pu8(__m64 __m1, __m64 __m2) {
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats(__m1);
+ b = (__vector unsigned char)vec_splats(__m2);
+ c = vec_subs(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubusb(__m64 __m1, __m64 __m2) {
+ return _mm_subs_pu8(__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pu16(__m64 __m1, __m64 __m2) {
+ __vector unsigned short a, b, c;
+
+ a = (__vector unsigned short)vec_splats(__m1);
+ b = (__vector unsigned short)vec_splats(__m2);
+ c = vec_subs(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubusw(__m64 __m1, __m64 __m2) {
+ return _mm_subs_pu16(__m1, __m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+ four 32-bit intermediate results, which are then summed by pairs to
+ produce two 32-bit results. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_madd_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b;
+ __vector signed int c;
+ __vector signed int zero = {0, 0, 0, 0};
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_vmsumshm(a, b, zero);
+ return (__m64)((__vector long long)c)[0];
+}
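+
+/* Worked example (illustrative): with lanes m1 = (1, 2, 3, 4) and
+ m2 = (5, 6, 7, 8), the lane products are 5, 12, 21, 32 and the pairwise
+ sums give the two 32-bit results 17 and 53:
+
+ __m64 r = _mm_madd_pi16 (_mm_set_pi16 (4, 3, 2, 1),
+ _mm_set_pi16 (8, 7, 6, 5));
+ // r holds the 32-bit lanes 17 and 53
+*/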
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmaddwd(__m64 __m1, __m64 __m2) {
+ return _mm_madd_pi16(__m1, __m2);
+}
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b;
+ __vector signed short c;
+ __vector signed int w0, w1;
+ __vector unsigned char xform1 = {
+#ifdef __LITTLE_ENDIAN__
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+ 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
+ 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+#endif
+ };
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+
+ w0 = vec_vmulesh(a, b);
+ w1 = vec_vmulosh(a, b);
+ c = (__vector signed short)vec_perm(w0, w1, xform1);
+
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmulhw(__m64 __m1, __m64 __m2) {
+ return _mm_mulhi_pi16(__m1, __m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mullo_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = a * b;
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmullw(__m64 __m1, __m64 __m2) {
+ return _mm_mullo_pi16(__m1, __m2);
+}
+
+/* Shift four 16-bit values in M left by COUNT. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_pi16(__m64 __m, __m64 __count) {
+ __vector signed short m, r;
+ __vector unsigned short c;
+
+ if (__count <= 15) {
+ m = (__vector signed short)vec_splats(__m);
+ c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ r = vec_sl(m, (__vector unsigned short)c);
+ return (__m64)((__vector long long)r)[0];
+ } else
+ return (0);
+}
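+
+/* Note (illustrative): as with MMX, shift counts of 16 or more clear all
+ lanes, so _mm_sll_pi16 (_mm_set1_pi16 (1), _mm_cvtsi32_si64 (3)) yields
+ 8 in every lane, while a count of 16 yields 0. */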
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllw(__m64 __m, __m64 __count) {
+ return _mm_sll_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_pi16(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_sll_pi16. */
+ return _mm_sll_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllwi(__m64 __m, int __count) {
+ return _mm_slli_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M left by COUNT. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_pi32(__m64 __m, __m64 __count) {
+ __m64_union m, res;
+
+ m.as_m64 = __m;
+
+ res.as_int[0] = m.as_int[0] << __count;
+ res.as_int[1] = m.as_int[1] << __count;
+ return (res.as_m64);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pslld(__m64 __m, __m64 __count) {
+ return _mm_sll_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_pi32(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_sll_pi32. */
+ return _mm_sll_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pslldi(__m64 __m, int __count) {
+ return _mm_slli_pi32(__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_pi16(__m64 __m, __m64 __count) {
+ __vector signed short m, r;
+ __vector unsigned short c;
+
+ if (__count <= 15) {
+ m = (__vector signed short)vec_splats(__m);
+ c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ r = vec_sra(m, (__vector unsigned short)c);
+ return (__m64)((__vector long long)r)[0];
+ } else
+ return (0);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psraw(__m64 __m, __m64 __count) {
+ return _mm_sra_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_pi16(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_sra_pi16. */
+ return _mm_sra_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrawi(__m64 __m, int __count) {
+ return _mm_srai_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_pi32(__m64 __m, __m64 __count) {
+ __m64_union m, res;
+
+ m.as_m64 = __m;
+
+ res.as_int[0] = m.as_int[0] >> __count;
+ res.as_int[1] = m.as_int[1] >> __count;
+ return (res.as_m64);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrad(__m64 __m, __m64 __count) {
+ return _mm_sra_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_pi32(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_sra_pi32. */
+ return _mm_sra_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psradi(__m64 __m, int __count) {
+ return _mm_srai_pi32(__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_pi16(__m64 __m, __m64 __count) {
+ __vector unsigned short m, r;
+ __vector unsigned short c;
+
+ if (__count <= 15) {
+ m = (__vector unsigned short)vec_splats(__m);
+ c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ r = vec_sr(m, (__vector unsigned short)c);
+ return (__m64)((__vector long long)r)[0];
+ } else
+ return (0);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlw(__m64 __m, __m64 __count) {
+ return _mm_srl_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_pi16(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_srl_pi16. */
+ return _mm_srl_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlwi(__m64 __m, int __count) {
+ return _mm_srli_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_pi32(__m64 __m, __m64 __count) {
+ __m64_union m, res;
+
+ m.as_m64 = __m;
+
+ res.as_int[0] = (unsigned int)m.as_int[0] >> __count;
+ res.as_int[1] = (unsigned int)m.as_int[1] >> __count;
+ return (res.as_m64);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrld(__m64 __m, __m64 __count) {
+ return _mm_srl_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_pi32(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_srl_pi32. */
+ return _mm_srl_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrldi(__m64 __m, int __count) {
+ return _mm_srli_pi32(__m, __count);
+}
+#endif /* _ARCH_PWR8 */
+
+/* Creates a vector of two 32-bit values; I0 is least significant. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi32(int __i1, int __i0) {
+ __m64_union res;
+
+ res.as_int[0] = __i0;
+ res.as_int[1] = __i1;
+ return (res.as_m64);
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
+ __m64_union res;
+
+ res.as_short[0] = __w0;
+ res.as_short[1] = __w1;
+ res.as_short[2] = __w2;
+ res.as_short[3] = __w3;
+ return (res.as_m64);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
+ char __b2, char __b1, char __b0) {
+ __m64_union res;
+
+ res.as_char[0] = __b0;
+ res.as_char[1] = __b1;
+ res.as_char[2] = __b2;
+ res.as_char[3] = __b3;
+ res.as_char[4] = __b4;
+ res.as_char[5] = __b5;
+ res.as_char[6] = __b6;
+ res.as_char[7] = __b7;
+ return (res.as_m64);
+}
+
+/* Similar, but with the arguments in reverse order. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi32(int __i0, int __i1) {
+ __m64_union res;
+
+ res.as_int[0] = __i0;
+ res.as_int[1] = __i1;
+ return (res.as_m64);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) {
+ return _mm_set_pi16(__w3, __w2, __w1, __w0);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4,
+ char __b5, char __b6, char __b7) {
+ return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+/* Creates a vector of two 32-bit values, both elements containing I. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi32(int __i) {
+ __m64_union res;
+
+ res.as_int[0] = __i;
+ res.as_int[1] = __i;
+ return (res.as_m64);
+}
+
+/* Creates a vector of four 16-bit values, all elements containing W. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi16(short __w) {
+#if _ARCH_PWR9
+ __vector signed short w;
+
+ w = (__vector signed short)vec_splats(__w);
+ return (__m64)((__vector long long)w)[0];
+#else
+ __m64_union res;
+
+ res.as_short[0] = __w;
+ res.as_short[1] = __w;
+ res.as_short[2] = __w;
+ res.as_short[3] = __w;
+ return (res.as_m64);
+#endif
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi8(signed char __b) {
+#if _ARCH_PWR8
+ __vector signed char b;
+
+ b = (__vector signed char)vec_splats(__b);
+ return (__m64)((__vector long long)b)[0];
+#else
+ __m64_union res;
+
+ res.as_char[0] = __b;
+ res.as_char[1] = __b;
+ res.as_char[2] = __b;
+ res.as_char[3] = __b;
+ res.as_char[4] = __b;
+ res.as_char[5] = __b;
+ res.as_char[6] = __b;
+ res.as_char[7] = __b;
+ return (res.as_m64);
+#endif
+}
+
+#else
+#include_next <mmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/lib/clang/12.0.0/include/ppc_wrappers/pmmintrin.h b/lib/clang/12.0.0/include/ppc_wrappers/pmmintrin.h
new file mode 100644
index 0000000..6d93383
--- /dev/null
+++ b/lib/clang/12.0.0/include/ppc_wrappers/pmmintrin.h
@@ -0,0 +1,150 @@
+/*===---- pmmintrin.h - Implementation of SSE3 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+ makes explicit use of Intel intrinsics to powerpc64le.
+ It is the user's responsibility to determine if the results are
+ acceptable and make additional changes as necessary.
+ Note that much code that uses Intel intrinsics can be rewritten in
+ standard C or GNU C extensions, which are more portable and better
+ optimized across multiple targets.
+
+ In the specific case of X86 SSE3 intrinsics, the PowerPC VMX/VSX ISA
+ is a good match for most SIMD operations. However the horizontal
+ add/sub operations require the data pairs to be permuted into
+ separate registers with vertical even/odd alignment for the operation.
+ And the addsub operation requires the sign of only the even-numbered
+ elements to be flipped (xored with -0.0).
+ For larger blocks of code using these intrinsic implementations,
+ the compiler should be able to schedule instructions to avoid
+ additional latency.
+
+ In the specific case of the monitor and mwait instructions, there is
+ no direct equivalent in the PowerISA at this time. So those
+ intrinsics are not implemented. */
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this warning."
+#endif
+
+#ifndef PMMINTRIN_H_
+#define PMMINTRIN_H_
+
+#if defined(__linux__) && defined(__ppc64__)
+
+/* We need definitions from the SSE2 and SSE header files. */
+#include <emmintrin.h>
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_addsub_ps (__m128 __X, __m128 __Y)
+{
+ const __v4sf even_n0 = {-0.0, 0.0, -0.0, 0.0};
+ __v4sf even_neg_Y = vec_xor(__Y, even_n0);
+ return (__m128) vec_add (__X, even_neg_Y);
+}
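+
+/* For reference, the SSE3 addsub semantics implemented above: subtract on
+   the even-numbered elements and add on the odd-numbered ones, i.e.
+   _mm_addsub_ps (__X, __Y) yields
+   { __X[0]-__Y[0], __X[1]+__Y[1], __X[2]-__Y[2], __X[3]+__Y[3] }.
+   Flipping the sign of the even elements of __Y (xor with -0.0) first
+   lets a single vector add produce that result. */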
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_addsub_pd (__m128d __X, __m128d __Y)
+{
+ const __v2df even_n0 = {-0.0, 0.0};
+ __v2df even_neg_Y = vec_xor(__Y, even_n0);
+ return (__m128d) vec_add (__X, even_neg_Y);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_ps (__m128 __X, __m128 __Y)
+{
+ __vector unsigned char xform2 = {
+ 0x00, 0x01, 0x02, 0x03,
+ 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x18, 0x19, 0x1A, 0x1B
+ };
+ __vector unsigned char xform1 = {
+ 0x04, 0x05, 0x06, 0x07,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17,
+ 0x1C, 0x1D, 0x1E, 0x1F
+ };
+ return (__m128) vec_add (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
+ vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsub_ps (__m128 __X, __m128 __Y)
+{
+ __vector unsigned char xform2 = {
+ 0x00, 0x01, 0x02, 0x03,
+ 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x18, 0x19, 0x1A, 0x1B
+ };
+ __vector unsigned char xform1 = {
+ 0x04, 0x05, 0x06, 0x07,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17,
+ 0x1C, 0x1D, 0x1E, 0x1F
+ };
+ return (__m128) vec_sub (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
+ vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_pd (__m128d __X, __m128d __Y)
+{
+ return (__m128d) vec_add (vec_mergeh ((__v2df) __X, (__v2df)__Y),
+ vec_mergel ((__v2df) __X, (__v2df)__Y));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsub_pd (__m128d __X, __m128d __Y)
+{
+ return (__m128d) vec_sub (vec_mergeh ((__v2df) __X, (__v2df)__Y),
+ vec_mergel ((__v2df) __X, (__v2df)__Y));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movehdup_ps (__m128 __X)
+{
+ return (__m128)vec_mergeo ((__v4su)__X, (__v4su)__X);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_moveldup_ps (__m128 __X)
+{
+ return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loaddup_pd (double const *__P)
+{
+ return (__m128d) vec_splats (*__P);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movedup_pd (__m128d __X)
+{
+ return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_lddqu_si128 (__m128i const *__P)
+{
+ return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
+}
+
+/* POWER8 / POWER9 have no equivalent for _mm_monitor or _mm_mwait. */
+
+#else
+#include_next <pmmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* PMMINTRIN_H_ */
diff --git a/lib/clang/12.0.0/include/ppc_wrappers/smmintrin.h b/lib/clang/12.0.0/include/ppc_wrappers/smmintrin.h
new file mode 100644
index 0000000..64f0c76
--- /dev/null
+++ b/lib/clang/12.0.0/include/ppc_wrappers/smmintrin.h
@@ -0,0 +1,109 @@
+/*===---- smmintrin.h - Implementation of SSE4 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0.
+
+ NOTE: This is NOT a complete implementation of the SSE4 intrinsics! */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+ makes explicit use of Intel intrinsics to powerpc64/powerpc64le.
+
+ It is the user's responsibility to determine if the results are
+ acceptable and make additional changes as necessary.
+
+ Note that much code that uses Intel intrinsics can be rewritten in
+ standard C or GNU C extensions, which are more portable and better
+ optimized across multiple targets. */
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef SMMINTRIN_H_
+#define SMMINTRIN_H_
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <altivec.h>
+#include <emmintrin.h>
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_extract_epi8(__m128i __X, const int __N) {
+ return (unsigned char)((__v16qi)__X)[__N & 15];
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_extract_epi32(__m128i __X, const int __N) {
+ return ((__v4si)__X)[__N & 3];
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_extract_epi64(__m128i __X, const int __N) {
+ return ((__v2di)__X)[__N & 1];
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_extract_ps(__m128 __X, const int __N) {
+ return ((__v4si)__X)[__N & 3];
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
+ __v16qi __charmask = vec_splats((signed char)__imm8);
+ __charmask = vec_gb(__charmask);
+ __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
+#ifdef __BIG_ENDIAN__
+ __shortmask = vec_reve(__shortmask);
+#endif
+ return (__m128i)vec_sel((__v8hu)__A, (__v8hu)__B, __shortmask);
+}
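+
+/* For reference, the SSE4.1 blend semantics emulated above: bit i of
+   __imm8 selects 16-bit element i (element 0 is the least significant)
+   from __B, otherwise from __A.  For example, __imm8 == 0x0F takes the
+   low four halfwords from __B and the high four from __A. */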
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blendv_epi8(__m128i __A, __m128i __B, __m128i __mask) {
+ const __v16qu __seven = vec_splats((unsigned char)0x07);
+ __v16qu __lmask = vec_sra((__v16qu)__mask, __seven);
+ return (__m128i)vec_sel((__v16qu)__A, (__v16qu)__B, __lmask);
+}
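+
+/* Likewise for the variable blend above: only the most significant bit of
+   each byte of __mask matters; a set bit selects that byte from __B and a
+   clear bit selects it from __A.  The arithmetic shift right by 7
+   broadcasts the selector bit across each byte for vec_sel. */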
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
+ __v16qi result = (__v16qi)__A;
+ result[__N & 0xf] = __D;
+ return (__m128i)result;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
+ __v4si result = (__v4si)__A;
+ result[__N & 3] = __D;
+ return (__m128i)result;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
+ __v2di result = (__v2di)__A;
+ result[__N & 1] = __D;
+ return (__m128i)result;
+}
+
+#else
+#include_next <smmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* SMMINTRIN_H_ */
diff --git a/lib/clang/12.0.0/include/ppc_wrappers/tmmintrin.h b/lib/clang/12.0.0/include/ppc_wrappers/tmmintrin.h
new file mode 100644
index 0000000..b5a935d
--- /dev/null
+++ b/lib/clang/12.0.0/include/ppc_wrappers/tmmintrin.h
@@ -0,0 +1,495 @@
+/*===---- tmmintrin.h - Implementation of SSSE3 intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+ makes explicit use of Intel intrinsics to powerpc64le.
+
+ It is the user's responsibility to determine if the results are
+ acceptable and make additional changes as necessary.
+
+ Note that much code that uses Intel intrinsics can be rewritten in
+ standard C or GNU C extensions, which are more portable and better
+ optimized across multiple targets. */
+#error \
+    "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef TMMINTRIN_H_
+#define TMMINTRIN_H_
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files. */
+#include <pmmintrin.h>
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_epi16 (__m128i __A)
+{
+ return (__m128i) vec_abs ((__v8hi) __A);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_epi32 (__m128i __A)
+{
+ return (__m128i) vec_abs ((__v4si) __A);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_epi8 (__m128i __A)
+{
+ return (__m128i) vec_abs ((__v16qi) __A);
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_pi16 (__m64 __A)
+{
+ __v8hi __B = (__v8hi) (__v2du) { __A, __A };
+ return (__m64) ((__v2du) vec_abs (__B))[0];
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_pi32 (__m64 __A)
+{
+ __v4si __B = (__v4si) (__v2du) { __A, __A };
+ return (__m64) ((__v2du) vec_abs (__B))[0];
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_pi8 (__m64 __A)
+{
+ __v16qi __B = (__v16qi) (__v2du) { __A, __A };
+ return (__m64) ((__v2du) vec_abs (__B))[0];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_alignr_epi8 (__m128i __A, __m128i __B, const unsigned int __count)
+{
+ if (__builtin_constant_p (__count) && __count < 16)
+ {
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i) vec_reve ((__v16qu) __A);
+ __B = (__m128i) vec_reve ((__v16qu) __B);
+#endif
+ __A = (__m128i) vec_sld ((__v16qu) __B, (__v16qu) __A, __count);
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i) vec_reve ((__v16qu) __A);
+#endif
+ return __A;
+ }
+
+ if (__count == 0)
+ return __B;
+
+ if (__count >= 16)
+ {
+ if (__count >= 32)
+ {
+ const __v16qu zero = { 0 };
+ return (__m128i) zero;
+ }
+ else
+ {
+ const __v16qu __shift =
+ vec_splats ((unsigned char) ((__count - 16) * 8));
+#ifdef __LITTLE_ENDIAN__
+ return (__m128i) vec_sro ((__v16qu) __A, __shift);
+#else
+ return (__m128i) vec_slo ((__v16qu) __A, __shift);
+#endif
+ }
+ }
+ else
+ {
+ const __v16qu __shiftA =
+ vec_splats ((unsigned char) ((16 - __count) * 8));
+ const __v16qu __shiftB = vec_splats ((unsigned char) (__count * 8));
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i) vec_slo ((__v16qu) __A, __shiftA);
+ __B = (__m128i) vec_sro ((__v16qu) __B, __shiftB);
+#else
+ __A = (__m128i) vec_sro ((__v16qu) __A, __shiftA);
+ __B = (__m128i) vec_slo ((__v16qu) __B, __shiftB);
+#endif
+ return (__m128i) vec_or ((__v16qu) __A, (__v16qu) __B);
+ }
+}
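+
+/* For reference, the SSSE3 palignr semantics implemented above: treat
+   __A:__B as a 32-byte value with __A in the upper 16 bytes, shift it
+   right by __count bytes and return the low 16 bytes.  For example,
+   __count == 4 yields bytes 4..15 of __B followed by bytes 0..3 of __A. */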
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_alignr_pi8 (__m64 __A, __m64 __B, unsigned int __count)
+{
+ if (__count < 16)
+ {
+ __v2du __C = { __B, __A };
+#ifdef __LITTLE_ENDIAN__
+ const __v4su __shift = { __count << 3, 0, 0, 0 };
+ __C = (__v2du) vec_sro ((__v16qu) __C, (__v16qu) __shift);
+#else
+ const __v4su __shift = { 0, 0, 0, __count << 3 };
+ __C = (__v2du) vec_slo ((__v16qu) __C, (__v16qu) __shift);
+#endif
+ return (__m64) __C[0];
+ }
+ else
+ {
+ const __m64 __zero = { 0 };
+ return __zero;
+ }
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_epi16 (__m128i __A, __m128i __B)
+{
+ const __v16qu __P =
+ { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
+ const __v16qu __Q =
+ { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
+ __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
+ __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
+ return (__m128i) vec_add (__C, __D);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_epi32 (__m128i __A, __m128i __B)
+{
+ const __v16qu __P =
+ { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
+ const __v16qu __Q =
+ { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
+ __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
+ __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
+ return (__m128i) vec_add (__C, __D);
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_pi16 (__m64 __A, __m64 __B)
+{
+ __v8hi __C = (__v8hi) (__v2du) { __A, __B };
+ const __v16qu __P =
+ { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
+ const __v16qu __Q =
+ { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
+ __v8hi __D = vec_perm (__C, __C, __Q);
+ __C = vec_perm (__C, __C, __P);
+ __C = vec_add (__C, __D);
+ return (__m64) ((__v2du) __C)[1];
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_pi32 (__m64 __A, __m64 __B)
+{
+ __v4si __C = (__v4si) (__v2du) { __A, __B };
+ const __v16qu __P =
+ { 0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11 };
+ const __v16qu __Q =
+ { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 };
+ __v4si __D = vec_perm (__C, __C, __Q);
+ __C = vec_perm (__C, __C, __P);
+ __C = vec_add (__C, __D);
+ return (__m64) ((__v2du) __C)[1];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadds_epi16 (__m128i __A, __m128i __B)
+{
+ __v4si __C = { 0 }, __D = { 0 };
+ __C = vec_sum4s ((__v8hi) __A, __C);
+ __D = vec_sum4s ((__v8hi) __B, __D);
+ __C = (__v4si) vec_packs (__C, __D);
+ return (__m128i) __C;
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadds_pi16 (__m64 __A, __m64 __B)
+{
+ const __v4si __zero = { 0 };
+ __v8hi __C = (__v8hi) (__v2du) { __A, __B };
+ __v4si __D = vec_sum4s (__C, __zero);
+ __C = vec_packs (__D, __D);
+ return (__m64) ((__v2du) __C)[1];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsub_epi16 (__m128i __A, __m128i __B)
+{
+ const __v16qu __P =
+ { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
+ const __v16qu __Q =
+ { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
+ __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
+ __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
+ return (__m128i) vec_sub (__C, __D);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsub_epi32 (__m128i __A, __m128i __B)
+{
+ const __v16qu __P =
+ { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
+ const __v16qu __Q =
+ { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
+ __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
+ __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
+ return (__m128i) vec_sub (__C, __D);
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsub_pi16 (__m64 __A, __m64 __B)
+{
+ const __v16qu __P =
+ { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
+ const __v16qu __Q =
+ { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
+ __v8hi __C = (__v8hi) (__v2du) { __A, __B };
+ __v8hi __D = vec_perm (__C, __C, __Q);
+ __C = vec_perm (__C, __C, __P);
+ __C = vec_sub (__C, __D);
+ return (__m64) ((__v2du) __C)[1];
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsub_pi32 (__m64 __A, __m64 __B)
+{
+ const __v16qu __P =
+ { 0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11 };
+ const __v16qu __Q =
+ { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 };
+ __v4si __C = (__v4si) (__v2du) { __A, __B };
+ __v4si __D = vec_perm (__C, __C, __Q);
+ __C = vec_perm (__C, __C, __P);
+ __C = vec_sub (__C, __D);
+ return (__m64) ((__v2du) __C)[1];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsubs_epi16 (__m128i __A, __m128i __B)
+{
+ const __v16qu __P =
+ { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
+ const __v16qu __Q =
+ { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
+ __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
+ __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
+ return (__m128i) vec_subs (__C, __D);
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsubs_pi16 (__m64 __A, __m64 __B)
+{
+ const __v16qu __P =
+ { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
+ const __v16qu __Q =
+ { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
+ __v8hi __C = (__v8hi) (__v2du) { __A, __B };
+ __v8hi __D = vec_perm (__C, __C, __P);
+ __v8hi __E = vec_perm (__C, __C, __Q);
+ __C = vec_subs (__D, __E);
+ return (__m64) ((__v2du) __C)[1];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_epi8 (__m128i __A, __m128i __B)
+{
+ const __v16qi __zero = { 0 };
+ __vector __bool char __select = vec_cmplt ((__v16qi) __B, __zero);
+ __v16qi __C = vec_perm ((__v16qi) __A, (__v16qi) __A, (__v16qu) __B);
+ return (__m128i) vec_sel (__C, __zero, __select);
+}
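+
+/* For reference, the pshufb semantics implemented above: each byte of __B
+   is a control byte; if its most significant bit is set the result byte
+   is zero, otherwise its low four bits index a byte of __A. */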
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_pi8 (__m64 __A, __m64 __B)
+{
+ const __v16qi __zero = { 0 };
+ __v16qi __C = (__v16qi) (__v2du) { __A, __A };
+ __v16qi __D = (__v16qi) (__v2du) { __B, __B };
+ __vector __bool char __select = vec_cmplt ((__v16qi) __D, __zero);
+ __C = vec_perm ((__v16qi) __C, (__v16qi) __C, (__v16qu) __D);
+ __C = vec_sel (__C, __zero, __select);
+ return (__m64) ((__v2du) (__C))[0];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sign_epi8 (__m128i __A, __m128i __B)
+{
+ const __v16qi __zero = { 0 };
+ __v16qi __selectneg = (__v16qi) vec_cmplt ((__v16qi) __B, __zero);
+ __v16qi __selectpos =
+ (__v16qi) vec_neg ((__v16qi) vec_cmpgt ((__v16qi) __B, __zero));
+ __v16qi __conv = vec_add (__selectneg, __selectpos);
+ return (__m128i) vec_mul ((__v16qi) __A, (__v16qi) __conv);
+}
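+
+/* For reference, the psign semantics implemented here and in the two
+   variants below: each result element is -__A[i] where __B[i] is negative,
+   0 where __B[i] is zero, and __A[i] unchanged where __B[i] is positive.
+   The two compares build a -1/0/+1 multiplier encoding exactly that. */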
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sign_epi16 (__m128i __A, __m128i __B)
+{
+ const __v8hi __zero = { 0 };
+ __v8hi __selectneg = (__v8hi) vec_cmplt ((__v8hi) __B, __zero);
+ __v8hi __selectpos =
+ (__v8hi) vec_neg ((__v8hi) vec_cmpgt ((__v8hi) __B, __zero));
+ __v8hi __conv = vec_add (__selectneg, __selectpos);
+ return (__m128i) vec_mul ((__v8hi) __A, (__v8hi) __conv);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sign_epi32 (__m128i __A, __m128i __B)
+{
+ const __v4si __zero = { 0 };
+ __v4si __selectneg = (__v4si) vec_cmplt ((__v4si) __B, __zero);
+ __v4si __selectpos =
+ (__v4si) vec_neg ((__v4si) vec_cmpgt ((__v4si) __B, __zero));
+ __v4si __conv = vec_add (__selectneg, __selectpos);
+ return (__m128i) vec_mul ((__v4si) __A, (__v4si) __conv);
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sign_pi8 (__m64 __A, __m64 __B)
+{
+ const __v16qi __zero = { 0 };
+ __v16qi __C = (__v16qi) (__v2du) { __A, __A };
+ __v16qi __D = (__v16qi) (__v2du) { __B, __B };
+ __C = (__v16qi) _mm_sign_epi8 ((__m128i) __C, (__m128i) __D);
+ return (__m64) ((__v2du) (__C))[0];
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sign_pi16 (__m64 __A, __m64 __B)
+{
+ const __v8hi __zero = { 0 };
+ __v8hi __C = (__v8hi) (__v2du) { __A, __A };
+ __v8hi __D = (__v8hi) (__v2du) { __B, __B };
+ __C = (__v8hi) _mm_sign_epi16 ((__m128i) __C, (__m128i) __D);
+ return (__m64) ((__v2du) (__C))[0];
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sign_pi32 (__m64 __A, __m64 __B)
+{
+ const __v4si __zero = { 0 };
+ __v4si __C = (__v4si) (__v2du) { __A, __A };
+ __v4si __D = (__v4si) (__v2du) { __B, __B };
+ __C = (__v4si) _mm_sign_epi32 ((__m128i) __C, (__m128i) __D);
+ return (__m64) ((__v2du) (__C))[0];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maddubs_epi16 (__m128i __A, __m128i __B)
+{
+ __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
+ __v8hi __C = vec_and (vec_unpackh ((__v16qi) __A), __unsigned);
+ __v8hi __D = vec_and (vec_unpackl ((__v16qi) __A), __unsigned);
+ __v8hi __E = vec_unpackh ((__v16qi) __B);
+ __v8hi __F = vec_unpackl ((__v16qi) __B);
+ __C = vec_mul (__C, __E);
+ __D = vec_mul (__D, __F);
+ const __v16qu __odds =
+ { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
+ const __v16qu __evens =
+ { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
+ __E = vec_perm (__C, __D, __odds);
+ __F = vec_perm (__C, __D, __evens);
+ return (__m128i) vec_adds (__E, __F);
+}
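+
+/* For reference, the pmaddubsw semantics implemented above: __A is treated
+   as unsigned bytes and __B as signed bytes; adjacent products are summed
+   pairwise with signed saturation, i.e.
+   result[i] = saturate16 (__A[2*i] * __B[2*i] + __A[2*i+1] * __B[2*i+1]). */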
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maddubs_pi16 (__m64 __A, __m64 __B)
+{
+ __v8hi __C = (__v8hi) (__v2du) { __A, __A };
+ __C = vec_unpackl ((__v16qi) __C);
+ const __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
+ __C = vec_and (__C, __unsigned);
+ __v8hi __D = (__v8hi) (__v2du) { __B, __B };
+ __D = vec_unpackl ((__v16qi) __D);
+ __D = vec_mul (__C, __D);
+ const __v16qu __odds =
+ { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
+ const __v16qu __evens =
+ { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
+ __C = vec_perm (__D, __D, __odds);
+ __D = vec_perm (__D, __D, __evens);
+ __C = vec_adds (__C, __D);
+ return (__m64) ((__v2du) (__C))[0];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhrs_epi16 (__m128i __A, __m128i __B)
+{
+ __v4si __C = vec_unpackh ((__v8hi) __A);
+ __v4si __D = vec_unpackh ((__v8hi) __B);
+ __C = vec_mul (__C, __D);
+ __D = vec_unpackl ((__v8hi) __A);
+ __v4si __E = vec_unpackl ((__v8hi) __B);
+ __D = vec_mul (__D, __E);
+ const __v4su __shift = vec_splats ((unsigned int) 14);
+ __C = vec_sr (__C, __shift);
+ __D = vec_sr (__D, __shift);
+ const __v4si __ones = vec_splats ((signed int) 1);
+ __C = vec_add (__C, __ones);
+ __C = vec_sr (__C, (__v4su) __ones);
+ __D = vec_add (__D, __ones);
+ __D = vec_sr (__D, (__v4su) __ones);
+ return (__m128i) vec_pack (__C, __D);
+}
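+
+/* For reference, the pmulhrsw semantics implemented above: each result is
+   the rounded high half of a 16x16-bit signed multiply, i.e.
+   result[i] = (short) (((((int) __A[i] * __B[i]) >> 14) + 1) >> 1),
+   which the code expresses as the shift-by-14, add-1, shift-by-1 sequence. */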
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhrs_pi16 (__m64 __A, __m64 __B)
+{
+ __v4si __C = (__v4si) (__v2du) { __A, __A };
+ __C = vec_unpackh ((__v8hi) __C);
+ __v4si __D = (__v4si) (__v2du) { __B, __B };
+ __D = vec_unpackh ((__v8hi) __D);
+ __C = vec_mul (__C, __D);
+ const __v4su __shift = vec_splats ((unsigned int) 14);
+ __C = vec_sr (__C, __shift);
+ const __v4si __ones = vec_splats ((signed int) 1);
+ __C = vec_add (__C, __ones);
+ __C = vec_sr (__C, (__v4su) __ones);
+ __v8hi __E = vec_pack (__C, __D);
+ return (__m64) ((__v2du) (__E))[0];
+}
+
+#else
+#include_next <tmmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* TMMINTRIN_H_ */
diff --git a/lib/clang/12.0.0/include/ppc_wrappers/xmmintrin.h b/lib/clang/12.0.0/include/ppc_wrappers/xmmintrin.h
new file mode 100644
index 0000000..0f429fa
--- /dev/null
+++ b/lib/clang/12.0.0/include/ppc_wrappers/xmmintrin.h
@@ -0,0 +1,1844 @@
+/*===---- xmmintrin.h - Implementation of SSE intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+ explicitly from x86_64 to powerpc64/powerpc64le.
+
+ Since X86 SSE intrinsics mainly handles __m128 type, PowerPC
+ VMX/VSX ISA is a good match for vector float SIMD operations.
+ However scalar float operations in vector (XMM) registers require
+ the POWER8 VSX ISA (2.07) level. There are differences for data
+ format and placement of float scalars in the vector register, which
+ require extra steps to match SSE scalar float semantics on POWER.
+
+ It should be noted that there's much difference between X86_64's
+ MXCSR and PowerISA's FPSCR/VSCR registers. It's recommended to use
+ portable <fenv.h> instead of accessing MXCSR directly.
+
+ Most SSE scalar float intrinsic operations can be performed more
+ efficiently as C language float scalar operations or optimized to
+ use vector SIMD operations. We recommend this for new applications. */
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef _XMMINTRIN_H_INCLUDED
+#define _XMMINTRIN_H_INCLUDED
+
+#if defined(__linux__) && defined(__ppc64__)
+
+/* Define four value permute mask */
+#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
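+
+/* Illustrative note: _MM_SHUFFLE packs four 2-bit element selectors into a
+   single byte, most significant selector first.  For example,
+   _MM_SHUFFLE (3, 2, 1, 0) == 0xE4 is the identity permute, while
+   _MM_SHUFFLE (0, 1, 2, 3) == 0x1B reverses the four elements. */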
+
+#include <altivec.h>
+
+/* Avoid collisions between altivec.h and strict adherence to C++ and
+ C11 standards. This should eventually be done inside altivec.h itself,
+ but only after testing a full distro build. */
+#if defined(__STRICT_ANSI__) && (defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && \
+ __STDC_VERSION__ >= 201112L))
+#undef vector
+#undef pixel
+#undef bool
+#endif
+
+/* We need type definitions from the MMX header file. */
+#include <mmintrin.h>
+
+/* Get _mm_malloc () and _mm_free (). */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
+
+/* Unaligned version of the same type. */
+typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__,
+ __aligned__ (1)));
+
+/* Internal data types for implementing the intrinsics. */
+typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+
+/* Create an undefined vector. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_undefined_ps (void)
+{
+ __m128 __Y = __Y;
+ return __Y;
+}
+
+/* Create a vector of zeros. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_ps (void)
+{
+ return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
+}
+
+/* Load four SPFP values from P. The address must be 16-byte aligned. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_ps (float const *__P)
+{
+ return ((__m128)vec_ld(0, (__v4sf*)__P));
+}
+
+/* Load four SPFP values from P. The address need not be 16-byte aligned. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_ps (float const *__P)
+{
+ return (vec_vsx_ld(0, __P));
+}
+
+/* Load four SPFP values in reverse order. The address must be aligned. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadr_ps (float const *__P)
+{
+ __v4sf __tmp;
+ __m128 result;
+ static const __vector unsigned char permute_vector =
+ { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
+ 0x17, 0x10, 0x11, 0x12, 0x13 };
+
+ __tmp = vec_ld (0, (__v4sf *) __P);
+ result = (__m128) vec_perm (__tmp, __tmp, permute_vector);
+ return result;
+}
+
+/* Create a vector with all four elements equal to F. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_ps (float __F)
+{
+ return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_ps1 (float __F)
+{
+ return _mm_set1_ps (__F);
+}
+
+/* Create the vector [Z Y X W]. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
+{
+ return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
+}
+
+/* Create the vector [W X Y Z]. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_ps (float __Z, float __Y, float __X, float __W)
+{
+ return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
+}
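+
+/* Illustrative usage sketch: _mm_set_ps takes its arguments most
+   significant element first, while _mm_setr_ps takes them in memory
+   order, so either of the following stores { 0.0f, 1.0f, 2.0f, 3.0f }
+   to memory when written out with _mm_storeu_ps:
+
+     __m128 v1 = _mm_set_ps (3.0f, 2.0f, 1.0f, 0.0f);
+     __m128 v2 = _mm_setr_ps (0.0f, 1.0f, 2.0f, 3.0f);  */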
+
+/* Store four SPFP values. The address must be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_ps (float *__P, __m128 __A)
+{
+ vec_st((__v4sf)__A, 0, (__v4sf*)__P);
+}
+
+/* Store four SPFP values. The address need not be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_ps (float *__P, __m128 __A)
+{
+ *(__m128_u *)__P = __A;
+}
+
+/* Store four SPFP values in reverse order. The address must be aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storer_ps (float *__P, __m128 __A)
+{
+ __v4sf __tmp;
+ static const __vector unsigned char permute_vector =
+ { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
+ 0x17, 0x10, 0x11, 0x12, 0x13 };
+
+ __tmp = (__m128) vec_perm (__A, __A, permute_vector);
+
+ _mm_store_ps (__P, __tmp);
+}
+
+/* Store the lower SPFP value across four words. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store1_ps (float *__P, __m128 __A)
+{
+ __v4sf __va = vec_splat((__v4sf)__A, 0);
+ _mm_store_ps (__P, __va);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_ps1 (float *__P, __m128 __A)
+{
+ _mm_store1_ps (__P, __A);
+}
+
+/* Create a vector with element 0 as F and the rest zero. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_ss (float __F)
+{
+ return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
+}
+
+/* Sets the low SPFP value of A from the low value of B. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_move_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+
+ return (vec_sel ((__v4sf)__A, (__v4sf)__B, mask));
+}
+
+/* Create a vector with element 0 as *P and the rest zero. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_ss (float const *__P)
+{
+ return _mm_set_ss (*__P);
+}
+
+/* Stores the lower SPFP value. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_ss (float *__P, __m128 __A)
+{
+ *__P = ((__v4sf)__A)[0];
+}
+
+/* Perform the respective operation on the lower SPFP (single-precision
+ floating-point) values of A and B; the upper three SPFP values are
+ passed through from A. */
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_ss (__m128 __A, __m128 __B)
+{
+#ifdef _ARCH_PWR7
+ __m128 a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ results. So to ensure we don't generate spurious exceptions
+ (from the upper float values) we splat the lower float
+ before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = a + b;
+ /* Then we merge the lower float result with the original upper
+ float elements from __A. */
+ return (vec_sel (__A, c, mask));
+#else
+ __A[0] = __A[0] + __B[0];
+ return (__A);
+#endif
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_ss (__m128 __A, __m128 __B)
+{
+#ifdef _ARCH_PWR7
+ __m128 a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ results. So to ensure we don't generate spurious exceptions
+ (from the upper float values) we splat the lower float
+ before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = a - b;
+ /* Then we merge the lower float result with the original upper
+ float elements from __A. */
+ return (vec_sel (__A, c, mask));
+#else
+ __A[0] = __A[0] - __B[0];
+ return (__A);
+#endif
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_ss (__m128 __A, __m128 __B)
+{
+#ifdef _ARCH_PWR7
+ __m128 a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ results. So to ensure we don't generate spurious exceptions
+ (from the upper float values) we splat the lower float
+ before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = a * b;
+ /* Then we merge the lower float result with the original upper
+ float elements from __A. */
+ return (vec_sel (__A, c, mask));
+#else
+ __A[0] = __A[0] * __B[0];
+ return (__A);
+#endif
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_ss (__m128 __A, __m128 __B)
+{
+#ifdef _ARCH_PWR7
+ __m128 a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ results. So to ensure we don't generate spurious exceptions
+ (from the upper float values) we splat the lower float
+ before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = a / b;
+ /* Then we merge the lower float result with the original upper
+ float elements from __A. */
+ return (vec_sel (__A, c, mask));
+#else
+ __A[0] = __A[0] / __B[0];
+ return (__A);
+#endif
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_ss (__m128 __A)
+{
+ __m128 a, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper float values) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat (__A, 0);
+ c = vec_sqrt (a);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel (__A, c, mask));
+}
+
+/* Perform the respective operation on the four SPFP values in A and B. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) ((__v4sf)__A + (__v4sf)__B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) ((__v4sf)__A - (__v4sf)__B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) ((__v4sf)__A * (__v4sf)__B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) ((__v4sf)__A / (__v4sf)__B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_ps (__m128 __A)
+{
+ return (vec_sqrt ((__v4sf)__A));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rcp_ps (__m128 __A)
+{
+ return (vec_re ((__v4sf)__A));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt_ps (__m128 __A)
+{
+ return (vec_rsqrte (__A));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rcp_ss (__m128 __A)
+{
+ __m128 a, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper float values) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat (__A, 0);
+ c = _mm_rcp_ps (a);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel (__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt_ss (__m128 __A)
+{
+ __m128 a, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper float values) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat (__A, 0);
+ c = vec_rsqrte (a);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel (__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_ss (__m128 __A, __m128 __B)
+{
+ __v4sf a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper float values) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf)__A, 0);
+ b = vec_splat ((__v4sf)__B, 0);
+ c = vec_min (a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_ss (__m128 __A, __m128 __B)
+{
+ __v4sf a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ /* PowerISA VSX does not allow partial (for just lower float)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper float values) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = vec_max (a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_ps (__m128 __A, __m128 __B)
+{
+ __vector __bool int m = vec_cmpgt ((__v4sf) __B, (__v4sf) __A);
+ return vec_sel (__B, __A, m);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_ps (__m128 __A, __m128 __B)
+{
+ __vector __bool int m = vec_cmpgt ((__v4sf) __A, (__v4sf) __B);
+ return vec_sel (__B, __A, m);
+}
+
+/* Perform logical bit-wise operations on 128-bit values. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_and_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_and ((__v4sf)__A, (__v4sf)__B));
+// return __builtin_ia32_andps (__A, __B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_andnot_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_andc ((__v4sf)__B, (__v4sf)__A));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_or_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_or ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_xor_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_xor ((__v4sf)__A, (__v4sf)__B));
+}
+
+/* Perform a comparison on the four SPFP values of A and B. For each
+ element, if the comparison is true, place a mask of all ones in the
+ result, otherwise a mask of zeros. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpeq ((__v4sf)__A,(__v4sf) __B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_ps (__m128 __A, __m128 __B)
+{
+ __v4sf temp = (__v4sf ) vec_cmpeq ((__v4sf) __A, (__v4sf)__B);
+ return ((__m128)vec_nor (temp, temp));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnlt_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnle_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpngt_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnge_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpord_ps (__m128 __A, __m128 __B)
+{
+ __vector unsigned int a, b;
+ __vector unsigned int c, d;
+ static const __vector unsigned int float_exp_mask =
+ { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+
+ a = (__vector unsigned int) vec_abs ((__v4sf)__A);
+ b = (__vector unsigned int) vec_abs ((__v4sf)__B);
+ c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
+ d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
+ return ((__m128 ) vec_and (c, d));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpunord_ps (__m128 __A, __m128 __B)
+{
+ __vector unsigned int a, b;
+ __vector unsigned int c, d;
+ static const __vector unsigned int float_exp_mask =
+ { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+
+ a = (__vector unsigned int) vec_abs ((__v4sf)__A);
+ b = (__vector unsigned int) vec_abs ((__v4sf)__B);
+ c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
+ d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
+ return ((__m128 ) vec_or (c, d));
+}
+
+/* Perform a comparison on the lower SPFP values of A and B. If the
+ comparison is true, place a mask of all ones in the result, otherwise a
+ mask of zeros. The upper three SPFP values are passed through from A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpeq(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmplt(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmple(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpgt(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpge(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpeq(a, b);
+ c = vec_nor (c, c);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnlt_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpge(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnle_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpgt(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpngt_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmple(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnge_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+ * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmplt(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpord_ss (__m128 __A, __m128 __B)
+{
+ __vector unsigned int a, b;
+ __vector unsigned int c, d;
+ static const __vector unsigned int float_exp_mask =
+ { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+
+ a = (__vector unsigned int) vec_abs ((__v4sf)__A);
+ b = (__vector unsigned int) vec_abs ((__v4sf)__B);
+ c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
+ d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
+ c = vec_and (c, d);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpunord_ss (__m128 __A, __m128 __B)
+{
+ __vector unsigned int a, b;
+ __vector unsigned int c, d;
+ static const __vector unsigned int float_exp_mask =
+ { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+
+ a = (__vector unsigned int) vec_abs ((__v4sf)__A);
+ b = (__vector unsigned int) vec_abs ((__v4sf)__B);
+ c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
+ d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
+ c = vec_or (c, d);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
+}
+
+/* Compare the lower SPFP values of A and B and return 1 if true
+ and 0 if false. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comieq_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] == __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comilt_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] < __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comile_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] <= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comigt_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] > __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comige_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] >= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comineq_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] != __B[0]);
+}
+
+/* FIXME
+ * The _mm_ucomi??_ss implementations below are exactly the same as
+ * _mm_comi??_ss because GCC for PowerPC only generates unordered
+ * compares (scalar and vector).
+ * Technically _mm_comieq_ss et al should be using the ordered
+ * compare and signal for QNaNs.
+ * The _mm_ucomieq_sd et al should be OK, as is.
+ */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomieq_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] == __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomilt_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] < __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomile_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] <= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomigt_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] > __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomige_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] >= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomineq_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] != __B[0]);
+}
+
+extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_f32 (__m128 __A)
+{
+ return ((__v4sf)__A)[0];
+}
+
+/* Convert the lower SPFP value to a 32-bit integer according to the current
+ rounding mode. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_si32 (__m128 __A)
+{
+ __m64 res = 0;
+#ifdef _ARCH_PWR8
+ double dtmp;
+ __asm__(
+#ifdef __LITTLE_ENDIAN__
+ "xxsldwi %x0,%x0,%x0,3;\n"
+#endif
+ "xscvspdp %x2,%x0;\n"
+ "fctiw %2,%2;\n"
+ "mfvsrd %1,%x2;\n"
+ : "+wa" (__A),
+ "=r" (res),
+ "=f" (dtmp)
+ : );
+#else
+ res = __builtin_rint(__A[0]);
+#endif
+ return (res);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_ss2si (__m128 __A)
+{
+ return _mm_cvtss_si32 (__A);
+}
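+
+/* Illustrative note, not part of the original header: because the
+   conversion above honors the current rounding mode, a value exactly
+   halfway between two integers rounds to even under the default
+   round-to-nearest-even mode.  Assuming _mm_set_ss from earlier in this
+   header:
+
+     int r = _mm_cvtss_si32 (_mm_set_ss (2.5f));   yields r == 2
+
+   Use _mm_cvttss_si32 below when truncation toward zero is wanted. */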
+
+/* Convert the lower SPFP value to a 64-bit integer according to the
+ current rounding mode. */
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_si64 (__m128 __A)
+{
+ __m64 res = 0;
+#ifdef _ARCH_PWR8
+ double dtmp;
+ __asm__(
+#ifdef __LITTLE_ENDIAN__
+ "xxsldwi %x0,%x0,%x0,3;\n"
+#endif
+ "xscvspdp %x2,%x0;\n"
+ "fctid %2,%2;\n"
+ "mfvsrd %1,%x2;\n"
+ : "+wa" (__A),
+ "=r" (res),
+ "=f" (dtmp)
+ : );
+#else
+ res = __builtin_llrint(__A[0]);
+#endif
+ return (res);
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_si64x (__m128 __A)
+{
+ return _mm_cvtss_si64 ((__v4sf) __A);
+}
+
+/* Constants for use with _mm_prefetch. */
+enum _mm_hint
+{
+ /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set. */
+ _MM_HINT_ET0 = 7,
+ _MM_HINT_ET1 = 6,
+ _MM_HINT_T0 = 3,
+ _MM_HINT_T1 = 2,
+ _MM_HINT_T2 = 1,
+ _MM_HINT_NTA = 0
+};
+
+/* Loads one cache line from address P to a location "closer" to the
+ processor. The selector I specifies the type of prefetch operation. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_prefetch (const void *__P, enum _mm_hint __I)
+{
+ /* The hint parameter is currently ignored on PowerPC. */
+ __builtin_prefetch (__P);
+}
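+
+/* Illustrative usage sketch, not part of the original header: the hint
+   argument is accepted for source compatibility but, as noted above, is
+   currently ignored on PowerPC.  With a caller-provided pointer ptr:
+
+     _mm_prefetch (ptr, _MM_HINT_T0);   maps to __builtin_prefetch (ptr)
+ */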
+
+/* Convert the two lower SPFP values to 32-bit integers according to the
+ current rounding mode. Return the integers in packed form. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pi32 (__m128 __A)
+{
+ __v4sf temp, rounded;
+ __vector unsigned long long result;
+
+ /* Splat two lower SPFP values to both halves. */
+ temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
+ rounded = vec_rint(temp);
+ result = (__vector unsigned long long) vec_cts (rounded, 0);
+
+ return (__m64) ((__vector long long) result)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_ps2pi (__m128 __A)
+{
+ return _mm_cvtps_pi32 (__A);
+}
+
+/* Truncate the lower SPFP value to a 32-bit integer. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_si32 (__m128 __A)
+{
+ /* Extract the lower float element. */
+ float temp = __A[0];
+ /* truncate to 32-bit integer and return. */
+ return temp;
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_ss2si (__m128 __A)
+{
+ return _mm_cvttss_si32 (__A);
+}
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_si64 (__m128 __A)
+{
+ /* Extract the lower float element. */
+ float temp = __A[0];
+ /* truncate to a 64-bit integer and return. */
+ return temp;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_si64x (__m128 __A)
+{
+ /* Extract the lower float element. */
+ float temp = __A[0];
+ /* truncate to a 64-bit integer and return. */
+ return temp;
+}
+
+/* Truncate the two lower SPFP values to 32-bit integers. Return the
+ integers in packed form. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttps_pi32 (__m128 __A)
+{
+ __v4sf temp;
+ __vector unsigned long long result;
+
+ /* Splat two lower SPFP values to both halves. */
+ temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
+ result = (__vector unsigned long long) vec_cts (temp, 0);
+
+ return (__m64) ((__vector long long) result)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_ps2pi (__m128 __A)
+{
+ return _mm_cvttps_pi32 (__A);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi32_ss (__m128 __A, int __B)
+{
+ float temp = __B;
+ __A[0] = temp;
+
+ return __A;
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_si2ss (__m128 __A, int __B)
+{
+ return _mm_cvtsi32_ss (__A, __B);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A. */
+/* Intel intrinsic. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64_ss (__m128 __A, long long __B)
+{
+ float temp = __B;
+ __A[0] = temp;
+
+ return __A;
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64x_ss (__m128 __A, long long __B)
+{
+ return _mm_cvtsi64_ss (__A, __B);
+}
+
+/* Convert the two 32-bit values in B to SPFP form and insert them
+ as the two lower elements in A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi32_ps (__m128 __A, __m64 __B)
+{
+ __vector signed int vm1;
+ __vector float vf1;
+
+ vm1 = (__vector signed int) (__vector unsigned long long) {__B, __B};
+ vf1 = (__vector float) vec_ctf (vm1, 0);
+
+ return ((__m128) (__vector unsigned long long)
+ { ((__vector unsigned long long)vf1) [0],
+ ((__vector unsigned long long)__A) [1]});
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_pi2ps (__m128 __A, __m64 __B)
+{
+ return _mm_cvtpi32_ps (__A, __B);
+}
+
+/* Convert the four signed 16-bit values in A to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi16_ps (__m64 __A)
+{
+ __vector signed short vs8;
+ __vector signed int vi4;
+ __vector float vf1;
+
+ vs8 = (__vector signed short) (__vector unsigned long long) { __A, __A };
+ vi4 = vec_vupklsh (vs8);
+ vf1 = (__vector float) vec_ctf (vi4, 0);
+
+ return (__m128) vf1;
+}
+
+/* Convert the four unsigned 16-bit values in A to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpu16_ps (__m64 __A)
+{
+ const __vector unsigned short zero =
+ { 0, 0, 0, 0, 0, 0, 0, 0 };
+ __vector unsigned short vs8;
+ __vector unsigned int vi4;
+ __vector float vf1;
+
+ vs8 = (__vector unsigned short) (__vector unsigned long long) { __A, __A };
+ vi4 = (__vector unsigned int) vec_mergel
+#ifdef __LITTLE_ENDIAN__
+ (vs8, zero);
+#else
+ (zero, vs8);
+#endif
+ vf1 = (__vector float) vec_ctf (vi4, 0);
+
+ return (__m128) vf1;
+}
+
+/* Convert the low four signed 8-bit values in A to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi8_ps (__m64 __A)
+{
+ __vector signed char vc16;
+ __vector signed short vs8;
+ __vector signed int vi4;
+ __vector float vf1;
+
+ vc16 = (__vector signed char) (__vector unsigned long long) { __A, __A };
+ vs8 = vec_vupkhsb (vc16);
+ vi4 = vec_vupkhsh (vs8);
+ vf1 = (__vector float) vec_ctf (vi4, 0);
+
+ return (__m128) vf1;
+}
+
+/* Convert the low four unsigned 8-bit values in A to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpu8_ps (__m64 __A)
+{
+ const __vector unsigned char zero =
+ { 0, 0, 0, 0, 0, 0, 0, 0 };
+ __vector unsigned char vc16;
+ __vector unsigned short vs8;
+ __vector unsigned int vi4;
+ __vector float vf1;
+
+ vc16 = (__vector unsigned char) (__vector unsigned long long) { __A, __A };
+#ifdef __LITTLE_ENDIAN__
+ vs8 = (__vector unsigned short) vec_mergel (vc16, zero);
+ vi4 = (__vector unsigned int) vec_mergeh (vs8,
+ (__vector unsigned short) zero);
+#else
+ vs8 = (__vector unsigned short) vec_mergel (zero, vc16);
+ vi4 = (__vector unsigned int) vec_mergeh ((__vector unsigned short) zero,
+ vs8);
+#endif
+ vf1 = (__vector float) vec_ctf (vi4, 0);
+
+ return (__m128) vf1;
+}
+
+/* Convert the four signed 32-bit values in A and B to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
+{
+ __vector signed int vi4;
+ __vector float vf4;
+
+ vi4 = (__vector signed int) (__vector unsigned long long) { __A, __B };
+ vf4 = (__vector float) vec_ctf (vi4, 0);
+ return (__m128) vf4;
+}
+
+/* Convert the four SPFP values in A to four signed 16-bit integers. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pi16 (__m128 __A)
+{
+ __v4sf rounded;
+ __vector signed int temp;
+ __vector unsigned long long result;
+
+ rounded = vec_rint(__A);
+ temp = vec_cts (rounded, 0);
+ result = (__vector unsigned long long) vec_pack (temp, temp);
+
+ return (__m64) ((__vector long long) result)[0];
+}
+
+/* Convert the four SPFP values in A to four signed 8-bit integers. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pi8 (__m128 __A)
+{
+ __v4sf rounded;
+ __vector signed int tmp_i;
+ static const __vector signed int zero = {0, 0, 0, 0};
+ __vector signed short tmp_s;
+ __vector signed char res_v;
+
+ rounded = vec_rint(__A);
+ tmp_i = vec_cts (rounded, 0);
+ tmp_s = vec_pack (tmp_i, zero);
+ res_v = vec_pack (tmp_s, tmp_s);
+ return (__m64) ((__vector long long) res_v)[0];
+}
+
+/* Selects four specific SPFP values from A and B based on MASK. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
+{
+ unsigned long element_selector_10 = __mask & 0x03;
+ unsigned long element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned int permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+ 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+ };
+ __vector unsigned int t;
+
+ t[0] = permute_selectors[element_selector_10];
+ t[1] = permute_selectors[element_selector_32];
+ t[2] = permute_selectors[element_selector_54] + 0x10101010;
+ t[3] = permute_selectors[element_selector_76] + 0x10101010;
+ return vec_perm ((__v4sf) __A, (__v4sf)__B, (__vector unsigned char)t);
+}
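+
+/* Illustrative usage sketch, not part of the original header: the mask is
+   normally built with the _MM_SHUFFLE macro, picking two elements from __A
+   for the low half of the result and two from __B for the high half.
+   With caller-provided vectors a and b:
+
+     __m128 r = _mm_shuffle_ps (a, b, _MM_SHUFFLE (3, 2, 1, 0));
+
+   copies a[0], a[1] into r[0], r[1] and b[2], b[3] into r[2], r[3],
+   realized here as a single vec_perm through the selector vector t. */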
+
+/* Selects and interleaves the upper two SPFP values from A and B. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) vec_vmrglw ((__v4sf) __A, (__v4sf)__B);
+}
+
+/* Selects and interleaves the lower two SPFP values from A and B. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) vec_vmrghw ((__v4sf) __A, (__v4sf)__B);
+}
+
+/* Sets the upper two SPFP values with 64-bits of data loaded from P;
+ the lower two values are passed through from A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadh_pi (__m128 __A, __m64 const *__P)
+{
+ __vector unsigned long long __a = (__vector unsigned long long)__A;
+ __vector unsigned long long __p = vec_splats(*__P);
+ __a [1] = __p [1];
+
+ return (__m128)__a;
+}
+
+/* Stores the upper two SPFP values of A into P. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeh_pi (__m64 *__P, __m128 __A)
+{
+ __vector unsigned long long __a = (__vector unsigned long long) __A;
+
+ *__P = __a[1];
+}
+
+/* Moves the upper two values of B into the lower two values of A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movehl_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) vec_mergel ((__vector unsigned long long)__B,
+ (__vector unsigned long long)__A);
+}
+
+/* Moves the lower two values of B into the upper two values of A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movelh_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) vec_mergeh ((__vector unsigned long long)__A,
+ (__vector unsigned long long)__B);
+}
+
+/* Sets the lower two SPFP values with 64-bits of data loaded from P;
+ the upper two values are passed through from A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadl_pi (__m128 __A, __m64 const *__P)
+{
+ __vector unsigned long long __a = (__vector unsigned long long)__A;
+ __vector unsigned long long __p = vec_splats(*__P);
+ __a [0] = __p [0];
+
+ return (__m128)__a;
+}
+
+/* Stores the lower two SPFP values of A into P. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storel_pi (__m64 *__P, __m128 __A)
+{
+ __vector unsigned long long __a = (__vector unsigned long long) __A;
+
+ *__P = __a[0];
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum. */
+
+/* Creates a 4-bit mask from the most significant bits of the SPFP values. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_ps (__m128 __A)
+{
+ __vector unsigned long long result;
+ static const __vector unsigned int perm_mask =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x00204060, 0x80808080, 0x80808080, 0x80808080
+#else
+ 0x80808080, 0x80808080, 0x80808080, 0x00204060
+#endif
+ };
+
+ result = ((__vector unsigned long long)
+ vec_vbpermq ((__vector unsigned char) __A,
+ (__vector unsigned char) perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+ return result[1];
+#else
+ return result[0];
+#endif
+}
+#endif /* _ARCH_PWR8 */
+
+/* Create a vector with all four elements equal to *P. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load1_ps (float const *__P)
+{
+ return _mm_set1_ps (*__P);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_ps1 (float const *__P)
+{
+ return _mm_load1_ps (__P);
+}
+
+/* Extracts one of the four words of A. The selector N must be immediate. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_extract_pi16 (__m64 const __A, int const __N)
+{
+ unsigned int shiftr = __N & 3;
+#ifdef __BIG_ENDIAN__
+ shiftr = 3 - shiftr;
+#endif
+
+ return ((__A >> (shiftr * 16)) & 0xffff);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pextrw (__m64 const __A, int const __N)
+{
+ return _mm_extract_pi16 (__A, __N);
+}
+
+/* Inserts word D into one of four words of A. The selector N must be
+ immediate. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
+{
+ const int shiftl = (__N & 3) * 16;
+ const __m64 shiftD = (const __m64) __D << shiftl;
+ const __m64 mask = 0xffffUL << shiftl;
+ __m64 result = (__A & (~mask)) | (shiftD & mask);
+
+ return (result);
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pinsrw (__m64 const __A, int const __D, int const __N)
+{
+ return _mm_insert_pi16 (__A, __D, __N);
+}
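+
+/* Illustrative usage sketch, not part of the original header: extract and
+   insert operate on one of the four 16-bit lanes of a caller-provided
+   __m64 value a:
+
+     int   w = _mm_extract_pi16 (a, 2);           third 16-bit word of a
+     __m64 b = _mm_insert_pi16 (a, 0x1234, 2);    a with that word replaced
+
+   Both selectors are masked with 3, so only the low two bits of N matter. */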
+
+/* Compute the element-wise maximum of signed 16-bit values. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_pi16 (__m64 __A, __m64 __B)
+{
+#if _ARCH_PWR8
+ __vector signed short a, b, r;
+ __vector __bool short c;
+
+ a = (__vector signed short)vec_splats (__A);
+ b = (__vector signed short)vec_splats (__B);
+ c = (__vector __bool short)vec_cmpgt (a, b);
+ r = vec_sel (b, a, c);
+ return (__m64) ((__vector long long) r)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __A;
+ m2.as_m64 = __B;
+
+ res.as_short[0] =
+ (m1.as_short[0] > m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
+ res.as_short[1] =
+ (m1.as_short[1] > m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
+ res.as_short[2] =
+ (m1.as_short[2] > m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
+ res.as_short[3] =
+ (m1.as_short[3] > m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
+
+ return (__m64) res.as_m64;
+#endif
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pmaxsw (__m64 __A, __m64 __B)
+{
+ return _mm_max_pi16 (__A, __B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_pu8 (__m64 __A, __m64 __B)
+{
+#if _ARCH_PWR8
+ __vector unsigned char a, b, r;
+ __vector __bool char c;
+
+ a = (__vector unsigned char)vec_splats (__A);
+ b = (__vector unsigned char)vec_splats (__B);
+ c = (__vector __bool char)vec_cmpgt (a, b);
+ r = vec_sel (b, a, c);
+ return (__m64) ((__vector long long) r)[0];
+#else
+ __m64_union m1, m2, res;
+ long i;
+
+ m1.as_m64 = __A;
+ m2.as_m64 = __B;
+
+ for (i = 0; i < 8; i++)
+ res.as_char[i] =
+ ((unsigned char) m1.as_char[i] > (unsigned char) m2.as_char[i]) ?
+ m1.as_char[i] : m2.as_char[i];
+
+ return (__m64) res.as_m64;
+#endif
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pmaxub (__m64 __A, __m64 __B)
+{
+ return _mm_max_pu8 (__A, __B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_pi16 (__m64 __A, __m64 __B)
+{
+#if _ARCH_PWR8
+ __vector signed short a, b, r;
+ __vector __bool short c;
+
+ a = (__vector signed short)vec_splats (__A);
+ b = (__vector signed short)vec_splats (__B);
+ c = (__vector __bool short)vec_cmplt (a, b);
+ r = vec_sel (b, a, c);
+ return (__m64) ((__vector long long) r)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __A;
+ m2.as_m64 = __B;
+
+ res.as_short[0] =
+ (m1.as_short[0] < m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
+ res.as_short[1] =
+ (m1.as_short[1] < m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
+ res.as_short[2] =
+ (m1.as_short[2] < m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
+ res.as_short[3] =
+ (m1.as_short[3] < m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
+
+ return (__m64) res.as_m64;
+#endif
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pminsw (__m64 __A, __m64 __B)
+{
+ return _mm_min_pi16 (__A, __B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_pu8 (__m64 __A, __m64 __B)
+{
+#if _ARCH_PWR8
+ __vector unsigned char a, b, r;
+ __vector __bool char c;
+
+ a = (__vector unsigned char)vec_splats (__A);
+ b = (__vector unsigned char)vec_splats (__B);
+ c = (__vector __bool char)vec_cmplt (a, b);
+ r = vec_sel (b, a, c);
+ return (__m64) ((__vector long long) r)[0];
+#else
+ __m64_union m1, m2, res;
+ long i;
+
+ m1.as_m64 = __A;
+ m2.as_m64 = __B;
+
+ for (i = 0; i < 8; i++)
+ res.as_char[i] =
+ ((unsigned char) m1.as_char[i] < (unsigned char) m2.as_char[i]) ?
+ m1.as_char[i] : m2.as_char[i];
+
+ return (__m64) res.as_m64;
+#endif
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pminub (__m64 __A, __m64 __B)
+{
+ return _mm_min_pu8 (__A, __B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_pi8 (__m64 __A)
+{
+ unsigned long long p =
+#ifdef __LITTLE_ENDIAN__
+ 0x0008101820283038UL; // permute control for sign bits
+#else
+ 0x3830282018100800UL; // permute control for sign bits
+#endif
+ return __builtin_bpermd (p, __A);
+}
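+
+/* Illustrative note, not part of the original header: the permute control
+   p lists the (big-endian) bit positions of the eight byte sign bits, so
+   __builtin_bpermd packs them into the low byte of the result, matching
+   the SSE semantics.  For example,
+
+     int m = _mm_movemask_pi8 ((__m64) 0x8000000000000080UL);
+
+   is expected to give m == 0x81 (sign bits of bytes 0 and 7 are set). */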
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pmovmskb (__m64 __A)
+{
+ return _mm_movemask_pi8 (__A);
+}
+
+/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
+ in B and produce the high 16 bits of the 32-bit results. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhi_pu16 (__m64 __A, __m64 __B)
+{
+ __vector unsigned short a, b;
+ __vector unsigned short c;
+ __vector unsigned int w0, w1;
+ __vector unsigned char xform1 = {
+#ifdef __LITTLE_ENDIAN__
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
+ 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+#endif
+ };
+
+ a = (__vector unsigned short)vec_splats (__A);
+ b = (__vector unsigned short)vec_splats (__B);
+
+ w0 = vec_vmuleuh (a, b);
+ w1 = vec_vmulouh (a, b);
+ c = (__vector unsigned short)vec_perm (w0, w1, xform1);
+
+ return (__m64) ((__vector long long) c)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pmulhuw (__m64 __A, __m64 __B)
+{
+ return _mm_mulhi_pu16 (__A, __B);
+}
+
+/* Return a combination of the four 16-bit values in A. The selector
+ must be an immediate. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_pi16 (__m64 __A, int const __N)
+{
+ unsigned long element_selector_10 = __N & 0x03;
+ unsigned long element_selector_32 = (__N >> 2) & 0x03;
+ unsigned long element_selector_54 = (__N >> 4) & 0x03;
+ unsigned long element_selector_76 = (__N >> 6) & 0x03;
+ static const unsigned short permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+ 0x0607, 0x0405, 0x0203, 0x0001
+#endif
+ };
+ __m64_union t;
+ __vector unsigned long long a, p, r;
+
+#ifdef __LITTLE_ENDIAN__
+ t.as_short[0] = permute_selectors[element_selector_10];
+ t.as_short[1] = permute_selectors[element_selector_32];
+ t.as_short[2] = permute_selectors[element_selector_54];
+ t.as_short[3] = permute_selectors[element_selector_76];
+#else
+ t.as_short[3] = permute_selectors[element_selector_10];
+ t.as_short[2] = permute_selectors[element_selector_32];
+ t.as_short[1] = permute_selectors[element_selector_54];
+ t.as_short[0] = permute_selectors[element_selector_76];
+#endif
+ p = vec_splats (t.as_m64);
+ a = vec_splats (__A);
+ r = vec_perm (a, a, (__vector unsigned char)p);
+ return (__m64) ((__vector long long) r)[0];
+}
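+
+/* Illustrative usage sketch, not part of the original header: the selector
+   uses the same two-bits-per-destination-word layout as _mm_shuffle_ps.
+   With a caller-provided a:
+
+     __m64 r = _mm_shuffle_pi16 (a, 0x1B);
+
+   reverses the four 16-bit words, since 0x1B encodes the selectors 3,2,1,0. */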
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pshufw (__m64 __A, int const __N)
+{
+ return _mm_shuffle_pi16 (__A, __N);
+}
+
+/* Conditionally store byte elements of A into P. The high bit of each
+ byte in the selector N determines whether the corresponding byte from
+ A is stored. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
+{
+ __m64 hibit = 0x8080808080808080UL;
+ __m64 mask, tmp;
+ __m64 *p = (__m64*)__P;
+
+ tmp = *p;
+ mask = _mm_cmpeq_pi8 ((__N & hibit), hibit);
+ tmp = (tmp & (~mask)) | (__A & mask);
+ *p = tmp;
+}
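+
+/* Illustrative usage sketch, not part of the original header: only bytes
+   whose mask byte has its high bit set are written.  With caller-provided
+   a and a destination buffer buf:
+
+     __m64 mask = (__m64) 0x00000000000000FFUL;
+     _mm_maskmove_si64 (a, mask, (char *) buf);
+
+   stores just the low byte of a, leaving the other seven destination bytes
+   unchanged (implemented above as a read-modify-write of *__P). */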
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_maskmovq (__m64 __A, __m64 __N, char *__P)
+{
+ _mm_maskmove_si64 (__A, __N, __P);
+}
+
+/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_pu8 (__m64 __A, __m64 __B)
+{
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats (__A);
+ b = (__vector unsigned char)vec_splats (__B);
+ c = vec_avg (a, b);
+ return (__m64) ((__vector long long) c)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pavgb (__m64 __A, __m64 __B)
+{
+ return _mm_avg_pu8 (__A, __B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_pu16 (__m64 __A, __m64 __B)
+{
+ __vector unsigned short a, b, c;
+
+ a = (__vector unsigned short)vec_splats (__A);
+ b = (__vector unsigned short)vec_splats (__B);
+ c = vec_avg (a, b);
+ return (__m64) ((__vector long long) c)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pavgw (__m64 __A, __m64 __B)
+{
+ return _mm_avg_pu16 (__A, __B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sad_pu8 (__m64 __A, __m64 __B)
+{
+ __vector unsigned char a, b;
+ __vector unsigned char vmin, vmax, vabsdiff;
+ __vector signed int vsum;
+ const __vector unsigned int zero =
+ { 0, 0, 0, 0 };
+ __m64_union result = {0};
+
+ a = (__vector unsigned char) (__vector unsigned long long) { 0UL, __A };
+ b = (__vector unsigned char) (__vector unsigned long long) { 0UL, __B };
+ vmin = vec_min (a, b);
+ vmax = vec_max (a, b);
+ vabsdiff = vec_sub (vmax, vmin);
+ /* Sum four groups of bytes into integers. */
+ vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
+ /* Sum across four integers with integer result. */
+ vsum = vec_sums (vsum, (__vector signed int) zero);
+ /* The sum is in the right most 32-bits of the vector result.
+ Transfer to a GPR and truncate to 16 bits. */
+ result.as_short[0] = vsum[3];
+ return result.as_m64;
+}
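+
+/* Worked example, not part of the original header: with operands whose
+   bytes differ by exactly 1 in every lane,
+
+     __m64 r = _mm_sad_pu8 ((__m64) 0x0101010101010101UL,
+                            (__m64) 0x0202020202020202UL);
+
+   each absolute difference is 1, so the low 16-bit word of r is expected
+   to be 8 and the upper words are zero. */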
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_psadbw (__m64 __A, __m64 __B)
+{
+ return _mm_sad_pu8 (__A, __B);
+}
+
+/* Stores the data in A to the address P without polluting the caches. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_pi (__m64 *__P, __m64 __A)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ " dcbtstt 0,%0"
+ :
+ : "b" (__P)
+ : "memory"
+ );
+ *__P = __A;
+}
+
+/* Likewise. The address must be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_ps (float *__P, __m128 __A)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ " dcbtstt 0,%0"
+ :
+ : "b" (__P)
+ : "memory"
+ );
+ _mm_store_ps (__P, __A);
+}
+
+/* Guarantees that every preceding store is globally visible before
+ any subsequent store. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sfence (void)
+{
+ /* Generate a light weight sync. */
+ __atomic_thread_fence (__ATOMIC_RELEASE);
+}
+
+/* The execution of the next instruction is delayed by an implementation
+ specific amount of time. The instruction does not modify the
+ architectural state. It does not require SSE support in the
+ processor--on x86 the encoding is a nop on processors that do not
+ support it. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_pause (void)
+{
+ /* There is no exact match with this construct, but the following is
+ close to the desired effect. */
+#if _ARCH_PWR8
+ /* On power8 and later processors we can depend on Program Priority
+ (PRI) and the associated "very low" PRI setting. Since we don't know
+ what PRI this thread is running at we: 1) save the current PRI
+ from the PPR SPR into a local GPR, 2) set the PRI to "very low"
+ via the special or 31,31,31 encoding, 3) issue an "isync" to
+ ensure the PRI change takes effect before we execute any more
+ instructions.
+ Now we can execute a lwsync (release barrier) while we execute
+ this thread at "very low" PRI. Finally we restore the original
+ PRI and continue execution. */
+ unsigned long __PPR;
+
+ __asm__ volatile (
+ " mfppr %0;"
+ " or 31,31,31;"
+ " isync;"
+ " lwsync;"
+ " isync;"
+ " mtppr %0;"
+ : "=r" (__PPR)
+ :
+ : "memory"
+ );
+#else
+ /* For older processor where we may not even have Program Priority
+ controls we can only depend on Heavy Weight Sync. */
+ __atomic_thread_fence (__ATOMIC_SEQ_CST);
+#endif
+}
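+
+/* Illustrative usage sketch, not part of the original header: as on x86,
+   the typical use is inside a spin-wait loop to reduce the priority of
+   the waiting hardware thread.  With a caller-provided flag set by
+   another thread:
+
+     while (__atomic_load_n (&flag, __ATOMIC_ACQUIRE) == 0)
+       _mm_pause ();
+ */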
+
+/* Transpose the 4x4 matrix composed of row[0-3]. */
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+ __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
+ __v4sf __t0 = vec_vmrghw (__r0, __r1); \
+ __v4sf __t1 = vec_vmrghw (__r2, __r3); \
+ __v4sf __t2 = vec_vmrglw (__r0, __r1); \
+ __v4sf __t3 = vec_vmrglw (__r2, __r3); \
+ (row0) = (__v4sf)vec_mergeh ((__vector long long)__t0, \
+ (__vector long long)__t1); \
+ (row1) = (__v4sf)vec_mergel ((__vector long long)__t0, \
+ (__vector long long)__t1); \
+ (row2) = (__v4sf)vec_mergeh ((__vector long long)__t2, \
+ (__vector long long)__t3); \
+ (row3) = (__v4sf)vec_mergel ((__vector long long)__t2, \
+ (__vector long long)__t3); \
+} while (0)
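+
+/* Illustrative usage sketch, not part of the original header: given four
+   caller-provided row vectors r0..r3 of type __m128, the macro transposes
+   them in place:
+
+     _MM_TRANSPOSE4_PS (r0, r1, r2, r3);
+
+   afterwards r0 holds the former column 0 (old r0[0], r1[0], r2[0], r3[0]),
+   r1 the former column 1, and so on. */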
+
+/* For backward source compatibility. */
+//# include <emmintrin.h>
+
+#else
+#include_next <xmmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* _XMMINTRIN_H_INCLUDED */