#if defined(_MSC_VER) && !defined(__clang__)
  unsigned __int8 m128i_u8[16];
#define __ATTRIBUTE_SSE2__
typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
typedef signed char __v16qs __attribute__((__vector_size__(16)));
#define __ATTRIBUTE_SSE2__ __attribute__((__target__("sse2"), __min_vector_width__(128)))
#define __ATTRIBUTE_SSE2__ __attribute__((__target__("sse2")))
#define __INTRIN_INLINE_SSE2 __INTRIN_INLINE __ATTRIBUTE_SSE2__
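/* Illustrative sketch, not part of the original header: on the GCC/Clang path the
 * __INTRIN_INLINE_SSE2 macro above is what qualifies each inline SSE2 intrinsic,
 * roughly in this shape (signature as declared further down in this file):
 *
 *   __INTRIN_INLINE_SSE2 __m128d _mm_add_pd(__m128d a, __m128d b);
 */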
extern __m128d _mm_cvtsi64_sd(__m128d a, long long b);
extern long long _mm_cvtsd_si64(__m128d a);
extern long long _mm_cvttsd_si64(__m128d a);
extern __m128i _mm_set_epi16(short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0);
extern __m128i _mm_set_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0);
extern __m128i _mm_setr_epi16(short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7);
extern __m128i _mm_setr_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0);
#define _mm_set_pd1(a) _mm_set1_pd(a)
#define _mm_load_pd1(p) _mm_load1_pd(p)
#define _mm_store_pd1(p, a) _mm_store1_pd((p), (a))
#define _mm_bslli_si128 _mm_slli_si128
#define _mm_bsrli_si128 _mm_srli_si128
#define _mm_stream_si64 _mm_stream_si64x
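/* Illustrative usage sketch, not part of the original header: the aliases above only
 * rename existing intrinsics. In code that includes this header (built with SSE2
 * enabled, e.g. any x86-64 target), the hypothetical helper below byte-shifts a
 * vector through the alias. */
static __m128i __demo_bslli(__m128i v)
{
    return _mm_bslli_si128(v, 4); /* expands to _mm_slli_si128(v, 4): shift left by 4 bytes, zero fill */
}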
#if defined(_MSC_VER) && !defined(__clang__)
#pragma intrinsic(_mm_add_sd)
#pragma intrinsic(_mm_add_pd)
#pragma intrinsic(_mm_sub_sd)
#pragma intrinsic(_mm_sub_pd)
#pragma intrinsic(_mm_mul_sd)
#pragma intrinsic(_mm_mul_pd)
#pragma intrinsic(_mm_div_sd)
#pragma intrinsic(_mm_div_pd)
#pragma intrinsic(_mm_sqrt_sd)
#pragma intrinsic(_mm_sqrt_pd)
#pragma intrinsic(_mm_min_sd)
#pragma intrinsic(_mm_min_pd)
#pragma intrinsic(_mm_max_sd)
#pragma intrinsic(_mm_max_pd)
#pragma intrinsic(_mm_and_pd)
#pragma intrinsic(_mm_andnot_pd)
#pragma intrinsic(_mm_or_pd)
#pragma intrinsic(_mm_xor_pd)
#pragma intrinsic(_mm_cmpeq_pd)
#pragma intrinsic(_mm_cmplt_pd)
#pragma intrinsic(_mm_cmple_pd)
#pragma intrinsic(_mm_cmpgt_pd)
#pragma intrinsic(_mm_cmpge_pd)
#pragma intrinsic(_mm_cmpord_pd)
#pragma intrinsic(_mm_cmpunord_pd)
#pragma intrinsic(_mm_cmpneq_pd)
#pragma intrinsic(_mm_cmpnlt_pd)
#pragma intrinsic(_mm_cmpnle_pd)
#pragma intrinsic(_mm_cmpngt_pd)
#pragma intrinsic(_mm_cmpnge_pd)
#pragma intrinsic(_mm_cmpeq_sd)
#pragma intrinsic(_mm_cmplt_sd)
#pragma intrinsic(_mm_cmple_sd)
#pragma intrinsic(_mm_cmpgt_sd)
#pragma intrinsic(_mm_cmpge_sd)
#pragma intrinsic(_mm_cmpord_sd)
#pragma intrinsic(_mm_cmpunord_sd)
#pragma intrinsic(_mm_cmpneq_sd)
#pragma intrinsic(_mm_cmpnlt_sd)
#pragma intrinsic(_mm_cmpnle_sd)
#pragma intrinsic(_mm_cmpngt_sd)
#pragma intrinsic(_mm_cmpnge_sd)
#pragma intrinsic(_mm_comieq_sd)
#pragma intrinsic(_mm_comilt_sd)
#pragma intrinsic(_mm_comile_sd)
#pragma intrinsic(_mm_comigt_sd)
#pragma intrinsic(_mm_comige_sd)
#pragma intrinsic(_mm_comineq_sd)
#pragma intrinsic(_mm_ucomieq_sd)
#pragma intrinsic(_mm_ucomilt_sd)
#pragma intrinsic(_mm_ucomile_sd)
#pragma intrinsic(_mm_ucomigt_sd)
#pragma intrinsic(_mm_ucomige_sd)
#pragma intrinsic(_mm_ucomineq_sd)
#pragma intrinsic(_mm_cvtpd_ps)
#pragma intrinsic(_mm_cvtps_pd)
#pragma intrinsic(_mm_cvtepi32_pd)
#pragma intrinsic(_mm_cvtpd_epi32)
#pragma intrinsic(_mm_cvtsd_si32)
#pragma intrinsic(_mm_cvtsd_ss)
#pragma intrinsic(_mm_cvtsi32_sd)
#pragma intrinsic(_mm_cvtss_sd)
#pragma intrinsic(_mm_cvttpd_epi32)
#pragma intrinsic(_mm_cvttsd_si32)
#pragma intrinsic(_mm_cvtsd_f64)
#pragma intrinsic(_mm_load_pd)
#pragma intrinsic(_mm_load1_pd)
#pragma intrinsic(_mm_loadr_pd)
#pragma intrinsic(_mm_loadu_pd)
#pragma intrinsic(_mm_load_sd)
#pragma intrinsic(_mm_loadh_pd)
#pragma intrinsic(_mm_loadl_pd)
#pragma intrinsic(_mm_set_sd)
#pragma intrinsic(_mm_set1_pd)
#pragma intrinsic(_mm_set_pd)
#pragma intrinsic(_mm_setr_pd)
#pragma intrinsic(_mm_setzero_pd)
#pragma intrinsic(_mm_move_sd)
#pragma intrinsic(_mm_store_sd)
#pragma intrinsic(_mm_store_pd)
#pragma intrinsic(_mm_store1_pd)
#pragma intrinsic(_mm_storeu_pd)
#pragma intrinsic(_mm_storer_pd)
#pragma intrinsic(_mm_storeh_pd)
#pragma intrinsic(_mm_storel_pd)
#pragma intrinsic(_mm_add_epi8)
#pragma intrinsic(_mm_add_epi16)
#pragma intrinsic(_mm_add_epi32)
#pragma intrinsic(_mm_add_epi64)
#pragma intrinsic(_mm_adds_epi8)
#pragma intrinsic(_mm_adds_epi16)
#pragma intrinsic(_mm_adds_epu8)
#pragma intrinsic(_mm_adds_epu16)
#pragma intrinsic(_mm_avg_epu8)
#pragma intrinsic(_mm_avg_epu16)
#pragma intrinsic(_mm_madd_epi16)
#pragma intrinsic(_mm_max_epi16)
#pragma intrinsic(_mm_max_epu8)
#pragma intrinsic(_mm_min_epi16)
#pragma intrinsic(_mm_min_epu8)
#pragma intrinsic(_mm_mulhi_epi16)
#pragma intrinsic(_mm_mulhi_epu16)
#pragma intrinsic(_mm_mullo_epi16)
#pragma intrinsic(_mm_mul_epu32)
#pragma intrinsic(_mm_sad_epu8)
#pragma intrinsic(_mm_sub_epi8)
#pragma intrinsic(_mm_sub_epi16)
#pragma intrinsic(_mm_sub_epi32)
#pragma intrinsic(_mm_sub_epi64)
#pragma intrinsic(_mm_subs_epi8)
#pragma intrinsic(_mm_subs_epi16)
#pragma intrinsic(_mm_subs_epu8)
#pragma intrinsic(_mm_subs_epu16)
#pragma intrinsic(_mm_and_si128)
#pragma intrinsic(_mm_andnot_si128)
#pragma intrinsic(_mm_or_si128)
#pragma intrinsic(_mm_xor_si128)
#pragma intrinsic(_mm_slli_si128)
#pragma intrinsic(_mm_slli_epi16)
#pragma intrinsic(_mm_sll_epi16)
#pragma intrinsic(_mm_slli_epi32)
#pragma intrinsic(_mm_sll_epi32)
#pragma intrinsic(_mm_slli_epi64)
#pragma intrinsic(_mm_sll_epi64)
#pragma intrinsic(_mm_srai_epi16)
#pragma intrinsic(_mm_sra_epi16)
#pragma intrinsic(_mm_srai_epi32)
#pragma intrinsic(_mm_sra_epi32)
#pragma intrinsic(_mm_srli_si128)
#pragma intrinsic(_mm_srli_epi16)
#pragma intrinsic(_mm_srl_epi16)
#pragma intrinsic(_mm_srli_epi32)
#pragma intrinsic(_mm_srl_epi32)
#pragma intrinsic(_mm_srli_epi64)
#pragma intrinsic(_mm_srl_epi64)
#pragma intrinsic(_mm_cmpeq_epi8)
#pragma intrinsic(_mm_cmpeq_epi16)
#pragma intrinsic(_mm_cmpeq_epi32)
#pragma intrinsic(_mm_cmpgt_epi8)
#pragma intrinsic(_mm_cmpgt_epi16)
#pragma intrinsic(_mm_cmpgt_epi32)
#pragma intrinsic(_mm_cmplt_epi8)
#pragma intrinsic(_mm_cmplt_epi16)
#pragma intrinsic(_mm_cmplt_epi32)
#pragma intrinsic(_mm_cvtsi64_sd)
#pragma intrinsic(_mm_cvtsd_si64)
#pragma intrinsic(_mm_cvttsd_si64)
#pragma intrinsic(_mm_cvtepi32_ps)
#pragma intrinsic(_mm_cvtps_epi32)
#pragma intrinsic(_mm_cvttps_epi32)
#pragma intrinsic(_mm_cvtsi32_si128)
#pragma intrinsic(_mm_cvtsi64_si128)
#pragma intrinsic(_mm_cvtsi128_si32)
#pragma intrinsic(_mm_cvtsi128_si64)
#pragma intrinsic(_mm_load_si128)
#pragma intrinsic(_mm_loadu_si128)
#pragma intrinsic(_mm_loadl_epi64)
#pragma intrinsic(_mm_set_epi64x)
#pragma intrinsic(_mm_set_epi32)
#pragma intrinsic(_mm_set_epi16)
#pragma intrinsic(_mm_set_epi8)
#pragma intrinsic(_mm_set1_epi64x)
#pragma intrinsic(_mm_set1_epi32)
#pragma intrinsic(_mm_set1_epi16)
#pragma intrinsic(_mm_set1_epi8)
#pragma intrinsic(_mm_setl_epi64)
#pragma intrinsic(_mm_setr_epi32)
#pragma intrinsic(_mm_setr_epi16)
#pragma intrinsic(_mm_setr_epi8)
#pragma intrinsic(_mm_setzero_si128)
#pragma intrinsic(_mm_store_si128)
#pragma intrinsic(_mm_storeu_si128)
#pragma intrinsic(_mm_maskmoveu_si128)
#pragma intrinsic(_mm_storel_epi64)
#pragma intrinsic(_mm_stream_pd)
#pragma intrinsic(_mm_stream_si128)
#pragma intrinsic(_mm_stream_si32)
#pragma intrinsic(_mm_clflush)
#pragma intrinsic(_mm_lfence)
#pragma intrinsic(_mm_mfence)
#pragma intrinsic(_mm_packs_epi16)
#pragma intrinsic(_mm_packs_epi32)
#pragma intrinsic(_mm_packus_epi16)
#pragma intrinsic(_mm_extract_epi16)
#pragma intrinsic(_mm_insert_epi16)
#pragma intrinsic(_mm_movemask_epi8)
#pragma intrinsic(_mm_shuffle_epi32)
#pragma intrinsic(_mm_shufflelo_epi16)
#pragma intrinsic(_mm_shufflehi_epi16)
#pragma intrinsic(_mm_unpackhi_epi8)
#pragma intrinsic(_mm_unpackhi_epi16)
#pragma intrinsic(_mm_unpackhi_epi32)
#pragma intrinsic(_mm_unpackhi_epi64)
#pragma intrinsic(_mm_unpacklo_epi8)
#pragma intrinsic(_mm_unpacklo_epi16)
#pragma intrinsic(_mm_unpacklo_epi32)
#pragma intrinsic(_mm_unpacklo_epi64)
#pragma intrinsic(_mm_move_epi64)
#pragma intrinsic(_mm_unpackhi_pd)
#pragma intrinsic(_mm_unpacklo_pd)
#pragma intrinsic(_mm_movemask_pd)
#pragma intrinsic(_mm_shuffle_pd)
#pragma intrinsic(_mm_castpd_ps)
#pragma intrinsic(_mm_castpd_si128)
#pragma intrinsic(_mm_castps_pd)
#pragma intrinsic(_mm_castps_si128)
#pragma intrinsic(_mm_castsi128_ps)
#pragma intrinsic(_mm_castsi128_pd)
#pragma intrinsic(_mm_pause)
  return (__m128d)((__v2df)a + (__v2df)b);
  return (__m128d)((__v2df)a - (__v2df)b);
  return (__m128d)((__v2df)a * (__v2df)b);
  return (__m128d)((__v2df)a / (__v2df)b);
  __m128d __c = __builtin_ia32_sqrtsd((__v2df)b);
  return __extension__(__m128d){__c[0], a[1]};
  return __builtin_ia32_sqrtpd((__v2df)a);
  return __builtin_ia32_minsd((__v2df)a, (__v2df)b);
  return __builtin_ia32_minpd((__v2df)a, (__v2df)b);
  return __builtin_ia32_maxsd((__v2df)a, (__v2df)b);
  return __builtin_ia32_maxpd((__v2df)a, (__v2df)b);
  return (__m128d)((__v2du)a & (__v2du)b);
  return (__m128d)(~(__v2du)a & (__v2du)b);
  return (__m128d)((__v2du)a | (__v2du)b);
  return (__m128d)((__v2du)a ^ (__v2du)b);
  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpltpd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmplepd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpltpd((__v2df)b, (__v2df)a);
  return (__m128d)__builtin_ia32_cmplepd((__v2df)b, (__v2df)a);
  return (__m128d)__builtin_ia32_cmpordpd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)b, (__v2df)a);
  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)b, (__v2df)a);
  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpltsd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmplesd((__v2df)a, (__v2df)b);
  __m128d __c = __builtin_ia32_cmpltsd((__v2df)b, (__v2df)a);
  return __extension__(__m128d){__c[0], a[1]};
  __m128d __c = __builtin_ia32_cmplesd((__v2df)b, (__v2df)a);
  return __extension__(__m128d){__c[0], a[1]};
  return (__m128d)__builtin_ia32_cmpordsd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)a, (__v2df)b);
  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)a, (__v2df)b);
  __m128d __c = __builtin_ia32_cmpnltsd((__v2df)b, (__v2df)a);
  return __extension__(__m128d){__c[0], a[1]};
  __m128d __c = __builtin_ia32_cmpnlesd((__v2df)b, (__v2df)a);
  return __extension__(__m128d){__c[0], a[1]};
  return __builtin_ia32_comisdeq((__v2df)a, (__v2df)b);
  return __builtin_ia32_comisdlt((__v2df)a, (__v2df)b);
  return __builtin_ia32_comisdle((__v2df)a, (__v2df)b);
  return __builtin_ia32_comisdgt((__v2df)a, (__v2df)b);
  return __builtin_ia32_comisdge((__v2df)a, (__v2df)b);
  return __builtin_ia32_comisdneq((__v2df)a, (__v2df)b);
  return __builtin_ia32_ucomisdeq((__v2df)a, (__v2df)b);
  return __builtin_ia32_ucomisdlt((__v2df)a, (__v2df)b);
  return __builtin_ia32_ucomisdle((__v2df)a, (__v2df)b);
  return __builtin_ia32_ucomisdgt((__v2df)a, (__v2df)b);
  return __builtin_ia32_ucomisdge((__v2df)a, (__v2df)b);
  return __builtin_ia32_ucomisdneq((__v2df)a, (__v2df)b);
  return __builtin_ia32_cvtpd2ps((__v2df)a);
#if HAS_BUILTIN(__builtin_convertvector)
  return (__m128d)__builtin_convertvector(__builtin_shufflevector((__v4sf)a, (__v4sf)a, 0, 1), __v2df);
  return __builtin_ia32_cvtps2pd(a);
#if HAS_BUILTIN(__builtin_convertvector)
  return (__m128d)__builtin_convertvector(__builtin_shufflevector((__v4si)a, (__v4si)a, 0, 1), __v2df);
  return __builtin_ia32_cvtdq2pd((__v4si)a);
  return (__m128i)__builtin_ia32_cvtpd2dq((__v2df)a);
  return __builtin_ia32_cvtsd2si((__v2df)a);
  return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)a, (__v2df)b);
  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)a);
  return __builtin_ia32_cvttsd2si((__v2df)a);
  return (__m64)__builtin_ia32_cvtpd2pi((__v2df)a);
  return (__m64)__builtin_ia32_cvttpd2pi((__v2df)a);
  return __builtin_ia32_cvtpi2pd((__v2si)a);
  return *(const __m128d *)dp;
  struct __mm_load1_pd_struct {
  double __u = ((const struct __mm_load1_pd_struct *)dp)->__u;
  return __extension__(__m128d){__u, __u};
#define _MM_SHUFFLE2(fp1, fp0) (((fp1) << 1) | (fp0))
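/* Illustrative usage sketch, not part of the original header: _MM_SHUFFLE2 packs the
 * two selector bits consumed by _mm_shuffle_pd (defined near the end of this file).
 * Hypothetical helper, for code that includes this header with SSE2 enabled. */
static __m128d __demo_shuffle2(__m128d a, __m128d b)
{
    /* selector (0 << 1) | 1: result lane 0 takes a[1], result lane 1 takes b[0] */
    return _mm_shuffle_pd(a, b, _MM_SHUFFLE2(0, 1));
}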
#if HAS_BUILTIN(__builtin_shufflevector)
  __m128d u = *(const __m128d *)dp;
  return __builtin_shufflevector((__v2df)u, (__v2df)u, 1, 0);
  return (__m128d){ dp[1], dp[0] };
  return ((const struct __loadu_pd *)dp)->__v;
  struct __loadu_si64 {
  long long __u = ((const struct __loadu_si64 *)a)->__v;
  return __extension__(__m128i)(__v2di){__u, 0LL};
  struct __loadu_si32 {
  int __u = ((const struct __loadu_si32 *)a)->__v;
  return __extension__(__m128i)(__v4si){__u, 0, 0, 0};
  struct __loadu_si16 {
  short __u = ((const struct __loadu_si16 *)a)->__v;
  return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
  struct __mm_load_sd_struct {
  double __u = ((const struct __mm_load_sd_struct *)dp)->__u;
  return __extension__(__m128d){__u, 0};
  struct __mm_loadh_pd_struct {
  double __u = ((const struct __mm_loadh_pd_struct *)dp)->__u;
  return __extension__(__m128d){a[0], __u};
  struct __mm_loadl_pd_struct {
  double __u = ((const struct __mm_loadl_pd_struct *)dp)->__u;
  return __extension__(__m128d){__u, a[1]};
#if HAS_BUILTIN(__builtin_ia32_undef128)
  return (__m128d)__builtin_ia32_undef128();
  __m128d undef = undef;
  return __extension__(__m128d){w, 0};
  return __extension__(__m128d){w, w};
  return __extension__(__m128d){x, w};
  return __extension__(__m128d){w, x};
  return __extension__(__m128d){0, 0};
  struct __mm_store_sd_struct {
  ((struct __mm_store_sd_struct *)dp)->__u = a[0];
#if HAS_BUILTIN(__builtin_shufflevector)
  a = __builtin_shufflevector((__v2df)a, (__v2df)a, 0, 0);
  struct __storeu_pd {
  ((struct __storeu_pd *)dp)->__v = a;
#if HAS_BUILTIN(__builtin_shufflevector)
  a = __builtin_shufflevector((__v2df)a, (__v2df)a, 1, 0);
  struct __mm_storeh_pd_struct {
  ((struct __mm_storeh_pd_struct *)dp)->__u = a[1];
  struct __mm_storeh_pd_struct {
  ((struct __mm_storeh_pd_struct *)dp)->__u = a[0];
  return (__m128i)((__v16qu)a + (__v16qu)b);
  return (__m128i)((__v8hu)a + (__v8hu)b);
  return (__m128i)((__v4su)a + (__v4su)b);
  return (__m64)__builtin_ia32_paddq((__v1di)a, (__v1di)b);
  return (__m128i)((__v2du)a + (__v2du)b);
#if HAS_BUILTIN(__builtin_elementwise_add_sat)
  return (__m128i)__builtin_elementwise_add_sat((__v16qs)a, (__v16qs)b);
  return (__m128i)__builtin_ia32_paddsb128((__v16qi)a, (__v16qi)b);
#if HAS_BUILTIN(__builtin_elementwise_add_sat)
  return (__m128i)__builtin_elementwise_add_sat((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_ia32_paddsw128((__v8hi)a, (__v8hi)b);
#if HAS_BUILTIN(__builtin_elementwise_add_sat)
  return (__m128i)__builtin_elementwise_add_sat((__v16qu)a, (__v16qu)b);
  return (__m128i)__builtin_ia32_paddusb128((__v16qi)a, (__v16qi)b);
#if HAS_BUILTIN(__builtin_elementwise_add_sat)
  return (__m128i)__builtin_elementwise_add_sat((__v8hu)a, (__v8hu)b);
  return (__m128i)__builtin_ia32_paddusw128((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_ia32_pavgb128((__v16qi)a, (__v16qi)b);
  return (__m128i)__builtin_ia32_pavgw128((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)a, (__v8hi)b);
#if HAS_BUILTIN(__builtin_elementwise_max)
  return (__m128i)__builtin_elementwise_max((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)a, (__v8hi)b);
#if HAS_BUILTIN(__builtin_elementwise_max)
  return (__m128i)__builtin_elementwise_max((__v16qu)a, (__v16qu)b);
  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)a, (__v16qi)b);
#if HAS_BUILTIN(__builtin_elementwise_min)
  return (__m128i)__builtin_elementwise_min((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_ia32_pminsw128((__v8hi)a, (__v8hi)b);
#if HAS_BUILTIN(__builtin_elementwise_min)
  return (__m128i)__builtin_elementwise_min((__v16qu)a, (__v16qu)b);
  return (__m128i)__builtin_ia32_pminub128((__v16qi)a, (__v16qi)b);
  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)a, (__v8hi)b);
  return (__m128i)((__v8hu)a * (__v8hu)b);
  return (__m64)__builtin_ia32_pmuludq((__v2si)a, (__v2si)b);
  return __builtin_ia32_pmuludq128((__v4si)a, (__v4si)b);
  return __builtin_ia32_psadbw128((__v16qi)a, (__v16qi)b);
  return (__m128i)((__v16qu)a - (__v16qu)b);
  return (__m128i)((__v8hu)a - (__v8hu)b);
  return (__m128i)((__v4su)a - (__v4su)b);
  return (__m64)__builtin_ia32_psubq((__v1di)a, (__v1di)b);
  return (__m128i)((__v2du)a - (__v2du)b);
#if HAS_BUILTIN(__builtin_elementwise_sub_sat)
  return (__m128i)__builtin_elementwise_sub_sat((__v16qs)a, (__v16qs)b);
  return (__m128i)__builtin_ia32_psubsb128((__v16qi)a, (__v16qi)b);
#if HAS_BUILTIN(__builtin_elementwise_sub_sat)
  return (__m128i)__builtin_elementwise_sub_sat((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_ia32_psubsw128((__v8hi)a, (__v8hi)b);
#if HAS_BUILTIN(__builtin_elementwise_sub_sat)
  return (__m128i)__builtin_elementwise_sub_sat((__v16qu)a, (__v16qu)b);
  return (__m128i)__builtin_ia32_psubusb128((__v16qi)a, (__v16qi)b);
#if HAS_BUILTIN(__builtin_elementwise_sub_sat)
  return (__m128i)__builtin_elementwise_sub_sat((__v8hu)a, (__v8hu)b);
  return (__m128i)__builtin_ia32_psubusw128((__v8hi)a, (__v8hi)b);
  return (__m128i)((__v2du)a & (__v2du)b);
  return (__m128i)(~(__v2du)a & (__v2du)b);
  return (__m128i)((__v2du)a | (__v2du)b);
  return (__m128i)((__v2du)a ^ (__v2du)b);
#define _mm_slli_si128(a, imm) \
  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
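/* Illustrative usage sketch, not part of the original header (hypothetical helper,
 * assumes SSE2 is enabled): _mm_slli_si128 shifts the whole 128-bit value left by a
 * constant byte count, shifting in zeros. */
static __m128i __demo_byteshift(__m128i v)
{
    return _mm_slli_si128(v, 2); /* every byte moves up 2 positions; the low 2 bytes become 0 */
}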
  return (__m128i)__builtin_ia32_psllwi128((__v8hi)a, count);
  return (__m128i)__builtin_ia32_psllw128((__v8hi)a, (__v8hi)count);
  return (__m128i)__builtin_ia32_pslldi128((__v4si)a, count);
  return (__m128i)__builtin_ia32_pslld128((__v4si)a, (__v4si)count);
  return __builtin_ia32_psllqi128((__v2di)a, count);
  return __builtin_ia32_psllq128((__v2di)a, (__v2di)count);
  return (__m128i)__builtin_ia32_psrawi128((__v8hi)a, count);
  return (__m128i)__builtin_ia32_psraw128((__v8hi)a, (__v8hi)count);
  return (__m128i)__builtin_ia32_psradi128((__v4si)a, count);
  return (__m128i)__builtin_ia32_psrad128((__v4si)a, (__v4si)count);
#define _mm_srli_si128(a, imm) \
  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)a, count);
  return (__m128i)__builtin_ia32_psrlw128((__v8hi)a, (__v8hi)count);
  return (__m128i)__builtin_ia32_psrldi128((__v4si)a, count);
  return (__m128i)__builtin_ia32_psrld128((__v4si)a, (__v4si)count);
  return __builtin_ia32_psrlqi128((__v2di)a, count);
  return __builtin_ia32_psrlq128((__v2di)a, (__v2di)count);
  return (__m128i)((__v16qi)a == (__v16qi)b);
  return (__m128i)((__v8hi)a == (__v8hi)b);
  return (__m128i)((__v4si)a == (__v4si)b);
  return (__m128i)((__v16qs)a > (__v16qs)b);
  return (__m128i)((__v8hi)a > (__v8hi)b);
  return (__m128i)((__v4si)a > (__v4si)b);
  return __builtin_ia32_cvtsd2si64((__v2df)a);
  return __builtin_ia32_cvttsd2si64((__v2df)a);
#if HAS_BUILTIN(__builtin_convertvector)
  return (__m128)__builtin_convertvector((__v4si)a, __v4sf);
  return __builtin_ia32_cvtdq2ps((__v4si)a);
  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)a);
  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)a);
  return __extension__(__m128i)(__v4si){a, 0, 0, 0};
  return __extension__(__m128i)(__v2di){a, 0};
  __v4si b = (__v4si)a;
  struct __loadu_si128 {
  return ((const struct __loadu_si128 *)p)->__v;
  struct __mm_loadl_epi64_struct {
  return __extension__(__m128i){((const struct __mm_loadl_epi64_struct *)p)->__u, 0};
#if HAS_BUILTIN(__builtin_ia32_undef128)
  return (__m128i)__builtin_ia32_undef128();
  __m128i undef = undef;
  return __extension__(__m128i)(__v2di){q0, q1};
  return __extension__(__m128i)(__v4si){i0, i1, i2, i3};
  short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0)
  return __extension__(__m128i)(__v8hi){w0, w1, w2, w3, w4, w5, w6, w7};
  char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0)
  return __extension__(__m128i)(__v16qi){b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15};
  return _mm_set_epi8(b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b);
  short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7)
  char b0, char b1, char b2, char b3, char b4, char b5, char b6, char b7, char b8, char b9, char b10, char b11, char b12, char b13, char b14, char b15)
  return _mm_set_epi8(b15, b14, b13, b12, b11, b10, b9, b8, b7, b6, b5, b4, b3, b2, b1, b0);
  return __extension__(__m128i)(__v2di){0LL, 0LL};
  struct __storeu_si128 {
  ((struct __storeu_si128 *)p)->__v = b;
  struct __storeu_si64 {
  ((struct __storeu_si64 *)p)->__v = ((__v2di)b)[0];
  struct __storeu_si32 {
  ((struct __storeu_si32 *)p)->__v = ((__v4si)b)[0];
  struct __storeu_si16 {
  ((struct __storeu_si16 *)p)->__v = ((__v8hi)b)[0];
  __builtin_ia32_maskmovdqu((__v16qi)d, (__v16qi)n, p);
  struct __mm_storel_epi64_struct {
  ((struct __mm_storel_epi64_struct *)p)->__u = a[0];
#if HAS_BUILTIN(__builtin_nontemporal_store)
  __builtin_nontemporal_store((__v2df)a, (__v2df *)p);
  __builtin_ia32_movntpd(p, a);
#if HAS_BUILTIN(__builtin_nontemporal_store)
  __builtin_nontemporal_store((__v2di)a, (__v2di *)p);
  __builtin_ia32_movntdq(p, a);
  __builtin_ia32_movnti(p, a);
  __builtin_ia32_movnti64(p, a);
  return (__m128i)__builtin_ia32_packsswb128((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_ia32_packssdw128((__v4si)a, (__v4si)b);
  return (__m128i)__builtin_ia32_packuswb128((__v8hi)a, (__v8hi)b);
#define _mm_extract_epi16(a, imm) \
  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), (int)(imm)))
#define _mm_insert_epi16(a, b, imm) \
  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), (int)(imm)))
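/* Illustrative usage sketch, not part of the original header (hypothetical helper,
 * assumes SSE2 is enabled): the lane index for these macros must be a compile-time
 * constant in the range 0..7. */
static int __demo_extract_insert(__m128i v)
{
    __m128i t = _mm_insert_epi16(v, 0x1234, 7); /* replace 16-bit lane 7 */
    return _mm_extract_epi16(t, 7);             /* reads back 0x1234, zero-extended to int */
}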
  return __builtin_ia32_pmovmskb128((__v16qi)a);
#define _mm_shuffle_epi32(a, imm) \
  ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
#define _mm_shufflelo_epi16(a, imm) \
  ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
#define _mm_shufflehi_epi16(a, imm) \
  ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))
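/* Illustrative usage sketch, not part of the original header (hypothetical helper,
 * assumes SSE2 is enabled): the 8-bit immediate holds four 2-bit source-lane
 * selectors, low result lane first, so 0x1B (selectors 3,2,1,0) reverses the four
 * 32-bit lanes. */
static __m128i __demo_reverse_lanes(__m128i v)
{
    return _mm_shuffle_epi32(v, 0x1B); /* result = { v[3], v[2], v[1], v[0] } */
}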
#if HAS_BUILTIN(__builtin_shufflevector)
  return (__m128i)__builtin_shufflevector((__v16qi)a, (__v16qi)b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11, 16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
  return (__m128i)__builtin_ia32_punpckhbw128((__v16qi)a, (__v16qi)b);
#if HAS_BUILTIN(__builtin_shufflevector)
  return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 4, 8 + 4, 5, 8 + 5, 6, 8 + 6, 7, 8 + 7);
  return (__m128i)__builtin_ia32_punpckhwd128((__v8hi)a, (__v8hi)b);
  return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 2, 4 + 2, 3, 4 + 3);
  return (__m128i)__builtin_ia32_punpckhdq128((__v4si)a, (__v4si)b);
#if HAS_BUILTIN(__builtin_shufflevector)
  return (__m128i)__builtin_shufflevector((__v2di)a, (__v2di)b, 1, 2 + 1);
  return (__m128i)__builtin_ia32_punpckhqdq128((__v2di)a, (__v2di)b);
#if HAS_BUILTIN(__builtin_shufflevector)
  return (__m128i)__builtin_shufflevector((__v16qi)a, (__v16qi)b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4, 16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
  return (__m128i)__builtin_ia32_punpcklbw128((__v16qi)a, (__v16qi)b);
#if HAS_BUILTIN(__builtin_shufflevector)
  return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 0, 8 + 0, 1, 8 + 1, 2, 8 + 2, 3, 8 + 3);
  return (__m128i)__builtin_ia32_punpcklwd128((__v8hi)a, (__v8hi)b);
#if HAS_BUILTIN(__builtin_shufflevector)
  return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 0, 4 + 0, 1, 4 + 1);
  return (__m128i)__builtin_ia32_punpckldq128((__v4si)a, (__v4si)b);
#if HAS_BUILTIN(__builtin_shufflevector)
  return (__m128i)__builtin_shufflevector((__v2di)a, (__v2di)b, 0, 2 + 0);
  return (__m128i)__builtin_ia32_punpcklqdq128((__v2di)a, (__v2di)b);
  return __extension__(__m128i)(__v2di){(long long)a, 0};
#if HAS_BUILTIN(__builtin_shufflevector)
  return (__m128i)__builtin_ia32_movq128((__v2di)a);
#if HAS_BUILTIN(__builtin_shufflevector)
  return __builtin_shufflevector((__v2df)a, (__v2df)b, 1, 2 + 1);
  return (__m128d)__builtin_ia32_unpckhpd((__v2df)a, (__v2df)b);
#if HAS_BUILTIN(__builtin_shufflevector)
  return __builtin_shufflevector((__v2df)a, (__v2df)b, 0, 2 + 0);
  return (__m128d)__builtin_ia32_unpcklpd((__v2df)a, (__v2df)b);
  return __builtin_ia32_movmskpd((__v2df)a);
#define _mm_shuffle_pd(a, b, i) \
  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), (int)(i)))
#define _DECLSPEC_INTRIN_TYPE
#define _STATIC_ASSERT(expr)
int align(int length, int align)
__m128 _mm_cvtpd_ps(__m128d a)
__m128d _mm_cmpnge_sd(__m128d a, __m128d b)
void _mm_storeu_pd(double *dp, __m128d a)
__m128d _mm_add_sd(__m128d a, __m128d b)
void _mm_storeu_si128(__m128i_u *p, __m128i b)
__m128i _mm_set_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0)
__m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
__m128i _mm_set1_epi16(short w)
int _mm_cvtsi128_si32(__m128i a)
__m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
void _mm_store_si128(__m128i *p, __m128i b)
__m128i _mm_adds_epu16(__m128i a, __m128i b)
__m128i _mm_movpi64_epi64(__m64 a)
__m128i _mm_slli_epi64(__m128i a, int count)
int _mm_ucomile_sd(__m128d a, __m128d b)
__m128d _mm_cmpeq_sd(__m128d a, __m128d b)
int _mm_cvtsd_si32(__m128d a)
int _mm_comile_sd(__m128d a, __m128d b)
__m128i _mm_castps_si128(__m128 a)
__m128d _mm_cmpnlt_pd(__m128d a, __m128d b)
__m128i _mm_setr_epi32(int i0, int i1, int i2, int i3)
__m128i _mm_setzero_si128(void)
__m128i _mm_srl_epi64(__m128i a, __m128i count)
__m128d _mm_add_pd(__m128d a, __m128d b)
__m128i _mm_cvtpd_epi32(__m128d a)
__m128i _mm_xor_si128(__m128i a, __m128i b)
__m128i _mm_move_epi64(__m128i a)
__m128d _mm_sub_sd(__m128d a, __m128d b)
__m128d _mm_loadh_pd(__m128d a, double const *dp)
__m128d _mm_cmpnlt_sd(__m128d a, __m128d b)
__m128i _mm_srli_epi64(__m128i a, int count)
__m128d _mm_setr_pd(double w, double x)
#define __INTRIN_INLINE_SSE2
__m128d _mm_cmpord_sd(__m128d a, __m128d b)
__m128i _mm_set1_epi64(__m64 q)
__m128d _mm_cmpunord_pd(__m128d a, __m128d b)
__m128i _mm_packs_epi32(__m128i a, __m128i b)
__m128d _mm_castps_pd(__m128 a)
void _mm_store1_pd(double *dp, __m128d a)
__m128i _mm_sad_epu8(__m128i a, __m128i b)
__INTRIN_INLINE_SSE2 __m128d _mm_undefined_pd(void)
__INTRIN_INLINE_SSE2 void _mm_storeu_si32(void *p, __m128i b)
__m128d _mm_setzero_pd(void)
__m128 _mm_castsi128_ps(__m128i a)
__m128d _mm_cmpneq_pd(__m128d a, __m128d b)
void _mm_storel_epi64(__m128i_u *p, __m128i a)
__m128i _mm_packus_epi16(__m128i a, __m128i b)
__m128d _mm_set_sd(double w)
__m128 _mm_cvtepi32_ps(__m128i a)
#define _mm_slli_si128(a, imm)
__m128i _mm_adds_epu8(__m128i a, __m128i b)
__m128i _mm_sub_epi32(__m128i a, __m128i b)
__m128i _mm_castpd_si128(__m128d a)
__m128i _mm_add_epi16(__m128i a, __m128i b)
__m128d _mm_cmpnge_pd(__m128d a, __m128d b)
int _mm_comige_sd(__m128d a, __m128d b)
__m128d _mm_cmpge_pd(__m128d a, __m128d b)
__INTRIN_INLINE_SSE2 void _mm_storeu_si16(void *p, __m128i b)
__m128d _mm_loadr_pd(double const *dp)
__m128i _mm_mulhi_epu16(__m128i a, __m128i b)
__m128i _mm_slli_epi32(__m128i a, int count)
__m64 _mm_sub_si64(__m64 a, __m64 b)
__m128i _mm_load_si128(__m128i const *p)
__m128d _mm_max_sd(__m128d a, __m128d b)
__m64 _mm_cvtpd_pi32(__m128d a)
__m128d _mm_and_pd(__m128d a, __m128d b)
__m128i _mm_mul_epu32(__m128i a, __m128i b)
__m128i _mm_cvttpd_epi32(__m128d a)
int _mm_ucomige_sd(__m128d a, __m128d b)
__m128d _mm_sub_pd(__m128d a, __m128d b)
__m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
__m128d _mm_load1_pd(double const *dp)
__m128d _mm_load_pd(double const *dp)
__m128d _mm_min_pd(__m128d a, __m128d b)
__m128i _mm_sll_epi32(__m128i a, __m128i count)
__m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
void _mm_stream_si32(int *p, int a)
__m128i _mm_subs_epu8(__m128i a, __m128i b)
__m128i _mm_srl_epi32(__m128i a, __m128i count)
__m128i _mm_mulhi_epi16(__m128i a, __m128i b)
#define _mm_shuffle_pd(a, b, i)
void _mm_stream_si128(__m128i *p, __m128i a)
__m128d _mm_or_pd(__m128d a, __m128d b)
__m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
__m128d _mm_cmpge_sd(__m128d a, __m128d b)
__m128d _mm_mul_sd(__m128d a, __m128d b)
__m128i _mm_set1_epi8(char b)
__m128i _mm_sra_epi32(__m128i a, __m128i count)
__m128d _mm_sqrt_sd(__m128d a, __m128d b)
__m128i _mm_srai_epi32(__m128i a, int count)
__m128i _mm_cvtsi32_si128(int a)
__m64 _mm_add_si64(__m64 a, __m64 b)
__m128i _mm_slli_epi16(__m128i a, int count)
__m128d _mm_cmpeq_pd(__m128d a, __m128d b)
__m128i _mm_subs_epi8(__m128i a, __m128i b)
void _mm_storer_pd(double *dp, __m128d a)
typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)))
__INTRIN_INLINE_SSE2 __m128i _mm_cvtsi64_si128(long long a)
int _mm_movemask_pd(__m128d a)
__m128i _mm_setr_epi64(__m64 q0, __m64 q1)
__m128i _mm_sub_epi64(__m128i a, __m128i b)
__m128d _mm_move_sd(__m128d a, __m128d b)
__m128i _mm_min_epu8(__m128i a, __m128i b)
__INTRIN_INLINE_SSE2 __m128i _mm_set_epi64x(long long q1, long long q0)
__m128d _mm_unpackhi_pd(__m128d a, __m128d b)
int _mm_cvttsd_si32(__m128d a)
__m128d _mm_cmpnle_pd(__m128d a, __m128d b)
__m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
__m128d _mm_cmpnle_sd(__m128d a, __m128d b)
__m128i _mm_add_epi8(__m128i a, __m128i b)
__m128d _mm_cmple_sd(__m128d a, __m128d b)
__m128d _mm_cmple_pd(__m128d a, __m128d b)
__m128i _mm_cmplt_epi32(__m128i a, __m128i b)
#define _mm_insert_epi16(a, b, imm)
__m128d _mm_cvtpi32_pd(__m64 a)
__m128d _mm_loadu_pd(double const *dp)
__m64 _mm_mul_su32(__m64 a, __m64 b)
__m128i _mm_avg_epu16(__m128i a, __m128i b)
__m128d _mm_cvtss_sd(__m128d a, __m128 b)
int _mm_ucomieq_sd(__m128d a, __m128d b)
__m128i _mm_setr_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0)
__m128i _mm_sll_epi64(__m128i a, __m128i count)
__m128d _mm_cmpngt_sd(__m128d a, __m128d b)
void _mm_storel_pd(double *dp, __m128d a)
__m128d _mm_cmplt_sd(__m128d a, __m128d b)
__m128d _mm_cvtsi32_sd(__m128d a, int b)
__m128i _mm_or_si128(__m128i a, __m128i b)
__INTRIN_INLINE_SSE2 long long _mm_cvtsi128_si64(__m128i a)
__m128i _mm_cmplt_epi16(__m128i a, __m128i b)
__m128i _mm_subs_epi16(__m128i a, __m128i b)
__INTRIN_INLINE_SSE2 __m128i _mm_loadu_si64(void const *a)
__m128d _mm_cmpngt_pd(__m128d a, __m128d b)
#define _mm_extract_epi16(a, imm)
double _mm_cvtsd_f64(__m128d a)
#define _mm_shufflelo_epi16(a, imm)
__m128i _mm_packs_epi16(__m128i a, __m128i b)
__INTRIN_INLINE_SSE2 __m128i _mm_set1_epi64x(long long q)
__m128d _mm_min_sd(__m128d a, __m128d b)
void _mm_store_pd(double *dp, __m128d a)
__m128i _mm_srli_epi16(__m128i a, int count)
__m128i _mm_sub_epi8(__m128i a, __m128i b)
__m128d _mm_castsi128_pd(__m128i a)
__m128d _mm_cmpord_pd(__m128d a, __m128d b)
void _mm_storeh_pd(double *dp, __m128d a)
__m128d _mm_mul_pd(__m128d a, __m128d b)
__INTRIN_INLINE_SSE2 __m128i _mm_loadu_si32(void const *a)
__m128i _mm_sll_epi16(__m128i a, __m128i count)
__m128d _mm_sqrt_pd(__m128d a)
__m128i _mm_mullo_epi16(__m128i a, __m128i b)
__m128i _mm_sra_epi16(__m128i a, __m128i count)
int _mm_comieq_sd(__m128d a, __m128d b)
__m128i _mm_cvttps_epi32(__m128 a)
__m128d _mm_load_sd(double const *dp)
__m64 _mm_movepi64_pi64(__m128i a)
void _mm_maskmoveu_si128(__m128i d, __m128i n, _Out_writes_bytes_(16) char *p)
int _mm_movemask_epi8(__m128i a)
__m128i _mm_madd_epi16(__m128i a, __m128i b)
__m128d _mm_cmpgt_sd(__m128d a, __m128d b)
int _mm_ucomilt_sd(__m128d a, __m128d b)
__m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
__m128i _mm_srai_epi16(__m128i a, int count)
__m128i _mm_set_epi16(short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0)
__m128i _mm_setr_epi16(short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7)
int _mm_ucomineq_sd(__m128d a, __m128d b)
__m128i _mm_cvtps_epi32(__m128 a)
__m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
__m128d _mm_andnot_pd(__m128d a, __m128d b)
__m128d _mm_loadl_pd(__m128d a, double const *dp)
__m128d _mm_cmpneq_sd(__m128d a, __m128d b)
__m128i _mm_min_epi16(__m128i a, __m128i b)
__m128 _mm_cvtsd_ss(__m128 a, __m128d b)
__m128i _mm_andnot_si128(__m128i a, __m128i b)
__m128i _mm_and_si128(__m128i a, __m128i b)
__m128i _mm_setl_epi64(__m128i q)
void _mm_stream_pd(double *p, __m128d a)
__m128 _mm_castpd_ps(__m128d a)
__INTRIN_INLINE_SSE2 __m128i _mm_undefined_si128(void)
int _mm_comineq_sd(__m128d a, __m128d b)
__m128i _mm_avg_epu8(__m128i a, __m128i b)
int _mm_ucomigt_sd(__m128d a, __m128d b)
__m128i _mm_adds_epi16(__m128i a, __m128i b)
__m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
__m128i _mm_adds_epi8(__m128i a, __m128i b)
int _mm_comilt_sd(__m128d a, __m128d b)
__m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
__m128i _mm_cmplt_epi8(__m128i a, __m128i b)
#define _mm_shufflehi_epi16(a, imm)
__m128d _mm_cvtepi32_pd(__m128i a)
__m128i _mm_max_epi16(__m128i a, __m128i b)
__m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
__m128d _mm_xor_pd(__m128d a, __m128d b)
__INTRIN_INLINE_SSE2 void _mm_storeu_si64(void *p, __m128i b)
void _mm_clflush(void const *p)
__m128d _mm_cvtps_pd(__m128 a)
__m128d _mm_cmpgt_pd(__m128d a, __m128d b)
__INTRIN_INLINE_SSE2 __m128i _mm_loadu_si16(void const *a)
void _mm_store_sd(double *dp, __m128d a)
__m128d _mm_set_pd(double w, double x)
__m128i _mm_srli_epi32(__m128i a, int count)
int _mm_comigt_sd(__m128d a, __m128d b)
__m128i _mm_set_epi64(__m64 q1, __m64 q0)
__m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
__m128d _mm_cmplt_pd(__m128d a, __m128d b)
__m128i _mm_add_epi32(__m128i a, __m128i b)
__m128i _mm_sub_epi16(__m128i a, __m128i b)
__m64 _mm_cvttpd_pi32(__m128d a)
__m128i _mm_loadl_epi64(__m128i_u const *p)
__m128i _mm_add_epi64(__m128i a, __m128i b)
#define _mm_srli_si128(a, imm)
__m128d _mm_div_sd(__m128d a, __m128d b)
__m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
#define _mm_shuffle_epi32(a, imm)
__m128d _mm_set1_pd(double w)
__m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
__m128d _mm_div_pd(__m128d a, __m128d b)
__m128i _mm_srl_epi16(__m128i a, __m128i count)
__m128d _mm_max_pd(__m128d a, __m128d b)
__m128d _mm_cmpunord_sd(__m128d a, __m128d b)
__m128d _mm_unpacklo_pd(__m128d a, __m128d b)
__m128i _mm_subs_epu16(__m128i a, __m128i b)
__m128i _mm_loadu_si128(__m128i_u const *p)
__m128i _mm_set1_epi32(int i)
__m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
__m128i _mm_max_epu8(__m128i a, __m128i b)
#define __INTRIN_INLINE_MMX
#define _Out_writes_bytes_(size)
#define __INTRIN_INLINE_SSE